From bf6e6e718cdc7488e2da87b21e258ccc065fe499 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 27 May 2010 23:05:26 -0700 Subject: [PATCH] initial commit --- CA/.gitignore | 11 + CA/INTER/.gitignore | 1 + CA/geninter.sh | 30 + CA/genrootca.sh | 26 + CA/newcerts/.placeholder | 0 CA/openssl.cnf.tmpl | 87 + CA/private/.placeholder | 0 CA/reqs/.gitignore | 1 + HACKING | 53 + LICENSE | 176 + bin/nova-api | 63 + bin/nova-compute | 97 + bin/nova-manage | 158 + bin/nova-objectstore | 49 + bin/nova-volume | 68 + debian/changelog | 6 + debian/compat | 1 + debian/control | 40 + debian/nova-api.init | 69 + debian/nova-api.install | 1 + debian/nova-common.install | 4 + debian/nova-compute.init | 69 + debian/nova-compute.install | 1 + debian/nova-objectstore.init | 69 + debian/nova-objectstore.install | 1 + debian/nova-volume.init | 69 + debian/nova-volume.install | 1 + debian/pycompat | 1 + debian/pyversions | 1 + debian/rules | 4 + docs/.gitignore | 1 + docs/Makefile | 89 + docs/_build/.gitignore | 1 + docs/_static/.gitignore | 0 docs/_templates/.gitignore | 0 docs/architecture.rst | 46 + docs/auth.rst | 213 + docs/binaries.rst | 29 + docs/compute.rst | 72 + docs/conf.py | 202 + docs/endpoint.rst | 89 + docs/fakes.rst | 41 + docs/getting.started.rst | 70 + docs/index.rst | 53 + docs/modules.rst | 32 + docs/network.rst | 86 + docs/nova.rst | 89 + docs/objectstore.rst | 64 + docs/packages.rst | 27 + docs/storage.rst | 29 + docs/volume.rst | 43 + nova/__init__.py | 30 + nova/adminclient.py | 113 + nova/auth/__init__.py | 25 + nova/auth/access.py | 69 + nova/auth/fakeldap.py | 81 + nova/auth/novarc.template | 26 + nova/auth/rbac.ldif | 60 + nova/auth/signer.py | 127 + nova/auth/slap.sh | 226 + nova/auth/users.py | 454 ++ nova/compute/__init__.py | 28 + nova/compute/disk.py | 122 + nova/compute/exception.py | 35 + nova/compute/fakevirtinstance.xml | 43 + nova/compute/libvirt.xml.template | 46 + nova/compute/linux_net.py | 146 + nova/compute/model.py | 203 + nova/compute/network.py | 520 ++ nova/compute/node.py | 549 ++ nova/crypto.py | 224 + nova/datastore.py | 367 ++ nova/endpoint/__init__.py | 28 + nova/endpoint/admin.py | 131 + nova/endpoint/api.py | 337 + nova/endpoint/cloud.py | 572 ++ nova/endpoint/images.py | 92 + nova/exception.py | 53 + nova/fakerabbit.py | 131 + nova/fakevirt.py | 109 + nova/flags.py | 78 + nova/objectstore/__init__.py | 28 + nova/objectstore/bucket.py | 174 + nova/objectstore/handler.py | 285 + nova/objectstore/image.py | 177 + nova/objectstore/stored.py | 58 + nova/process.py | 131 + nova/rpc.py | 222 + nova/server.py | 139 + nova/test.py | 246 + nova/tests/CA/cacert.pem | 17 + nova/tests/CA/private/cakey.pem | 15 + nova/tests/__init__.py | 27 + nova/tests/access_unittest.py | 60 + nova/tests/api_integration.py | 50 + nova/tests/api_unittest.py | 189 + nova/tests/bundle/1mb.manifest.xml | 1 + nova/tests/bundle/1mb.part.0 | Bin 0 -> 1024 bytes nova/tests/bundle/1mb.part.1 | 1 + nova/tests/cloud_unittest.py | 161 + nova/tests/datastore_unittest.py | 60 + nova/tests/fake_flags.py | 26 + nova/tests/future_unittest.py | 74 + nova/tests/keeper_unittest.py | 57 + nova/tests/network_unittest.py | 113 + nova/tests/node_unittest.py | 128 + nova/tests/objectstore_unittest.py | 190 + nova/tests/real_flags.py | 24 + nova/tests/storage_unittest.py | 86 + nova/tests/users_unittest.py | 137 + nova/twistd.py | 249 + nova/utils.py | 96 + nova/vendor.py | 43 + nova/volume/__init__.py | 27 + nova/volume/storage.py | 250 + run_tests.py | 99 + setup.py | 32 + vendor/IPy.py | 1304 ++++ 
vendor/Twisted-10.0.0/INSTALL | 32 + vendor/Twisted-10.0.0/LICENSE | 57 + vendor/Twisted-10.0.0/NEWS | 1416 ++++ vendor/Twisted-10.0.0/README | 118 + vendor/Twisted-10.0.0/bin/.twistd.swp | Bin 0 -> 12288 bytes vendor/Twisted-10.0.0/bin/conch/cftp | 20 + vendor/Twisted-10.0.0/bin/conch/ckeygen | 20 + vendor/Twisted-10.0.0/bin/conch/conch | 20 + vendor/Twisted-10.0.0/bin/conch/tkconch | 20 + vendor/Twisted-10.0.0/bin/lore/lore | 21 + vendor/Twisted-10.0.0/bin/mail/mailmail | 25 + vendor/Twisted-10.0.0/bin/manhole | 21 + vendor/Twisted-10.0.0/bin/mktap | 18 + vendor/Twisted-10.0.0/bin/pyhtmlizer | 17 + vendor/Twisted-10.0.0/bin/tap2deb | 20 + vendor/Twisted-10.0.0/bin/tap2rpm | 22 + vendor/Twisted-10.0.0/bin/tapconvert | 18 + vendor/Twisted-10.0.0/bin/trial | 22 + vendor/Twisted-10.0.0/bin/twistd | 19 + .../doc/conch/benchmarks/README | 15 + .../doc/conch/benchmarks/buffering_mixin.py | 182 + .../doc/conch/examples/demo.tac | 25 + .../doc/conch/examples/demo_draw.tac | 80 + .../doc/conch/examples/demo_insults.tac | 252 + .../doc/conch/examples/demo_manhole.tac | 56 + .../doc/conch/examples/demo_recvline.tac | 77 + .../doc/conch/examples/demo_scroll.tac | 100 + .../doc/conch/examples/index.html | 40 + .../doc/conch/examples/sshsimpleclient.py | 111 + .../doc/conch/examples/sshsimpleserver.py | 117 + .../doc/conch/examples/telnet_echo.tac | 37 + .../doc/conch/examples/window.tac | 190 + .../doc/conch/howto/conch_client.html | 318 + .../Twisted-10.0.0/doc/conch/howto/index.html | 28 + vendor/Twisted-10.0.0/doc/conch/index.html | 25 + .../doc/conch/man/cftp-man.html | 87 + vendor/Twisted-10.0.0/doc/conch/man/cftp.1 | 89 + .../doc/conch/man/ckeygen-man.html | 107 + vendor/Twisted-10.0.0/doc/conch/man/ckeygen.1 | 58 + .../doc/conch/man/conch-man.html | 148 + vendor/Twisted-10.0.0/doc/conch/man/conch.1 | 206 + .../doc/conch/man/tkconch-man.html | 129 + vendor/Twisted-10.0.0/doc/conch/man/tkconch.1 | 72 + .../doc/core/benchmarks/banana.py | 10 + .../doc/core/benchmarks/deferreds.py | 145 + .../doc/core/benchmarks/failure.py | 66 + .../doc/core/benchmarks/linereceiver.py | 47 + .../doc/core/benchmarks/task.py | 26 + .../doc/core/benchmarks/timer.py | 24 + .../doc/core/benchmarks/tpclient.py | 60 + .../doc/core/benchmarks/tpclient_nt.py | 22 + .../doc/core/benchmarks/tpserver.py | 19 + .../doc/core/benchmarks/tpserver_nt.py | 22 + .../doc/core/development/index.html | 27 + .../listings/new_module_template.py | 12 + .../doc/core/development/naming.html | 38 + .../doc/core/development/philosophy.html | 58 + .../development/policy/coding-standard.html | 809 +++ .../core/development/policy/doc-standard.html | 188 + .../doc/core/development/policy/index.html | 33 + .../doc/core/development/policy/svn-dev.html | 227 + .../development/policy/test-standard.html | 362 ++ .../development/policy/writing-standard.html | 313 + .../doc/core/development/security.html | 43 + .../doc/core/examples/ampclient.py | 26 + .../doc/core/examples/ampserver.py | 40 + .../doc/core/examples/bananabench.py | 79 + .../doc/core/examples/chatserver.py | 37 + .../doc/core/examples/courier.py | 111 + .../Twisted-10.0.0/doc/core/examples/cred.py | 163 + .../doc/core/examples/dbcred.py | 179 + .../doc/core/examples/echoclient.py | 41 + .../doc/core/examples/echoclient_ssl.py | 46 + .../doc/core/examples/echoclient_udp.py | 38 + .../doc/core/examples/echoserv.py | 27 + .../doc/core/examples/echoserv_ssl.py | 30 + .../doc/core/examples/echoserv_udp.py | 19 + .../doc/core/examples/filewatch.py | 17 + .../doc/core/examples/ftpclient.py | 
113 + .../doc/core/examples/ftpserver.py | 55 + .../doc/core/examples/gpsfix.py | 78 + .../doc/core/examples/index.html | 127 + .../doc/core/examples/longex.py | 66 + .../doc/core/examples/longex2.py | 101 + .../Twisted-10.0.0/doc/core/examples/mouse.py | 80 + .../doc/core/examples/pb_exceptions.py | 36 + .../doc/core/examples/pbbenchclient.py | 42 + .../doc/core/examples/pbbenchserver.py | 54 + .../doc/core/examples/pbecho.py | 51 + .../doc/core/examples/pbechoclient.py | 32 + .../doc/core/examples/pbgtk2.py | 122 + .../doc/core/examples/pbgtk2login.glade | 330 + .../doc/core/examples/pbinterop.py | 71 + .../doc/core/examples/pbsimple.py | 16 + .../doc/core/examples/pbsimpleclient.py | 18 + .../doc/core/examples/postfix.py | 29 + .../doc/core/examples/ptyserv.py | 32 + .../doc/core/examples/pyui_bg.png | Bin 0 -> 29913 bytes .../doc/core/examples/pyuidemo.py | 31 + .../doc/core/examples/rotatinglog.py | 26 + .../doc/core/examples/row_example.py | 105 + .../doc/core/examples/row_schema.sql | 65 + .../doc/core/examples/row_util.py | 103 + .../doc/core/examples/server.pem | 36 + .../doc/core/examples/shaper.py | 52 + .../doc/core/examples/shoutcast.py | 26 + .../doc/core/examples/simple.tac | 39 + .../doc/core/examples/simpleclient.py | 49 + .../doc/core/examples/simpleserv.py | 26 + .../Twisted-10.0.0/doc/core/examples/stdin.py | 30 + .../doc/core/examples/stdiodemo.py | 78 + .../doc/core/examples/testlogging.py | 41 + .../English.lproj/MainMenu.nib/classes.nib | 13 + .../English.lproj/MainMenu.nib/info.nib | 24 + .../MainMenu.nib/keyedobjects.nib | Bin 0 -> 14896 bytes .../Cocoa/SimpleWebClient/README.txt | 6 + .../Cocoa/SimpleWebClient/Twistzilla.py | 79 + .../Cocoa/SimpleWebClient/setup.py | 14 + .../doc/core/examples/threadedselect/README | 15 + .../examples/threadedselect/blockingdemo.py | 92 + .../examples/threadedselect/pygamedemo.py | 78 + .../doc/core/examples/twistd-logging.tac | 33 + .../doc/core/examples/wxacceptance.py | 113 + .../doc/core/examples/wxdemo.py | 64 + .../doc/core/howto/application.html | 376 ++ .../Twisted-10.0.0/doc/core/howto/basics.html | 99 + vendor/Twisted-10.0.0/doc/core/howto/book.tex | 116 + .../doc/core/howto/choosing-reactor.html | 355 + .../doc/core/howto/clients.html | 635 ++ .../doc/core/howto/components.html | 600 ++ .../Twisted-10.0.0/doc/core/howto/cred.html | 566 ++ .../doc/core/howto/debug-with-emacs.html | 65 + .../Twisted-10.0.0/doc/core/howto/defer.html | 840 +++ .../doc/core/howto/deferredindepth.html | 2183 +++++++ .../Twisted-10.0.0/doc/core/howto/design.html | 257 + .../Twisted-10.0.0/doc/core/howto/dirdbm.html | 77 + .../doc/core/howto/gendefer.html | 415 ++ .../doc/core/howto/glossary.html | 347 + .../doc/core/howto/howto.tidyrc | 6 + .../Twisted-10.0.0/doc/core/howto/index.html | 198 + .../doc/core/howto/internet-overview.html | 48 + .../howto/listings/TwistedQuotes/__init__.py | 3 + .../howto/listings/TwistedQuotes/pbquote.py | 10 + .../listings/TwistedQuotes/pbquoteclient.py | 32 + .../listings/TwistedQuotes/quoteproto.py | 36 + .../howto/listings/TwistedQuotes/quoters.py | 39 + .../howto/listings/TwistedQuotes/quotes.txt | 15 + .../howto/listings/TwistedQuotes/quotetap.py | 29 + .../howto/listings/TwistedQuotes/quotetap2.py | 36 + .../howto/listings/TwistedQuotes/webquote.rpy | 12 + .../howto/listings/application/service.tac | 34 + .../howto/listings/deferred/deferred_ex.py | 60 + .../howto/listings/deferred/deferred_ex1a.py | 73 + .../howto/listings/deferred/deferred_ex1b.py | 79 + .../howto/listings/deferred/deferred_ex2.py | 91 + 
.../howto/listings/deferred/deferred_ex3.py | 100 + .../howto/listings/deferred/deferred_ex4.py | 104 + .../howto/listings/deferred/deferred_ex5.py | 136 + .../howto/listings/deferred/deferred_ex6.py | 148 + .../howto/listings/deferred/deferred_ex7.py | 61 + .../howto/listings/deferred/deferred_ex8.py | 66 + .../listings/deferred/synch-validation.py | 5 + .../core/howto/listings/pb/cache_classes.py | 43 + .../core/howto/listings/pb/cache_receiver.py | 28 + .../core/howto/listings/pb/cache_sender.py | 50 + .../doc/core/howto/listings/pb/chatclient.py | 40 + .../doc/core/howto/listings/pb/chatserver.py | 65 + .../core/howto/listings/pb/copy2_classes.py | 29 + .../core/howto/listings/pb/copy2_receiver.py | 21 + .../core/howto/listings/pb/copy2_sender.py | 44 + .../core/howto/listings/pb/copy_receiver.tac | 41 + .../doc/core/howto/listings/pb/copy_sender.py | 57 + .../doc/core/howto/listings/pb/exc_client.py | 33 + .../doc/core/howto/listings/pb/exc_server.py | 32 + .../doc/core/howto/listings/pb/pb1client.py | 31 + .../doc/core/howto/listings/pb/pb1server.py | 20 + .../doc/core/howto/listings/pb/pb2client.py | 36 + .../doc/core/howto/listings/pb/pb2server.py | 30 + .../doc/core/howto/listings/pb/pb3client.py | 26 + .../doc/core/howto/listings/pb/pb3server.py | 16 + .../doc/core/howto/listings/pb/pb4client.py | 58 + .../doc/core/howto/listings/pb/pb5client.py | 22 + .../doc/core/howto/listings/pb/pb5server.py | 29 + .../doc/core/howto/listings/pb/pb6client1.py | 22 + .../doc/core/howto/listings/pb/pb6client2.py | 25 + .../doc/core/howto/listings/pb/pb6server.py | 30 + .../doc/core/howto/listings/pb/pb7client.py | 29 + .../core/howto/listings/pb/pbAnonClient.py | 70 + .../core/howto/listings/pb/pbAnonServer.py | 91 + .../doc/core/howto/listings/pb/trap_client.py | 88 + .../doc/core/howto/listings/pb/trap_server.py | 21 + .../core/howto/listings/process/process.py | 46 + .../doc/core/howto/listings/process/quotes.py | 25 + .../howto/listings/process/trueandfalse.py | 14 + .../howto/listings/udp/MulticastClient.py | 13 + .../howto/listings/udp/MulticastServer.py | 25 + .../doc/core/howto/logging.html | 181 + .../doc/core/howto/options.html | 533 ++ .../doc/core/howto/overview.html | 18 + .../doc/core/howto/pb-copyable.html | 1195 ++++ .../doc/core/howto/pb-cred.html | 1723 +++++ .../doc/core/howto/pb-intro.html | 320 + .../doc/core/howto/pb-usage.html | 1158 ++++ vendor/Twisted-10.0.0/doc/core/howto/pb.html | 52 + .../doc/core/howto/pclients.html | 364 ++ .../Twisted-10.0.0/doc/core/howto/plugin.html | 292 + .../doc/core/howto/process.html | 725 +++ .../doc/core/howto/producers.html | 88 + .../Twisted-10.0.0/doc/core/howto/quotes.html | 214 + .../Twisted-10.0.0/doc/core/howto/rdbms.html | 228 + .../doc/core/howto/reactor-basics.html | 92 + vendor/Twisted-10.0.0/doc/core/howto/row.html | 279 + .../doc/core/howto/servers.html | 429 ++ vendor/Twisted-10.0.0/doc/core/howto/ssl.html | 550 ++ .../doc/core/howto/stylesheet-unprocessed.css | 20 + .../doc/core/howto/stylesheet.css | 189 + vendor/Twisted-10.0.0/doc/core/howto/tap.html | 346 + .../Twisted-10.0.0/doc/core/howto/telnet.html | 83 + .../doc/core/howto/template.tpl | 23 + .../doc/core/howto/testing.html | 168 + .../doc/core/howto/threading.html | 213 + .../Twisted-10.0.0/doc/core/howto/time.html | 118 + .../doc/core/howto/tutorial/backends.html | 1207 ++++ .../doc/core/howto/tutorial/client.html | 260 + .../doc/core/howto/tutorial/components.html | 1068 +++ .../core/howto/tutorial/configuration.html | 792 +++ 
.../doc/core/howto/tutorial/factory.html | 633 ++ .../doc/core/howto/tutorial/index.html | 83 + .../doc/core/howto/tutorial/intro.html | 716 +++ .../doc/core/howto/tutorial/library.html | 269 + .../howto/tutorial/listings/finger/etc.users | 2 + .../listings/finger/finger/__init__.py | 3 + .../tutorial/listings/finger/finger/finger.py | 331 + .../tutorial/listings/finger/finger/tap.py | 20 + .../tutorial/listings/finger/finger01.py | 2 + .../tutorial/listings/finger/finger02.py | 10 + .../tutorial/listings/finger/finger03.py | 11 + .../tutorial/listings/finger/finger04.py | 12 + .../tutorial/listings/finger/finger05.py | 13 + .../tutorial/listings/finger/finger06.py | 18 + .../tutorial/listings/finger/finger07.py | 21 + .../tutorial/listings/finger/finger08.py | 30 + .../tutorial/listings/finger/finger09.py | 26 + .../tutorial/listings/finger/finger10.py | 30 + .../tutorial/listings/finger/finger11.tac | 34 + .../tutorial/listings/finger/finger12.tac | 55 + .../tutorial/listings/finger/finger13.tac | 59 + .../tutorial/listings/finger/finger14.tac | 55 + .../tutorial/listings/finger/finger15.tac | 76 + .../tutorial/listings/finger/finger16.tac | 91 + .../tutorial/listings/finger/finger17.tac | 91 + .../tutorial/listings/finger/finger18.tac | 137 + .../tutorial/listings/finger/finger19.tac | 238 + .../tutorial/listings/finger/finger19a.tac | 231 + .../listings/finger/finger19a_changes.py | 29 + .../tutorial/listings/finger/finger19b.tac | 257 + .../listings/finger/finger19b_changes.py | 19 + .../tutorial/listings/finger/finger19c.tac | 269 + .../listings/finger/finger19c_changes.py | 32 + .../tutorial/listings/finger/finger20.tac | 251 + .../tutorial/listings/finger/finger21.tac | 280 + .../tutorial/listings/finger/finger22.py | 297 + .../listings/finger/fingerPBclient.py | 26 + .../listings/finger/fingerXRclient.py | 5 + .../tutorial/listings/finger/finger_config.py | 38 + .../tutorial/listings/finger/fingerproxy.tac | 110 + .../listings/finger/organized-finger.tac | 31 + .../listings/finger/simple-finger.tac | 17 + .../finger/twisted/plugins/finger_tutorial.py | 5 + .../doc/core/howto/tutorial/pb.html | 650 ++ .../doc/core/howto/tutorial/protocol.html | 1055 +++ .../doc/core/howto/tutorial/style.html | 313 + .../doc/core/howto/tutorial/web.html | 537 ++ vendor/Twisted-10.0.0/doc/core/howto/udp.html | 275 + .../doc/core/howto/upgrading.html | 331 + .../Twisted-10.0.0/doc/core/howto/vision.html | 43 + .../doc/core/howto/website-template.tpl | 22 + .../doc/core/img/TwistedLogo.bmp | Bin 0 -> 55494 bytes .../doc/core/img/cred-login.dia | Bin 0 -> 2369 bytes .../doc/core/img/cred-login.png | Bin 0 -> 34148 bytes .../doc/core/img/deferred-attach.dia | Bin 0 -> 2234 bytes .../doc/core/img/deferred-attach.png | Bin 0 -> 9356 bytes .../doc/core/img/deferred-process.dia | Bin 0 -> 2099 bytes .../doc/core/img/deferred-process.png | Bin 0 -> 10809 bytes .../Twisted-10.0.0/doc/core/img/deferred.dia | Bin 0 -> 4348 bytes .../Twisted-10.0.0/doc/core/img/deferred.png | Bin 0 -> 33282 bytes .../doc/core/img/twisted-overview.dia | Bin 0 -> 5984 bytes .../doc/core/img/twisted-overview.png | Bin 0 -> 50929 bytes vendor/Twisted-10.0.0/doc/core/index.html | 33 + .../doc/core/man/manhole-man.html | 50 + vendor/Twisted-10.0.0/doc/core/man/manhole.1 | 16 + .../doc/core/man/mktap-man.html | 328 + vendor/Twisted-10.0.0/doc/core/man/mktap.1 | 219 + .../doc/core/man/pyhtmlizer-man.html | 51 + .../Twisted-10.0.0/doc/core/man/pyhtmlizer.1 | 22 + .../doc/core/man/tap2deb-man.html | 106 + 
vendor/Twisted-10.0.0/doc/core/man/tap2deb.1 | 57 + .../doc/core/man/tap2rpm-man.html | 107 + vendor/Twisted-10.0.0/doc/core/man/tap2rpm.1 | 58 + .../doc/core/man/tapconvert-man.html | 82 + .../Twisted-10.0.0/doc/core/man/tapconvert.1 | 40 + .../doc/core/man/trial-man.html | 194 + vendor/Twisted-10.0.0/doc/core/man/trial.1 | 132 + .../doc/core/man/twistd-man.html | 194 + vendor/Twisted-10.0.0/doc/core/man/twistd.1 | 123 + .../doc/core/specifications/banana.html | 199 + .../doc/core/specifications/index.html | 21 + .../doc/core/upgrades/2.0/components.html | 115 + .../doc/core/upgrades/2.0/index.html | 31 + .../doc/core/upgrades/2.0/split.html | 163 + .../doc/core/upgrades/index.html | 29 + vendor/Twisted-10.0.0/doc/fun/Twisted.Quotes | 5722 +++++++++++++++++ vendor/Twisted-10.0.0/doc/fun/lightbulb | 7 + vendor/Twisted-10.0.0/doc/fun/register.html | 77 + .../twisted-network-framework/errata.html | 256 + .../twisted-network-framework/index.html | 1568 +++++ .../historic/2003/europython/doanddont.html | 508 ++ .../doc/historic/2003/europython/index.html | 35 + .../doc/historic/2003/europython/lore.html | 502 ++ .../2003/europython/slides-template.tpl | 19 + .../historic/2003/europython/tw-deploy.html | 1106 ++++ .../doc/historic/2003/europython/twisted.html | 608 ++ .../historic/2003/europython/webclients.html | 482 ++ .../doc/historic/2003/haifux/haifux.html | 2235 +++++++ .../doc/historic/2003/haifux/notes.html | 60 + .../2003/pycon/applications/applications | 257 + .../2003/pycon/applications/applications.html | 343 + .../2003/pycon/applications/pynfo-chart.png | Bin 0 -> 13018 bytes .../doc/historic/2003/pycon/conch/conch | 98 + .../doc/historic/2003/pycon/conch/conch.html | 165 + .../historic/2003/pycon/conch/conchtalk.txt | 144 + .../2003/pycon/conch/smalltwisted.png | Bin 0 -> 1472 bytes .../historic/2003/pycon/conch/twistedlogo.png | Bin 0 -> 7256 bytes .../2003/pycon/deferex/deferex-bad-adding.py | 8 + .../2003/pycon/deferex/deferex-chaining.py | 13 + .../pycon/deferex/deferex-complex-failure.py | 30 + .../pycon/deferex/deferex-complex-raise.py | 12 + .../2003/pycon/deferex/deferex-forwarding.py | 9 + .../2003/pycon/deferex/deferex-listing0.py | 18 + .../2003/pycon/deferex/deferex-listing1.py | 6 + .../2003/pycon/deferex/deferex-listing2.py | 8 + .../pycon/deferex/deferex-simple-failure.py | 9 + .../pycon/deferex/deferex-simple-raise.py | 3 + .../historic/2003/pycon/deferex/deferex.html | 499 ++ .../historic/2003/pycon/deferex/deferexex.py | 16 + .../intrinsics-lightning/intrinsics-lightning | 97 + .../2003/pycon/lore/lore-presentation | 108 + .../historic/2003/pycon/lore/lore-slides.html | 187 + .../doc/historic/2003/pycon/lore/lore.html | 791 +++ .../doc/historic/2003/pycon/pb/pb-client1.py | 46 + .../doc/historic/2003/pycon/pb/pb-server1.py | 16 + .../doc/historic/2003/pycon/pb/pb-slides.py | 240 + .../doc/historic/2003/pycon/pb/pb.html | 966 +++ .../2003/pycon/releasing/releasing-twisted | 151 + .../2003/pycon/releasing/releasing.html | 491 ++ .../historic/2003/pycon/tw-deploy/tw-deploy | 184 + .../2003/pycon/tw-deploy/twisted-overview.png | Bin 0 -> 12722 bytes .../2003/pycon/tw-deploy/twistedlogo.png | Bin 0 -> 7256 bytes .../twisted-internet/twisted-internet.py | 541 ++ .../pycon/twisted-reality/componentized.svg | 254 + .../twisted-reality/twisted-reality.html | 578 ++ .../doc/historic/2004/ibm/talk.html | 495 ++ vendor/Twisted-10.0.0/doc/historic/index.html | 128 + .../doc/historic/ipc10errata.html | 256 + .../doc/historic/ipc10paper.html | 1568 +++++ 
.../doc/historic/stylesheet.css | 178 + .../doc/historic/template-notoc.tpl | 14 + .../Twisted-10.0.0/doc/historic/template.tpl | 20 + .../doc/historic/twisted-debian.html | 96 + .../doc/lore/examples/example.html | 60 + .../doc/lore/examples/index.html | 22 + .../doc/lore/examples/slides-template.tpl | 21 + .../doc/lore/howto/extend-lore.html | 425 ++ .../Twisted-10.0.0/doc/lore/howto/index.html | 23 + .../lore/howto/listings/lore/1st_example.html | 12 + .../lore/howto/listings/lore/a_lore_plugin.py | 11 + .../doc/lore/howto/listings/lore/factory.py-1 | 9 + .../doc/lore/howto/listings/lore/factory.py-2 | 20 + .../doc/lore/howto/listings/lore/factory.py-3 | 21 + .../lore/howto/listings/lore/spitters.py-1 | 18 + .../lore/howto/listings/lore/spitters.py-2 | 26 + .../Twisted-10.0.0/doc/lore/howto/lore.html | 366 ++ .../doc/lore/img/myhtml-output.png | Bin 0 -> 23124 bytes vendor/Twisted-10.0.0/doc/lore/index.html | 25 + .../Twisted-10.0.0/doc/lore/man/lore-man.html | 124 + vendor/Twisted-10.0.0/doc/lore/man/lore.1 | 74 + .../doc/mail/examples/emailserver.tac | 72 + .../doc/mail/examples/imap4client.py | 181 + .../doc/mail/examples/index.html | 35 + .../doc/mail/examples/smtpclient_tls.py | 157 + vendor/Twisted-10.0.0/doc/mail/index.html | 25 + .../doc/mail/man/mailmail-man.html | 55 + vendor/Twisted-10.0.0/doc/mail/man/mailmail.1 | 21 + .../mail/tutorial/smtpclient/smtpclient-1.tac | 3 + .../tutorial/smtpclient/smtpclient-10.tac | 56 + .../tutorial/smtpclient/smtpclient-11.tac | 58 + .../mail/tutorial/smtpclient/smtpclient-2.tac | 10 + .../mail/tutorial/smtpclient/smtpclient-3.tac | 10 + .../mail/tutorial/smtpclient/smtpclient-4.tac | 12 + .../mail/tutorial/smtpclient/smtpclient-5.tac | 14 + .../mail/tutorial/smtpclient/smtpclient-6.tac | 18 + .../mail/tutorial/smtpclient/smtpclient-7.tac | 46 + .../mail/tutorial/smtpclient/smtpclient-8.tac | 49 + .../mail/tutorial/smtpclient/smtpclient-9.tac | 53 + .../mail/tutorial/smtpclient/smtpclient.html | 752 +++ .../mail/tutorial/smtpserver/smtpserver-1.tac | 3 + .../mail/tutorial/smtpserver/smtpserver-2.tac | 10 + .../mail/tutorial/smtpserver/smtpserver-3.tac | 12 + .../mail/tutorial/smtpserver/smtpserver-4.tac | 14 + .../mail/tutorial/smtpserver/smtpserver-5.tac | 50 + .../mail/tutorial/smtpserver/smtpserver-6.tac | 57 + .../mail/tutorial/smtpserver/smtpserver-7.tac | 57 + .../mail/tutorial/smtpserver/smtpserver-8.tac | 63 + .../doc/names/examples/dns-service.py | 36 + .../doc/names/examples/gethostbyname.py | 21 + .../doc/names/examples/index.html | 24 + .../doc/names/examples/testdns.py | 38 + .../Twisted-10.0.0/doc/names/howto/index.html | 22 + .../howto/listings/names/example-domain.com | 37 + .../Twisted-10.0.0/doc/names/howto/names.html | 134 + vendor/Twisted-10.0.0/doc/names/index.html | 25 + .../doc/pair/examples/index.html | 23 + .../doc/pair/examples/pairudp.py | 18 + .../Twisted-10.0.0/doc/pair/howto/index.html | 27 + .../doc/pair/howto/twisted-pair.html | 79 + vendor/Twisted-10.0.0/doc/pair/index.html | 23 + .../doc/web/examples/advogato.py | 45 + .../Twisted-10.0.0/doc/web/examples/dlpage.py | 9 + .../doc/web/examples/fortune.rpy.py | 17 + .../doc/web/examples/getpage.py | 9 + .../Twisted-10.0.0/doc/web/examples/google.py | 9 + .../doc/web/examples/hello.rpy.py | 28 + .../doc/web/examples/httpclient.py | 54 + .../doc/web/examples/index.html | 96 + .../Twisted-10.0.0/doc/web/examples/lj.rpy.py | 35 + .../Twisted-10.0.0/doc/web/examples/proxy.py | 11 + .../doc/web/examples/report.rpy.py | 28 + .../doc/web/examples/rootscript.py | 9 + 
.../doc/web/examples/silly-web.py | 18 + .../doc/web/examples/simple.rtl | 23 + .../Twisted-10.0.0/doc/web/examples/soap.py | 41 + .../doc/web/examples/users.rpy.py | 18 + .../doc/web/examples/vhost.rpy.py | 4 + vendor/Twisted-10.0.0/doc/web/examples/web.py | 27 + .../doc/web/examples/webguard.py | 54 + .../Twisted-10.0.0/doc/web/examples/xmlrpc.py | 67 + .../doc/web/examples/xmlrpcclient.py | 23 + .../Twisted-10.0.0/doc/web/howto/client.html | 469 ++ .../doc/web/howto/formindepth.html | 20 + .../doc/web/howto/glossary.html | 42 + .../Twisted-10.0.0/doc/web/howto/index.html | 50 + .../doc/web/howto/listings/client/request.py | 21 + .../doc/web/howto/listings/client/response.py | 47 + .../doc/web/howto/listings/client/sendbody.py | 24 + .../web/howto/listings/client/stringprod.py | 21 + .../doc/web/howto/listings/soap.rpy | 13 + .../doc/web/howto/listings/webquote.rtl | 20 + .../doc/web/howto/listings/xmlAndSoapQuote.py | 25 + .../doc/web/howto/listings/xmlquote.rpy | 12 + .../doc/web/howto/resource-templates.html | 103 + .../doc/web/howto/using-twistedweb.html | 972 +++ .../doc/web/howto/web-development.html | 106 + .../web-in-60/asynchronous-deferred.html | 161 + .../doc/web/howto/web-in-60/asynchronous.html | 121 + .../doc/web/howto/web-in-60/custom-codes.html | 118 + .../web/howto/web-in-60/dynamic-content.html | 120 + .../web/howto/web-in-60/dynamic-dispatch.html | 142 + .../web/howto/web-in-60/error-handling.html | 129 + .../web/howto/web-in-60/handling-posts.html | 137 + .../doc/web/howto/web-in-60/http-auth.html | 250 + .../doc/web/howto/web-in-60/index.html | 44 + .../doc/web/howto/web-in-60/interrupted.html | 141 + .../web/howto/web-in-60/logging-errors.html | 104 + .../doc/web/howto/web-in-60/rpy-scripts.html | 86 + .../web/howto/web-in-60/session-basics.html | 120 + .../web/howto/web-in-60/session-endings.html | 170 + .../web/howto/web-in-60/session-store.html | 180 + .../web/howto/web-in-60/static-content.html | 102 + .../web/howto/web-in-60/static-dispatch.html | 118 + .../doc/web/howto/web-in-60/wsgi.html | 123 + .../doc/web/howto/web-overview.html | 67 + .../Twisted-10.0.0/doc/web/howto/xmlrpc.html | 457 ++ .../Twisted-10.0.0/doc/web/img/controller.png | Bin 0 -> 14934 bytes .../Twisted-10.0.0/doc/web/img/livepage.png | Bin 0 -> 9363 bytes vendor/Twisted-10.0.0/doc/web/img/model.png | Bin 0 -> 14971 bytes .../doc/web/img/plone_root_model.png | Bin 0 -> 11214 bytes vendor/Twisted-10.0.0/doc/web/img/view.png | Bin 0 -> 14703 bytes .../doc/web/img/web-overview.dia | Bin 0 -> 1630 bytes .../doc/web/img/web-overview.png | Bin 0 -> 7330 bytes .../doc/web/img/web-process.png | Bin 0 -> 30404 bytes .../doc/web/img/web-process.svg | 594 ++ .../doc/web/img/web-session.png | Bin 0 -> 8966 bytes .../doc/web/img/web-widgets.dia | Bin 0 -> 1326 bytes .../doc/web/img/web-widgets.png | Bin 0 -> 3147 bytes vendor/Twisted-10.0.0/doc/web/index.html | 25 + .../doc/words/examples/aimbot.py | 62 + .../doc/words/examples/cursesclient.py | 188 + .../doc/words/examples/index.html | 30 + .../doc/words/examples/ircLogBot.py | 156 + .../doc/words/examples/jabber_client.py | 29 + .../doc/words/examples/minchat.py | 126 + .../doc/words/examples/msn_example.py | 67 + .../doc/words/examples/oscardemo.py | 100 + .../doc/words/examples/pb_client.py | 102 + .../doc/words/examples/xmpp_client.py | 82 + vendor/Twisted-10.0.0/doc/words/howto/im.html | 115 + .../Twisted-10.0.0/doc/words/howto/index.html | 22 + vendor/Twisted-10.0.0/doc/words/index.html | 25 + .../Twisted-10.0.0/doc/words/man/im-man.html | 50 + 
vendor/Twisted-10.0.0/doc/words/man/im.1 | 16 + vendor/Twisted-10.0.0/setup.py | 100 + vendor/Twisted-10.0.0/twisted/__init__.py | 24 + vendor/Twisted-10.0.0/twisted/_version.py | 3 + .../twisted/application/__init__.py | 7 + .../Twisted-10.0.0/twisted/application/app.py | 730 +++ .../twisted/application/internet.py | 270 + .../twisted/application/reactors.py | 83 + .../twisted/application/service.py | 398 ++ .../twisted/application/strports.py | 200 + .../Twisted-10.0.0/twisted/conch/__init__.py | 18 + .../Twisted-10.0.0/twisted/conch/_version.py | 3 + vendor/Twisted-10.0.0/twisted/conch/avatar.py | 37 + .../Twisted-10.0.0/twisted/conch/checkers.py | 266 + .../twisted/conch/client/__init__.py | 9 + .../twisted/conch/client/agent.py | 73 + .../twisted/conch/client/connect.py | 21 + .../twisted/conch/client/default.py | 256 + .../twisted/conch/client/direct.py | 107 + .../twisted/conch/client/knownhosts.py | 474 ++ .../twisted/conch/client/options.py | 90 + vendor/Twisted-10.0.0/twisted/conch/error.py | 102 + .../twisted/conch/insults/__init__.py | 4 + .../twisted/conch/insults/client.py | 138 + .../twisted/conch/insults/colors.py | 29 + .../twisted/conch/insults/helper.py | 450 ++ .../twisted/conch/insults/insults.py | 1087 ++++ .../twisted/conch/insults/text.py | 186 + .../twisted/conch/insults/window.py | 864 +++ .../twisted/conch/interfaces.py | 402 ++ vendor/Twisted-10.0.0/twisted/conch/ls.py | 60 + .../Twisted-10.0.0/twisted/conch/manhole.py | 336 + .../twisted/conch/manhole_ssh.py | 146 + .../twisted/conch/manhole_tap.py | 128 + vendor/Twisted-10.0.0/twisted/conch/mixin.py | 49 + .../twisted/conch/openssh_compat/__init__.py | 11 + .../twisted/conch/openssh_compat/factory.py | 73 + .../twisted/conch/openssh_compat/primes.py | 26 + .../Twisted-10.0.0/twisted/conch/recvline.py | 328 + .../twisted/conch/scripts/__init__.py | 1 + .../twisted/conch/scripts/cftp.py | 811 +++ .../twisted/conch/scripts/ckeygen.py | 188 + .../twisted/conch/scripts/conch.py | 510 ++ .../twisted/conch/scripts/tkconch.py | 572 ++ .../twisted/conch/ssh/__init__.py | 10 + .../Twisted-10.0.0/twisted/conch/ssh/agent.py | 294 + .../Twisted-10.0.0/twisted/conch/ssh/asn1.py | 34 + .../twisted/conch/ssh/channel.py | 281 + .../twisted/conch/ssh/common.py | 130 + .../twisted/conch/ssh/connection.py | 613 ++ .../twisted/conch/ssh/factory.py | 131 + .../twisted/conch/ssh/filetransfer.py | 927 +++ .../twisted/conch/ssh/forwarding.py | 181 + .../Twisted-10.0.0/twisted/conch/ssh/keys.py | 941 +++ .../twisted/conch/ssh/service.py | 48 + .../twisted/conch/ssh/session.py | 310 + .../Twisted-10.0.0/twisted/conch/ssh/sexpy.py | 42 + .../twisted/conch/ssh/transport.py | 1404 ++++ .../twisted/conch/ssh/userauth.py | 846 +++ vendor/Twisted-10.0.0/twisted/conch/stdio.py | 95 + vendor/Twisted-10.0.0/twisted/conch/tap.py | 48 + vendor/Twisted-10.0.0/twisted/conch/telnet.py | 1017 +++ .../twisted/conch/test/__init__.py | 1 + .../twisted/conch/test/keydata.py | 174 + .../twisted/conch/test/test_agent.py | 399 ++ .../twisted/conch/test/test_cftp.py | 881 +++ .../twisted/conch/test/test_channel.py | 279 + .../twisted/conch/test/test_checkers.py | 280 + .../twisted/conch/test/test_ckeygen.py | 80 + .../twisted/conch/test/test_conch.py | 437 ++ .../twisted/conch/test/test_connection.py | 623 ++ .../twisted/conch/test/test_default.py | 171 + .../twisted/conch/test/test_filetransfer.py | 677 ++ .../twisted/conch/test/test_helper.py | 560 ++ .../twisted/conch/test/test_insults.py | 460 ++ .../twisted/conch/test/test_keys.py | 961 +++ 
.../twisted/conch/test/test_knownhosts.py | 979 +++ .../twisted/conch/test/test_manhole.py | 348 + .../twisted/conch/test/test_mixin.py | 47 + .../twisted/conch/test/test_openssh_compat.py | 102 + .../twisted/conch/test/test_recvline.py | 649 ++ .../twisted/conch/test/test_session.py | 1210 ++++ .../twisted/conch/test/test_ssh.py | 886 +++ .../twisted/conch/test/test_tap.py | 95 + .../twisted/conch/test/test_telnet.py | 710 ++ .../twisted/conch/test/test_text.py | 101 + .../twisted/conch/test/test_transport.py | 1953 ++++++ .../twisted/conch/test/test_userauth.py | 1062 +++ .../twisted/conch/test/test_window.py | 49 + .../twisted/conch/topfiles/NEWS | 206 + .../twisted/conch/topfiles/README | 4 + .../twisted/conch/topfiles/setup.py | 48 + .../Twisted-10.0.0/twisted/conch/ttymodes.py | 121 + .../twisted/conch/ui/__init__.py | 11 + .../Twisted-10.0.0/twisted/conch/ui/ansi.py | 240 + .../twisted/conch/ui/tkvt100.py | 197 + vendor/Twisted-10.0.0/twisted/conch/unix.py | 457 ++ vendor/Twisted-10.0.0/twisted/copyright.py | 39 + .../Twisted-10.0.0/twisted/cred/__init__.py | 13 + vendor/Twisted-10.0.0/twisted/cred/_digest.py | 129 + .../Twisted-10.0.0/twisted/cred/checkers.py | 268 + .../twisted/cred/credentials.py | 483 ++ vendor/Twisted-10.0.0/twisted/cred/error.py | 41 + vendor/Twisted-10.0.0/twisted/cred/pamauth.py | 79 + vendor/Twisted-10.0.0/twisted/cred/portal.py | 121 + vendor/Twisted-10.0.0/twisted/cred/strcred.py | 270 + vendor/Twisted-10.0.0/twisted/cred/util.py | 46 + .../twisted/enterprise/__init__.py | 9 + .../twisted/enterprise/adbapi.py | 488 ++ .../twisted/enterprise/reflector.py | 167 + .../Twisted-10.0.0/twisted/enterprise/row.py | 127 + .../twisted/enterprise/sqlreflector.py | 327 + .../Twisted-10.0.0/twisted/enterprise/util.py | 200 + .../twisted/internet/__init__.py | 12 + .../twisted/internet/_baseprocess.py | 62 + .../twisted/internet/_dumbwin32proc.py | 340 + .../twisted/internet/_javaserialport.py | 78 + .../twisted/internet/_pollingfile.py | 279 + .../twisted/internet/_posixserialport.py | 60 + .../twisted/internet/_posixstdio.py | 173 + .../twisted/internet/_sslverify.py | 748 +++ .../twisted/internet/_threadedselect.py | 362 ++ .../twisted/internet/_win32serialport.py | 112 + .../twisted/internet/_win32stdio.py | 124 + .../twisted/internet/abstract.py | 378 ++ .../twisted/internet/address.py | 113 + .../Twisted-10.0.0/twisted/internet/base.py | 1191 ++++ .../twisted/internet/cfreactor.py | 342 + .../twisted/internet/cfsupport/cfdate.pxi | 2 + .../twisted/internet/cfsupport/cfdecl.pxi | 227 + .../twisted/internet/cfsupport/cfrunloop.pxi | 104 + .../twisted/internet/cfsupport/cfsocket.pxi | 111 + .../twisted/internet/cfsupport/cfsupport.c | 2136 ++++++ .../twisted/internet/cfsupport/cfsupport.pyx | 6 + .../twisted/internet/cfsupport/python.pxi | 5 + .../twisted/internet/cfsupport/setup.py | 50 + .../twisted/internet/default.py | 21 + .../Twisted-10.0.0/twisted/internet/defer.py | 1264 ++++ .../twisted/internet/epollreactor.py | 235 + .../Twisted-10.0.0/twisted/internet/error.py | 319 + .../Twisted-10.0.0/twisted/internet/fdesc.py | 118 + .../twisted/internet/glib2reactor.py | 49 + .../twisted/internet/gtk2reactor.py | 377 ++ .../twisted/internet/gtkreactor.py | 232 + .../twisted/internet/interfaces.py | 1693 +++++ .../twisted/internet/iocpreactor/__init__.py | 10 + .../twisted/internet/iocpreactor/abstract.py | 456 ++ .../twisted/internet/iocpreactor/build.bat | 4 + .../twisted/internet/iocpreactor/const.py | 26 + .../internet/iocpreactor/interfaces.py | 33 + 
.../iocpreactor/iocpsupport/acceptex.pxi | 38 + .../iocpreactor/iocpsupport/connectex.pxi | 34 + .../iocpreactor/iocpsupport/iocpsupport.c | 2003 ++++++ .../iocpreactor/iocpsupport/iocpsupport.pyx | 250 + .../iocpsupport/winsock_pointers.c | 62 + .../iocpsupport/winsock_pointers.h | 51 + .../iocpreactor/iocpsupport/wsarecv.pxi | 61 + .../iocpreactor/iocpsupport/wsasend.pxi | 27 + .../twisted/internet/iocpreactor/notes.txt | 24 + .../twisted/internet/iocpreactor/reactor.py | 267 + .../twisted/internet/iocpreactor/setup.py | 23 + .../twisted/internet/iocpreactor/tcp.py | 639 ++ .../twisted/internet/iocpreactor/udp.py | 389 ++ .../twisted/internet/kqreactor.py | 221 + .../Twisted-10.0.0/twisted/internet/main.py | 28 + .../twisted/internet/pollreactor.py | 208 + .../twisted/internet/posixbase.py | 417 ++ .../twisted/internet/process.py | 931 +++ .../twisted/internet/protocol.py | 699 ++ .../twisted/internet/pyuisupport.py | 37 + .../twisted/internet/qtreactor.py | 19 + .../twisted/internet/reactor.py | 38 + .../twisted/internet/selectreactor.py | 204 + .../twisted/internet/serialport.py | 65 + vendor/Twisted-10.0.0/twisted/internet/ssl.py | 233 + .../Twisted-10.0.0/twisted/internet/stdio.py | 32 + .../Twisted-10.0.0/twisted/internet/task.py | 750 +++ vendor/Twisted-10.0.0/twisted/internet/tcp.py | 1019 +++ .../twisted/internet/test/__init__.py | 6 + .../twisted/internet/test/inlinecb_tests.py | 92 + .../twisted/internet/test/process_helper.py | 33 + .../twisted/internet/test/reactormixins.py | 193 + .../twisted/internet/test/test_base.py | 179 + .../twisted/internet/test/test_baseprocess.py | 73 + .../twisted/internet/test/test_core.py | 275 + .../twisted/internet/test/test_fdset.py | 209 + .../twisted/internet/test/test_inlinecb.py | 13 + .../twisted/internet/test/test_iocp.py | 105 + .../twisted/internet/test/test_pollingfile.py | 39 + .../twisted/internet/test/test_posixbase.py | 259 + .../twisted/internet/test/test_process.py | 475 ++ .../twisted/internet/test/test_qtreactor.py | 35 + .../twisted/internet/test/test_tcp.py | 143 + .../twisted/internet/test/test_threads.py | 163 + .../twisted/internet/test/test_time.py | 26 + .../twisted/internet/test/test_tls.py | 163 + .../twisted/internet/test/test_unix.py | 137 + .../twisted/internet/threads.py | 117 + .../twisted/internet/tksupport.py | 68 + vendor/Twisted-10.0.0/twisted/internet/udp.py | 297 + .../Twisted-10.0.0/twisted/internet/unix.py | 297 + .../Twisted-10.0.0/twisted/internet/utils.py | 219 + .../twisted/internet/win32eventreactor.py | 244 + .../twisted/internet/wxreactor.py | 181 + .../twisted/internet/wxsupport.py | 61 + .../Twisted-10.0.0/twisted/lore/__init__.py | 21 + .../Twisted-10.0.0/twisted/lore/_version.py | 3 + vendor/Twisted-10.0.0/twisted/lore/default.py | 56 + vendor/Twisted-10.0.0/twisted/lore/docbook.py | 68 + .../Twisted-10.0.0/twisted/lore/htmlbook.py | 47 + vendor/Twisted-10.0.0/twisted/lore/indexer.py | 50 + vendor/Twisted-10.0.0/twisted/lore/latex.py | 463 ++ vendor/Twisted-10.0.0/twisted/lore/lint.py | 204 + vendor/Twisted-10.0.0/twisted/lore/lmath.py | 85 + .../Twisted-10.0.0/twisted/lore/man2lore.py | 295 + .../Twisted-10.0.0/twisted/lore/numberer.py | 33 + vendor/Twisted-10.0.0/twisted/lore/process.py | 120 + .../twisted/lore/scripts/__init__.py | 1 + .../twisted/lore/scripts/lore.py | 159 + vendor/Twisted-10.0.0/twisted/lore/slides.py | 359 ++ .../Twisted-10.0.0/twisted/lore/template.mgp | 24 + .../twisted/lore/test/__init__.py | 1 + .../lore/test/lore_index_file_out.html | 2 + 
.../test/lore_index_file_out_multiple.html | 5 + .../test/lore_index_file_unnumbered_out.html | 2 + .../twisted/lore/test/lore_index_test.xhtml | 21 + .../twisted/lore/test/lore_index_test2.xhtml | 22 + .../lore/test/lore_numbering_test_out.html | 2 + .../lore/test/lore_numbering_test_out2.html | 2 + .../twisted/lore/test/simple.html | 9 + .../twisted/lore/test/simple3.html | 9 + .../twisted/lore/test/simple4.html | 9 + .../twisted/lore/test/template.tpl | 13 + .../twisted/lore/test/test_docbook.py | 35 + .../twisted/lore/test/test_latex.py | 146 + .../twisted/lore/test/test_lint.py | 132 + .../twisted/lore/test/test_lmath.py | 53 + .../twisted/lore/test/test_lore.py | 1228 ++++ .../twisted/lore/test/test_man2lore.py | 169 + .../twisted/lore/test/test_slides.py | 85 + vendor/Twisted-10.0.0/twisted/lore/texi.py | 109 + .../Twisted-10.0.0/twisted/lore/topfiles/NEWS | 103 + .../twisted/lore/topfiles/README | 3 + .../twisted/lore/topfiles/setup.py | 29 + vendor/Twisted-10.0.0/twisted/lore/tree.py | 1152 ++++ .../twisted/lore/xhtml-lat1.ent | 196 + .../twisted/lore/xhtml-special.ent | 80 + .../twisted/lore/xhtml-symbol.ent | 237 + .../twisted/lore/xhtml1-strict.dtd | 978 +++ .../twisted/lore/xhtml1-transitional.dtd | 1201 ++++ .../Twisted-10.0.0/twisted/mail/__init__.py | 15 + .../Twisted-10.0.0/twisted/mail/_version.py | 3 + vendor/Twisted-10.0.0/twisted/mail/alias.py | 435 ++ vendor/Twisted-10.0.0/twisted/mail/bounce.py | 61 + vendor/Twisted-10.0.0/twisted/mail/imap4.py | 5670 ++++++++++++++++ vendor/Twisted-10.0.0/twisted/mail/mail.py | 333 + vendor/Twisted-10.0.0/twisted/mail/maildir.py | 517 ++ vendor/Twisted-10.0.0/twisted/mail/pb.py | 115 + vendor/Twisted-10.0.0/twisted/mail/pop3.py | 1072 +++ .../Twisted-10.0.0/twisted/mail/pop3client.py | 706 ++ .../Twisted-10.0.0/twisted/mail/protocols.py | 225 + vendor/Twisted-10.0.0/twisted/mail/relay.py | 114 + .../twisted/mail/relaymanager.py | 631 ++ .../twisted/mail/scripts/__init__.py | 1 + .../twisted/mail/scripts/mailmail.py | 360 ++ vendor/Twisted-10.0.0/twisted/mail/smtp.py | 2023 ++++++ vendor/Twisted-10.0.0/twisted/mail/tap.py | 185 + .../twisted/mail/test/__init__.py | 1 + .../twisted/mail/test/pop3testserver.py | 314 + .../twisted/mail/test/rfc822.message | 86 + .../twisted/mail/test/test_bounce.py | 32 + .../twisted/mail/test/test_imap.py | 4244 ++++++++++++ .../twisted/mail/test/test_mail.py | 1968 ++++++ .../twisted/mail/test/test_mailmail.py | 75 + .../twisted/mail/test/test_options.py | 44 + .../twisted/mail/test/test_pop3.py | 1069 +++ .../twisted/mail/test/test_pop3client.py | 573 ++ .../twisted/mail/test/test_smtp.py | 1530 +++++ .../Twisted-10.0.0/twisted/mail/topfiles/NEWS | 191 + .../twisted/mail/topfiles/README | 3 + .../twisted/mail/topfiles/setup.py | 50 + .../twisted/manhole/__init__.py | 8 + .../twisted/manhole/_inspectro.py | 369 ++ .../twisted/manhole/explorer.py | 655 ++ .../twisted/manhole/gladereactor.glade | 342 + .../twisted/manhole/gladereactor.py | 219 + .../twisted/manhole/inspectro.glade | 510 ++ .../twisted/manhole/logview.glade | 39 + .../Twisted-10.0.0/twisted/manhole/service.py | 399 ++ .../Twisted-10.0.0/twisted/manhole/telnet.py | 117 + .../twisted/manhole/ui/__init__.py | 7 + .../twisted/manhole/ui/gtk2manhole.glade | 268 + .../twisted/manhole/ui/gtk2manhole.py | 375 ++ .../twisted/manhole/ui/test/__init__.py | 4 + .../manhole/ui/test/test_gtk2manhole.py | 48 + .../Twisted-10.0.0/twisted/names/__init__.py | 7 + .../Twisted-10.0.0/twisted/names/_version.py | 3 + 
.../Twisted-10.0.0/twisted/names/authority.py | 322 + vendor/Twisted-10.0.0/twisted/names/cache.py | 96 + vendor/Twisted-10.0.0/twisted/names/client.py | 928 +++ vendor/Twisted-10.0.0/twisted/names/common.py | 265 + vendor/Twisted-10.0.0/twisted/names/dns.py | 1822 ++++++ vendor/Twisted-10.0.0/twisted/names/error.py | 95 + vendor/Twisted-10.0.0/twisted/names/hosts.py | 61 + .../Twisted-10.0.0/twisted/names/resolve.py | 59 + vendor/Twisted-10.0.0/twisted/names/root.py | 446 ++ .../Twisted-10.0.0/twisted/names/secondary.py | 102 + vendor/Twisted-10.0.0/twisted/names/server.py | 205 + .../twisted/names/srvconnect.py | 186 + vendor/Twisted-10.0.0/twisted/names/tap.py | 119 + .../twisted/names/test/__init__.py | 1 + .../twisted/names/test/test_cache.py | 14 + .../twisted/names/test/test_client.py | 655 ++ .../twisted/names/test/test_common.py | 71 + .../twisted/names/test/test_dns.py | 1200 ++++ .../twisted/names/test/test_names.py | 752 +++ .../twisted/names/test/test_rootresolve.py | 705 ++ .../twisted/names/test/test_srvconnect.py | 133 + .../twisted/names/topfiles/NEWS | 131 + .../twisted/names/topfiles/README | 3 + .../twisted/names/topfiles/setup.py | 50 + .../Twisted-10.0.0/twisted/news/__init__.py | 11 + .../Twisted-10.0.0/twisted/news/_version.py | 3 + .../Twisted-10.0.0/twisted/news/database.py | 998 +++ vendor/Twisted-10.0.0/twisted/news/news.py | 90 + vendor/Twisted-10.0.0/twisted/news/nntp.py | 1069 +++ vendor/Twisted-10.0.0/twisted/news/tap.py | 134 + .../twisted/news/test/__init__.py | 1 + .../twisted/news/test/test_news.py | 107 + .../twisted/news/test/test_nntp.py | 124 + .../Twisted-10.0.0/twisted/news/topfiles/NEWS | 54 + .../twisted/news/topfiles/README | 4 + .../twisted/news/topfiles/setup.py | 28 + .../Twisted-10.0.0/twisted/pair/__init__.py | 20 + .../Twisted-10.0.0/twisted/pair/_version.py | 3 + .../Twisted-10.0.0/twisted/pair/ethernet.py | 56 + vendor/Twisted-10.0.0/twisted/pair/ip.py | 72 + vendor/Twisted-10.0.0/twisted/pair/raw.py | 35 + vendor/Twisted-10.0.0/twisted/pair/rawudp.py | 55 + .../twisted/pair/test/__init__.py | 1 + .../twisted/pair/test/test_ethernet.py | 226 + .../twisted/pair/test/test_ip.py | 417 ++ .../twisted/pair/test/test_rawudp.py | 327 + .../Twisted-10.0.0/twisted/pair/topfiles/NEWS | 20 + .../twisted/pair/topfiles/README | 1 + .../twisted/pair/topfiles/setup.py | 28 + vendor/Twisted-10.0.0/twisted/pair/tuntap.py | 170 + .../twisted/persisted/__init__.py | 10 + .../Twisted-10.0.0/twisted/persisted/aot.py | 560 ++ .../twisted/persisted/crefutil.py | 167 + .../twisted/persisted/dirdbm.py | 358 ++ .../twisted/persisted/journal/__init__.py | 10 + .../twisted/persisted/journal/base.py | 226 + .../twisted/persisted/journal/picklelog.py | 48 + .../twisted/persisted/journal/rowjournal.py | 99 + .../Twisted-10.0.0/twisted/persisted/sob.py | 227 + .../twisted/persisted/styles.py | 257 + vendor/Twisted-10.0.0/twisted/plugin.py | 246 + .../twisted/plugins/__init__.py | 17 + .../twisted/plugins/cred_anonymous.py | 40 + .../twisted/plugins/cred_file.py | 60 + .../twisted/plugins/cred_memory.py | 68 + .../twisted/plugins/cred_unix.py | 138 + .../twisted/plugins/twisted_conch.py | 18 + .../twisted/plugins/twisted_ftp.py | 10 + .../twisted/plugins/twisted_inet.py | 10 + .../twisted/plugins/twisted_lore.py | 38 + .../twisted/plugins/twisted_mail.py | 10 + .../twisted/plugins/twisted_manhole.py | 10 + .../twisted/plugins/twisted_names.py | 10 + .../twisted/plugins/twisted_news.py | 10 + .../twisted/plugins/twisted_portforward.py | 10 + 
.../twisted/plugins/twisted_qtstub.py | 45 + .../twisted/plugins/twisted_reactors.py | 38 + .../twisted/plugins/twisted_socks.py | 10 + .../twisted/plugins/twisted_telnet.py | 10 + .../twisted/plugins/twisted_trial.py | 59 + .../twisted/plugins/twisted_web.py | 11 + .../twisted/plugins/twisted_words.py | 48 + .../twisted/protocols/__init__.py | 7 + .../twisted/protocols/_c_urlarg.c | 147 + .../Twisted-10.0.0/twisted/protocols/amp.py | 2394 +++++++ .../Twisted-10.0.0/twisted/protocols/basic.py | 519 ++ .../Twisted-10.0.0/twisted/protocols/dict.py | 362 ++ .../twisted/protocols/finger.py | 43 + .../Twisted-10.0.0/twisted/protocols/ftp.py | 2814 ++++++++ .../twisted/protocols/gps/__init__.py | 1 + .../twisted/protocols/gps/nmea.py | 209 + .../twisted/protocols/gps/rockwell.py | 268 + .../Twisted-10.0.0/twisted/protocols/htb.py | 269 + .../Twisted-10.0.0/twisted/protocols/ident.py | 227 + .../twisted/protocols/loopback.py | 397 ++ .../twisted/protocols/memcache.py | 758 +++ .../twisted/protocols/mice/__init__.py | 1 + .../twisted/protocols/mice/mouseman.py | 127 + .../Twisted-10.0.0/twisted/protocols/pcp.py | 204 + .../twisted/protocols/policies.py | 645 ++ .../twisted/protocols/portforward.py | 76 + .../twisted/protocols/postfix.py | 112 + .../twisted/protocols/shoutcast.py | 111 + .../Twisted-10.0.0/twisted/protocols/sip.py | 1334 ++++ .../Twisted-10.0.0/twisted/protocols/socks.py | 240 + .../twisted/protocols/stateful.py | 52 + .../twisted/protocols/telnet.py | 325 + .../twisted/protocols/test/__init__.py | 6 + .../twisted/protocols/test/test_tls.py | 566 ++ .../Twisted-10.0.0/twisted/protocols/tls.py | 345 + .../Twisted-10.0.0/twisted/protocols/wire.py | 90 + .../Twisted-10.0.0/twisted/python/__init__.py | 13 + vendor/Twisted-10.0.0/twisted/python/_epoll.c | 925 +++ .../Twisted-10.0.0/twisted/python/_epoll.pyx | 181 + .../twisted/python/_initgroups.c | 66 + .../Twisted-10.0.0/twisted/python/_release.py | 1265 ++++ .../twisted/python/_twisted_zsh_stub | 89 + .../Twisted-10.0.0/twisted/python/compat.py | 173 + .../twisted/python/components.py | 448 ++ .../Twisted-10.0.0/twisted/python/context.py | 90 + .../twisted/python/deprecate.py | 375 ++ .../Twisted-10.0.0/twisted/python/dispatch.py | 42 + vendor/Twisted-10.0.0/twisted/python/dist.py | 361 ++ .../twisted/python/dxprofile.py | 56 + .../Twisted-10.0.0/twisted/python/failure.py | 557 ++ .../Twisted-10.0.0/twisted/python/fakepwd.py | 112 + .../Twisted-10.0.0/twisted/python/filepath.py | 802 +++ .../Twisted-10.0.0/twisted/python/finalize.py | 46 + .../twisted/python/formmethod.py | 363 ++ .../Twisted-10.0.0/twisted/python/hashlib.py | 24 + vendor/Twisted-10.0.0/twisted/python/hook.py | 177 + .../Twisted-10.0.0/twisted/python/htmlizer.py | 91 + .../Twisted-10.0.0/twisted/python/lockfile.py | 212 + vendor/Twisted-10.0.0/twisted/python/log.py | 665 ++ .../Twisted-10.0.0/twisted/python/logfile.py | 324 + .../Twisted-10.0.0/twisted/python/modules.py | 747 +++ .../Twisted-10.0.0/twisted/python/monkey.py | 73 + vendor/Twisted-10.0.0/twisted/python/otp.py | 496 ++ .../twisted/python/procutils.py | 45 + .../twisted/python/randbytes.py | 177 + .../Twisted-10.0.0/twisted/python/rebuild.py | 264 + .../Twisted-10.0.0/twisted/python/reflect.py | 812 +++ .../Twisted-10.0.0/twisted/python/release.py | 57 + vendor/Twisted-10.0.0/twisted/python/roots.py | 248 + .../Twisted-10.0.0/twisted/python/runtime.py | 97 + .../Twisted-10.0.0/twisted/python/shortcut.py | 76 + .../Twisted-10.0.0/twisted/python/syslog.py | 107 + .../twisted/python/test/__init__.py | 3 + 
.../python/test/deprecatedattributes.py | 21 + .../twisted/python/test/test_components.py | 741 +++ .../twisted/python/test/test_deprecate.py | 399 ++ .../twisted/python/test/test_dist.py | 173 + .../twisted/python/test/test_fakepwd.py | 216 + .../twisted/python/test/test_hashlib.py | 90 + .../twisted/python/test/test_htmlizer.py | 41 + .../twisted/python/test/test_release.py | 2476 +++++++ .../twisted/python/test/test_runtime.py | 29 + .../twisted/python/test/test_syslog.py | 151 + .../twisted/python/test/test_util.py | 834 +++ .../twisted/python/test/test_versions.py | 323 + .../twisted/python/test/test_win32.py | 35 + .../twisted/python/test/test_zipstream.py | 455 ++ vendor/Twisted-10.0.0/twisted/python/text.py | 227 + .../twisted/python/threadable.py | 120 + .../twisted/python/threadpool.py | 308 + .../twisted/python/timeoutqueue.py | 49 + .../Twisted-10.0.0/twisted/python/urlpath.py | 122 + vendor/Twisted-10.0.0/twisted/python/usage.py | 631 ++ vendor/Twisted-10.0.0/twisted/python/util.py | 968 +++ .../Twisted-10.0.0/twisted/python/versions.py | 249 + vendor/Twisted-10.0.0/twisted/python/win32.py | 163 + .../Twisted-10.0.0/twisted/python/zippath.py | 217 + .../twisted/python/zipstream.py | 377 ++ .../Twisted-10.0.0/twisted/python/zsh/README | 8 + .../Twisted-10.0.0/twisted/python/zsh/_cftp | 48 + .../twisted/python/zsh/_ckeygen | 25 + .../Twisted-10.0.0/twisted/python/zsh/_conch | 58 + .../Twisted-10.0.0/twisted/python/zsh/_lore | 28 + .../twisted/python/zsh/_manhole | 19 + .../Twisted-10.0.0/twisted/python/zsh/_mktap | 304 + .../twisted/python/zsh/_pyhtmlizer | 8 + .../twisted/python/zsh/_tap2deb | 23 + .../twisted/python/zsh/_tap2rpm | 23 + .../twisted/python/zsh/_tapconvert | 17 + .../twisted/python/zsh/_tkconch | 38 + .../twisted/python/zsh/_tkmktap | 0 .../Twisted-10.0.0/twisted/python/zsh/_trial | 40 + .../Twisted-10.0.0/twisted/python/zsh/_twistd | 328 + .../twisted/python/zsh/_websetroot | 0 .../Twisted-10.0.0/twisted/python/zshcomp.py | 780 +++ .../Twisted-10.0.0/twisted/runner/__init__.py | 15 + .../Twisted-10.0.0/twisted/runner/_version.py | 3 + vendor/Twisted-10.0.0/twisted/runner/inetd.py | 70 + .../twisted/runner/inetdconf.py | 194 + .../Twisted-10.0.0/twisted/runner/inetdtap.py | 160 + .../Twisted-10.0.0/twisted/runner/portmap.c | 57 + .../Twisted-10.0.0/twisted/runner/procmon.py | 264 + .../twisted/runner/procutils.py | 5 + .../twisted/runner/test/__init__.py | 6 + .../twisted/runner/test/test_procmon.py | 55 + .../twisted/runner/topfiles/NEWS | 49 + .../twisted/runner/topfiles/README | 2 + .../twisted/runner/topfiles/setup.py | 35 + .../twisted/scripts/__init__.py | 12 + .../twisted/scripts/_twistd_unix.py | 317 + .../Twisted-10.0.0/twisted/scripts/_twistw.py | 50 + .../twisted/scripts/htmlizer.py | 66 + .../Twisted-10.0.0/twisted/scripts/manhole.py | 65 + .../Twisted-10.0.0/twisted/scripts/mktap.py | 182 + .../Twisted-10.0.0/twisted/scripts/tap2deb.py | 281 + .../Twisted-10.0.0/twisted/scripts/tap2rpm.py | 273 + .../twisted/scripts/tapconvert.py | 53 + .../twisted/scripts/test/__init__.py | 6 + .../twisted/scripts/test/test_mktap.py | 122 + .../Twisted-10.0.0/twisted/scripts/tkunzip.py | 286 + .../Twisted-10.0.0/twisted/scripts/trial.py | 370 ++ .../Twisted-10.0.0/twisted/scripts/twistd.py | 30 + .../Twisted-10.0.0/twisted/spread/__init__.py | 12 + .../Twisted-10.0.0/twisted/spread/banana.py | 358 ++ .../Twisted-10.0.0/twisted/spread/flavors.py | 600 ++ .../twisted/spread/interfaces.py | 28 + vendor/Twisted-10.0.0/twisted/spread/jelly.py | 1134 ++++ 
vendor/Twisted-10.0.0/twisted/spread/pb.py | 1380 ++++ .../Twisted-10.0.0/twisted/spread/publish.py | 142 + .../Twisted-10.0.0/twisted/spread/refpath.py | 95 + .../twisted/spread/ui/__init__.py | 12 + .../twisted/spread/ui/gtk2util.py | 215 + .../twisted/spread/ui/login2.glade | 461 ++ .../twisted/spread/ui/tktree.py | 204 + .../twisted/spread/ui/tkutil.py | 397 ++ vendor/Twisted-10.0.0/twisted/spread/util.py | 215 + vendor/Twisted-10.0.0/twisted/tap/__init__.py | 10 + vendor/Twisted-10.0.0/twisted/tap/ftp.py | 51 + vendor/Twisted-10.0.0/twisted/tap/manhole.py | 51 + .../Twisted-10.0.0/twisted/tap/portforward.py | 24 + vendor/Twisted-10.0.0/twisted/tap/socks.py | 34 + vendor/Twisted-10.0.0/twisted/tap/telnet.py | 29 + .../Twisted-10.0.0/twisted/test/__init__.py | 10 + .../twisted/test/crash_test_dummy.py | 34 + .../twisted/test/generator_failure_tests.py | 169 + vendor/Twisted-10.0.0/twisted/test/iosim.py | 270 + .../twisted/test/mock_win32process.py | 48 + .../twisted/test/myrebuilder1.py | 15 + .../twisted/test/myrebuilder2.py | 16 + .../twisted/test/plugin_basic.py | 57 + .../twisted/test/plugin_extra1.py | 23 + .../twisted/test/plugin_extra2.py | 35 + .../twisted/test/process_cmdline.py | 5 + .../twisted/test/process_echoer.py | 11 + .../twisted/test/process_fds.py | 40 + .../twisted/test/process_linger.py | 17 + .../twisted/test/process_reader.py | 12 + .../twisted/test/process_signal.py | 8 + .../twisted/test/process_stdinreader.py | 23 + .../twisted/test/process_tester.py | 37 + .../twisted/test/process_tty.py | 6 + .../twisted/test/process_twisted.py | 43 + .../twisted/test/proto_helpers.py | 299 + vendor/Twisted-10.0.0/twisted/test/raiser.c | 316 + vendor/Twisted-10.0.0/twisted/test/raiser.pyx | 21 + .../twisted/test/reflect_helper_IE.py | 4 + .../twisted/test/reflect_helper_VE.py | 4 + .../twisted/test/reflect_helper_ZDE.py | 4 + vendor/Twisted-10.0.0/twisted/test/server.pem | 36 + .../twisted/test/ssl_helpers.py | 26 + .../twisted/test/stdio_test_consumer.py | 39 + .../twisted/test/stdio_test_hostpeer.py | 32 + .../twisted/test/stdio_test_lastwrite.py | 45 + .../twisted/test/stdio_test_loseconn.py | 48 + .../twisted/test/stdio_test_producer.py | 55 + .../twisted/test/stdio_test_write.py | 32 + .../twisted/test/stdio_test_writeseq.py | 30 + .../twisted/test/test_abstract.py | 83 + .../twisted/test/test_adbapi.py | 774 +++ .../Twisted-10.0.0/twisted/test/test_amp.py | 2555 ++++++++ .../twisted/test/test_application.py | 867 +++ .../twisted/test/test_banana.py | 278 + .../twisted/test/test_compat.py | 199 + .../twisted/test/test_context.py | 15 + .../twisted/test/test_cooperator.py | 634 ++ .../Twisted-10.0.0/twisted/test/test_defer.py | 950 +++ .../twisted/test/test_defgen.py | 283 + .../Twisted-10.0.0/twisted/test/test_dict.py | 22 + .../twisted/test/test_digestauth.py | 671 ++ .../twisted/test/test_dirdbm.py | 176 + .../Twisted-10.0.0/twisted/test/test_doc.py | 92 + .../twisted/test/test_enterprise.py | 41 + .../Twisted-10.0.0/twisted/test/test_epoll.py | 159 + .../Twisted-10.0.0/twisted/test/test_error.py | 170 + .../twisted/test/test_explorer.py | 236 + .../twisted/test/test_extensions.py | 18 + .../twisted/test/test_factories.py | 162 + .../twisted/test/test_failure.py | 318 + .../Twisted-10.0.0/twisted/test/test_fdesc.py | 235 + .../twisted/test/test_finger.py | 67 + .../twisted/test/test_formmethod.py | 77 + .../Twisted-10.0.0/twisted/test/test_ftp.py | 2671 ++++++++ .../Twisted-10.0.0/twisted/test/test_hook.py | 150 + .../Twisted-10.0.0/twisted/test/test_htb.py | 96 + 
.../Twisted-10.0.0/twisted/test/test_ident.py | 194 + .../twisted/test/test_import.py | 78 + .../twisted/test/test_internet.py | 1396 ++++ .../twisted/test/test_iutils.py | 296 + .../Twisted-10.0.0/twisted/test/test_jelly.py | 618 ++ .../twisted/test/test_journal.py | 169 + .../twisted/test/test_lockfile.py | 445 ++ .../Twisted-10.0.0/twisted/test/test_log.py | 559 ++ .../twisted/test/test_logfile.py | 314 + .../twisted/test/test_loopback.py | 433 ++ .../twisted/test/test_manhole.py | 75 + .../twisted/test/test_memcache.py | 663 ++ .../twisted/test/test_modules.py | 391 ++ .../twisted/test/test_monkey.py | 161 + .../twisted/test/test_newcred.py | 487 ++ .../Twisted-10.0.0/twisted/test/test_nmea.py | 115 + .../Twisted-10.0.0/twisted/test/test_paths.py | 896 +++ vendor/Twisted-10.0.0/twisted/test/test_pb.py | 1775 +++++ .../twisted/test/test_pbfailure.py | 424 ++ .../Twisted-10.0.0/twisted/test/test_pcp.py | 368 ++ .../twisted/test/test_persisted.py | 314 + .../twisted/test/test_plugin.py | 694 ++ .../twisted/test/test_policies.py | 683 ++ .../twisted/test/test_postfix.py | 108 + .../twisted/test/test_process.py | 2410 +++++++ .../twisted/test/test_protocols.py | 811 +++ .../twisted/test/test_randbytes.py | 178 + .../twisted/test/test_rebuild.py | 252 + .../twisted/test/test_reflect.py | 756 +++ .../twisted/test/test_reflector.py | 401 ++ .../Twisted-10.0.0/twisted/test/test_roots.py | 63 + .../twisted/test/test_shortcut.py | 26 + .../Twisted-10.0.0/twisted/test/test_sip.py | 942 +++ .../Twisted-10.0.0/twisted/test/test_sob.py | 172 + .../Twisted-10.0.0/twisted/test/test_socks.py | 498 ++ .../Twisted-10.0.0/twisted/test/test_ssl.py | 664 ++ .../twisted/test/test_sslverify.py | 558 ++ .../twisted/test/test_stateful.py | 77 + .../Twisted-10.0.0/twisted/test/test_stdio.py | 287 + .../twisted/test/test_strcred.py | 622 ++ .../twisted/test/test_strerror.py | 145 + .../twisted/test/test_stringtransport.py | 160 + .../twisted/test/test_strports.py | 84 + .../Twisted-10.0.0/twisted/test/test_task.py | 627 ++ .../Twisted-10.0.0/twisted/test/test_tcp.py | 1908 ++++++ .../twisted/test/test_tcp_internals.py | 240 + .../Twisted-10.0.0/twisted/test/test_text.py | 156 + .../twisted/test/test_threadable.py | 103 + .../twisted/test/test_threadpool.py | 583 ++ .../twisted/test/test_threads.py | 412 ++ .../twisted/test/test_timehelpers.py | 31 + .../twisted/test/test_timeoutqueue.py | 73 + .../twisted/test/test_tpfile.py | 52 + .../twisted/test/test_twistd.py | 1378 ++++ .../Twisted-10.0.0/twisted/test/test_udp.py | 661 ++ .../Twisted-10.0.0/twisted/test/test_unix.py | 405 ++ .../Twisted-10.0.0/twisted/test/test_usage.py | 372 ++ .../twisted/test/test_zshcomp.py | 210 + .../Twisted-10.0.0/twisted/test/testutils.py | 55 + .../twisted/test/time_helpers.py | 72 + .../Twisted-10.0.0/twisted/topfiles/4335.misc | 0 .../Twisted-10.0.0/twisted/topfiles/CREDITS | 60 + .../twisted/topfiles/ChangeLog.Old | 3888 +++++++++++ vendor/Twisted-10.0.0/twisted/topfiles/NEWS | 942 +++ vendor/Twisted-10.0.0/twisted/topfiles/README | 14 + .../Twisted-10.0.0/twisted/topfiles/setup.py | 99 + .../Twisted-10.0.0/twisted/trial/__init__.py | 52 + vendor/Twisted-10.0.0/twisted/trial/itrial.py | 251 + .../Twisted-10.0.0/twisted/trial/reporter.py | 1204 ++++ vendor/Twisted-10.0.0/twisted/trial/runner.py | 905 +++ .../twisted/trial/test/__init__.py | 1 + .../twisted/trial/test/detests.py | 195 + .../twisted/trial/test/erroneous.py | 130 + .../twisted/trial/test/mockcustomsuite.py | 21 + .../twisted/trial/test/mockcustomsuite2.py | 21 + 
.../twisted/trial/test/mockcustomsuite3.py | 28 + .../twisted/trial/test/mockdoctest.py | 104 + .../twisted/trial/test/moduleself.py | 7 + .../twisted/trial/test/moduletest.py | 11 + .../twisted/trial/test/notpython | 2 + .../twisted/trial/test/novars.py | 6 + .../twisted/trial/test/packages.py | 134 + .../twisted/trial/test/sample.py | 40 + .../twisted/trial/test/scripttest.py | 14 + .../twisted/trial/test/suppression.py | 57 + .../twisted/trial/test/test_assertions.py | 742 +++ .../twisted/trial/test/test_deferred.py | 220 + .../twisted/trial/test/test_doctest.py | 81 + .../twisted/trial/test/test_keyboard.py | 113 + .../twisted/trial/test/test_loader.py | 541 ++ .../twisted/trial/test/test_log.py | 197 + .../twisted/trial/test/test_output.py | 162 + .../twisted/trial/test/test_plugins.py | 46 + .../twisted/trial/test/test_pyunitcompat.py | 222 + .../twisted/trial/test/test_reporter.py | 1561 +++++ .../twisted/trial/test/test_runner.py | 914 +++ .../twisted/trial/test/test_script.py | 390 ++ .../twisted/trial/test/test_test_visitor.py | 82 + .../twisted/trial/test/test_testcase.py | 51 + .../twisted/trial/test/test_tests.py | 1056 +++ .../twisted/trial/test/test_util.py | 533 ++ .../twisted/trial/test/test_warning.py | 436 ++ .../twisted/trial/test/weird.py | 20 + .../Twisted-10.0.0/twisted/trial/unittest.py | 1597 +++++ vendor/Twisted-10.0.0/twisted/trial/util.py | 378 ++ vendor/Twisted-10.0.0/twisted/web/__init__.py | 13 + .../twisted/web/_auth/__init__.py | 7 + .../Twisted-10.0.0/twisted/web/_auth/basic.py | 59 + .../twisted/web/_auth/digest.py | 54 + .../twisted/web/_auth/wrapper.py | 222 + .../Twisted-10.0.0/twisted/web/_newclient.py | 1413 ++++ vendor/Twisted-10.0.0/twisted/web/_version.py | 3 + vendor/Twisted-10.0.0/twisted/web/client.py | 644 ++ vendor/Twisted-10.0.0/twisted/web/demo.py | 29 + vendor/Twisted-10.0.0/twisted/web/distrib.py | 374 ++ .../Twisted-10.0.0/twisted/web/domhelpers.py | 268 + vendor/Twisted-10.0.0/twisted/web/error.py | 230 + vendor/Twisted-10.0.0/twisted/web/google.py | 75 + vendor/Twisted-10.0.0/twisted/web/guard.py | 17 + vendor/Twisted-10.0.0/twisted/web/html.py | 49 + vendor/Twisted-10.0.0/twisted/web/http.py | 1797 ++++++ .../twisted/web/http_headers.py | 260 + vendor/Twisted-10.0.0/twisted/web/iweb.py | 421 ++ vendor/Twisted-10.0.0/twisted/web/microdom.py | 1028 +++ vendor/Twisted-10.0.0/twisted/web/proxy.py | 302 + vendor/Twisted-10.0.0/twisted/web/resource.py | 300 + vendor/Twisted-10.0.0/twisted/web/rewrite.py | 52 + vendor/Twisted-10.0.0/twisted/web/script.py | 169 + vendor/Twisted-10.0.0/twisted/web/server.py | 527 ++ vendor/Twisted-10.0.0/twisted/web/soap.py | 154 + vendor/Twisted-10.0.0/twisted/web/static.py | 1104 ++++ vendor/Twisted-10.0.0/twisted/web/sux.py | 657 ++ vendor/Twisted-10.0.0/twisted/web/tap.py | 234 + .../twisted/web/test/__init__.py | 7 + .../Twisted-10.0.0/twisted/web/test/_util.py | 24 + .../twisted/web/test/test_cgi.py | 190 + .../twisted/web/test/test_distrib.py | 361 ++ .../twisted/web/test/test_domhelpers.py | 306 + .../twisted/web/test/test_error.py | 151 + .../twisted/web/test/test_http.py | 1531 +++++ .../twisted/web/test/test_http_headers.py | 585 ++ .../twisted/web/test/test_httpauth.py | 586 ++ .../twisted/web/test/test_newclient.py | 2082 ++++++ .../twisted/web/test/test_proxy.py | 541 ++ .../twisted/web/test/test_resource.py | 144 + .../twisted/web/test/test_script.py | 70 + .../twisted/web/test/test_soap.py | 114 + .../twisted/web/test/test_static.py | 1507 +++++ .../twisted/web/test/test_tap.py | 251 + 
.../twisted/web/test/test_vhost.py | 105 + .../twisted/web/test/test_web.py | 863 +++ .../twisted/web/test/test_webclient.py | 1060 +++ .../twisted/web/test/test_wsgi.py | 1572 +++++ .../twisted/web/test/test_xml.py | 1105 ++++ .../twisted/web/test/test_xmlrpc.py | 510 ++ .../Twisted-10.0.0/twisted/web/topfiles/NEWS | 309 + .../twisted/web/topfiles/README | 1 + .../twisted/web/topfiles/setup.py | 30 + vendor/Twisted-10.0.0/twisted/web/trp.py | 23 + vendor/Twisted-10.0.0/twisted/web/twcgi.py | 253 + vendor/Twisted-10.0.0/twisted/web/util.py | 380 ++ vendor/Twisted-10.0.0/twisted/web/vhost.py | 135 + vendor/Twisted-10.0.0/twisted/web/wsgi.py | 401 ++ vendor/Twisted-10.0.0/twisted/web/xmlrpc.py | 427 ++ .../Twisted-10.0.0/twisted/words/__init__.py | 10 + .../Twisted-10.0.0/twisted/words/_version.py | 3 + vendor/Twisted-10.0.0/twisted/words/ewords.py | 34 + .../twisted/words/im/__init__.py | 8 + .../twisted/words/im/baseaccount.py | 62 + .../twisted/words/im/basechat.py | 316 + .../twisted/words/im/basesupport.py | 270 + .../twisted/words/im/instancemessenger.glade | 3165 +++++++++ .../twisted/words/im/interfaces.py | 364 ++ .../twisted/words/im/ircsupport.py | 261 + .../Twisted-10.0.0/twisted/words/im/locals.py | 26 + .../twisted/words/im/pbsupport.py | 260 + .../twisted/words/im/proxyui.py | 24 + vendor/Twisted-10.0.0/twisted/words/im/tap.py | 15 + .../twisted/words/im/tocsupport.py | 220 + vendor/Twisted-10.0.0/twisted/words/iwords.py | 266 + .../twisted/words/protocols/__init__.py | 1 + .../twisted/words/protocols/irc.py | 3166 +++++++++ .../words/protocols/jabber/__init__.py | 8 + .../twisted/words/protocols/jabber/client.py | 369 ++ .../words/protocols/jabber/component.py | 474 ++ .../twisted/words/protocols/jabber/error.py | 336 + .../twisted/words/protocols/jabber/ijabber.py | 199 + .../twisted/words/protocols/jabber/jid.py | 249 + .../words/protocols/jabber/jstrports.py | 31 + .../twisted/words/protocols/jabber/sasl.py | 243 + .../words/protocols/jabber/sasl_mechanisms.py | 240 + .../words/protocols/jabber/xmlstream.py | 1136 ++++ .../words/protocols/jabber/xmpp_stringprep.py | 248 + .../twisted/words/protocols/msn.py | 2449 +++++++ .../twisted/words/protocols/oscar.py | 1235 ++++ .../twisted/words/protocols/toc.py | 1622 +++++ .../Twisted-10.0.0/twisted/words/service.py | 1223 ++++ vendor/Twisted-10.0.0/twisted/words/tap.py | 72 + .../twisted/words/test/__init__.py | 1 + .../twisted/words/test/test_basesupport.py | 97 + .../twisted/words/test/test_domish.py | 421 ++ .../twisted/words/test/test_irc.py | 1566 +++++ .../twisted/words/test/test_irc_service.py | 110 + .../twisted/words/test/test_jabberclient.py | 414 ++ .../words/test/test_jabbercomponent.py | 422 ++ .../twisted/words/test/test_jabbererror.py | 308 + .../twisted/words/test/test_jabberjid.py | 225 + .../twisted/words/test/test_jabbersasl.py | 272 + .../words/test/test_jabbersaslmechanisms.py | 90 + .../words/test/test_jabberxmlstream.py | 1287 ++++ .../words/test/test_jabberxmppstringprep.py | 84 + .../twisted/words/test/test_msn.py | 503 ++ .../twisted/words/test/test_oscar.py | 24 + .../twisted/words/test/test_service.py | 992 +++ .../twisted/words/test/test_tap.py | 78 + .../twisted/words/test/test_toc.py | 340 + .../twisted/words/test/test_xishutil.py | 345 + .../twisted/words/test/test_xmlstream.py | 201 + .../twisted/words/test/test_xmpproutertap.py | 86 + .../twisted/words/test/test_xpath.py | 260 + vendor/Twisted-10.0.0/twisted/words/toctap.py | 20 + .../twisted/words/topfiles/NEWS | 230 + 
.../twisted/words/topfiles/README | 4 + .../twisted/words/topfiles/setup.py | 53 + .../twisted/words/xish/__init__.py | 10 + .../twisted/words/xish/domish.py | 848 +++ .../twisted/words/xish/utility.py | 372 ++ .../twisted/words/xish/xmlstream.py | 261 + .../twisted/words/xish/xpath.py | 333 + .../twisted/words/xish/xpathparser.g | 375 ++ .../twisted/words/xish/xpathparser.py | 508 ++ .../twisted/words/xmpproutertap.py | 30 + vendor/amqplib/__init__.py | 1 + vendor/amqplib/client_0_8/__init__.py | 35 + vendor/amqplib/client_0_8/abstract_channel.py | 114 + vendor/amqplib/client_0_8/basic_message.py | 137 + vendor/amqplib/client_0_8/channel.py | 2602 ++++++++ vendor/amqplib/client_0_8/connection.py | 826 +++ vendor/amqplib/client_0_8/exceptions.py | 105 + vendor/amqplib/client_0_8/method_framing.py | 244 + vendor/amqplib/client_0_8/serialization.py | 530 ++ vendor/amqplib/client_0_8/transport.py | 220 + vendor/anyjson/__init__.py | 124 + vendor/boto/README | 53 + vendor/boto/bin/bundle_image | 27 + vendor/boto/bin/cfadmin | 70 + vendor/boto/bin/elbadmin | 179 + vendor/boto/bin/fetch_file | 37 + vendor/boto/bin/kill_instance | 12 + vendor/boto/bin/launch_instance | 138 + vendor/boto/bin/list_instances | 10 + vendor/boto/bin/pyami_sendmail | 47 + vendor/boto/bin/s3put | 196 + vendor/boto/bin/sdbadmin | 168 + vendor/boto/bin/taskadmin | 116 + vendor/boto/boto/__init__.py | 292 + vendor/boto/boto/cloudfront/__init__.py | 223 + vendor/boto/boto/cloudfront/distribution.py | 470 ++ vendor/boto/boto/cloudfront/exception.py | 26 + vendor/boto/boto/cloudfront/identity.py | 122 + vendor/boto/boto/cloudfront/logging.py | 38 + vendor/boto/boto/cloudfront/object.py | 48 + vendor/boto/boto/cloudfront/signers.py | 60 + vendor/boto/boto/connection.py | 644 ++ vendor/boto/boto/contrib/__init__.py | 22 + vendor/boto/boto/contrib/m2helpers.py | 52 + vendor/boto/boto/contrib/ymlmessage.py | 52 + vendor/boto/boto/ec2/__init__.py | 52 + vendor/boto/boto/ec2/address.py | 58 + vendor/boto/boto/ec2/autoscale/__init__.py | 203 + vendor/boto/boto/ec2/autoscale/activity.py | 55 + vendor/boto/boto/ec2/autoscale/group.py | 189 + vendor/boto/boto/ec2/autoscale/instance.py | 53 + .../boto/boto/ec2/autoscale/launchconfig.py | 98 + vendor/boto/boto/ec2/autoscale/request.py | 38 + vendor/boto/boto/ec2/autoscale/trigger.py | 137 + vendor/boto/boto/ec2/blockdevicemapping.py | 98 + vendor/boto/boto/ec2/bundleinstance.py | 78 + vendor/boto/boto/ec2/buyreservation.py | 81 + vendor/boto/boto/ec2/cloudwatch/__init__.py | 213 + vendor/boto/boto/ec2/cloudwatch/datapoint.py | 37 + vendor/boto/boto/ec2/cloudwatch/metric.py | 71 + vendor/boto/boto/ec2/connection.py | 1605 +++++ vendor/boto/boto/ec2/ec2object.py | 41 + vendor/boto/boto/ec2/elb/__init__.py | 238 + vendor/boto/boto/ec2/elb/healthcheck.py | 68 + vendor/boto/boto/ec2/elb/instancestate.py | 54 + vendor/boto/boto/ec2/elb/listelement.py | 31 + vendor/boto/boto/ec2/elb/listener.py | 64 + vendor/boto/boto/ec2/elb/loadbalancer.py | 142 + vendor/boto/boto/ec2/image.py | 250 + vendor/boto/boto/ec2/instance.py | 294 + vendor/boto/boto/ec2/instanceinfo.py | 47 + vendor/boto/boto/ec2/keypair.py | 111 + vendor/boto/boto/ec2/launchspecification.py | 96 + vendor/boto/boto/ec2/regioninfo.py | 60 + vendor/boto/boto/ec2/reservedinstance.py | 97 + vendor/boto/boto/ec2/securitygroup.py | 282 + vendor/boto/boto/ec2/snapshot.py | 127 + .../boto/boto/ec2/spotdatafeedsubscription.py | 63 + vendor/boto/boto/ec2/spotinstancerequest.py | 109 + vendor/boto/boto/ec2/spotpricehistory.py | 52 + 
vendor/boto/boto/ec2/volume.py | 208 + vendor/boto/boto/ec2/zone.py | 47 + vendor/boto/boto/emr/__init__.py | 29 + vendor/boto/boto/emr/connection.py | 236 + vendor/boto/boto/emr/emrobject.py | 34 + vendor/boto/boto/emr/jobflow.py | 89 + vendor/boto/boto/emr/step.py | 168 + vendor/boto/boto/exception.py | 293 + vendor/boto/boto/fps/__init__.py | 23 + vendor/boto/boto/fps/connection.py | 172 + vendor/boto/boto/handler.py | 46 + vendor/boto/boto/manage/__init__.py | 23 + vendor/boto/boto/manage/cmdshell.py | 169 + vendor/boto/boto/manage/propget.py | 64 + vendor/boto/boto/manage/server.py | 548 ++ vendor/boto/boto/manage/task.py | 175 + vendor/boto/boto/manage/test_manage.py | 34 + vendor/boto/boto/manage/volume.py | 420 ++ vendor/boto/boto/mapreduce/__init__.py | 23 + vendor/boto/boto/mapreduce/lqs.py | 152 + vendor/boto/boto/mapreduce/partitiondb.py | 175 + vendor/boto/boto/mapreduce/pdb_delete | 135 + vendor/boto/boto/mapreduce/pdb_describe | 124 + vendor/boto/boto/mapreduce/pdb_revert | 135 + vendor/boto/boto/mapreduce/pdb_upload | 172 + vendor/boto/boto/mapreduce/queuetools.py | 66 + vendor/boto/boto/mashups/__init__.py | 23 + vendor/boto/boto/mashups/interactive.py | 97 + vendor/boto/boto/mashups/iobject.py | 115 + vendor/boto/boto/mashups/order.py | 211 + vendor/boto/boto/mashups/server.py | 395 ++ vendor/boto/boto/mturk/__init__.py | 23 + vendor/boto/boto/mturk/connection.py | 515 ++ vendor/boto/boto/mturk/notification.py | 95 + vendor/boto/boto/mturk/price.py | 48 + vendor/boto/boto/mturk/qualification.py | 118 + vendor/boto/boto/mturk/question.py | 336 + vendor/boto/boto/mturk/test/all_tests.py | 8 + vendor/boto/boto/mturk/test/cleanup_tests.py | 67 + .../create_free_text_question_regex.doctest | 92 + .../boto/boto/mturk/test/create_hit.doctest | 86 + .../boto/mturk/test/create_hit_binary.doctest | 87 + .../boto/mturk/test/create_hit_external.py | 14 + .../test/create_hit_from_hit_type.doctest | 97 + .../test/create_hit_with_qualifications.py | 18 + .../boto/mturk/test/reviewable_hits.doctest | 71 + .../boto/boto/mturk/test/search_hits.doctest | 16 + vendor/boto/boto/pyami/__init__.py | 22 + vendor/boto/boto/pyami/bootstrap.py | 121 + vendor/boto/boto/pyami/config.py | 203 + vendor/boto/boto/pyami/copybot.cfg | 60 + vendor/boto/boto/pyami/copybot.py | 97 + vendor/boto/boto/pyami/helloworld.py | 28 + vendor/boto/boto/pyami/installers/__init__.py | 64 + .../boto/pyami/installers/ubuntu/__init__.py | 22 + .../boto/pyami/installers/ubuntu/apache.py | 43 + .../boto/boto/pyami/installers/ubuntu/ebs.py | 206 + .../boto/pyami/installers/ubuntu/installer.py | 96 + .../boto/pyami/installers/ubuntu/mysql.py | 109 + .../boto/boto/pyami/installers/ubuntu/trac.py | 139 + vendor/boto/boto/pyami/launch_ami.py | 178 + vendor/boto/boto/pyami/scriptbase.py | 44 + vendor/boto/boto/pyami/startup.py | 59 + vendor/boto/boto/rds/__init__.py | 810 +++ vendor/boto/boto/rds/dbinstance.py | 136 + vendor/boto/boto/rds/dbsecuritygroup.py | 159 + vendor/boto/boto/rds/dbsnapshot.py | 74 + vendor/boto/boto/rds/event.py | 49 + vendor/boto/boto/rds/parametergroup.py | 201 + vendor/boto/boto/resultset.py | 136 + vendor/boto/boto/s3/__init__.py | 31 + vendor/boto/boto/s3/acl.py | 162 + vendor/boto/boto/s3/bucket.py | 721 +++ vendor/boto/boto/s3/bucketlistresultset.py | 99 + vendor/boto/boto/s3/connection.py | 350 + vendor/boto/boto/s3/deletemarker.py | 56 + vendor/boto/boto/s3/key.py | 804 +++ vendor/boto/boto/s3/prefix.py | 35 + vendor/boto/boto/s3/user.py | 49 + vendor/boto/boto/sdb/__init__.py | 41 + 
vendor/boto/boto/sdb/connection.py | 441 ++ vendor/boto/boto/sdb/db/__init__.py | 21 + vendor/boto/boto/sdb/db/blob.py | 64 + vendor/boto/boto/sdb/db/key.py | 59 + vendor/boto/boto/sdb/db/manager/__init__.py | 88 + vendor/boto/boto/sdb/db/manager/pgmanager.py | 389 ++ vendor/boto/boto/sdb/db/manager/sdbmanager.py | 599 ++ vendor/boto/boto/sdb/db/manager/xmlmanager.py | 517 ++ vendor/boto/boto/sdb/db/model.py | 234 + vendor/boto/boto/sdb/db/property.py | 556 ++ vendor/boto/boto/sdb/db/query.py | 79 + vendor/boto/boto/sdb/db/sequence.py | 224 + vendor/boto/boto/sdb/db/test_db.py | 225 + vendor/boto/boto/sdb/domain.py | 337 + vendor/boto/boto/sdb/item.py | 105 + vendor/boto/boto/sdb/persist/__init__.py | 83 + vendor/boto/boto/sdb/persist/checker.py | 302 + vendor/boto/boto/sdb/persist/object.py | 207 + vendor/boto/boto/sdb/persist/property.py | 371 ++ vendor/boto/boto/sdb/persist/test_persist.py | 141 + vendor/boto/boto/sdb/queryresultset.py | 92 + vendor/boto/boto/sdb/regioninfo.py | 40 + vendor/boto/boto/services/__init__.py | 23 + vendor/boto/boto/services/bs.py | 179 + vendor/boto/boto/services/message.py | 58 + vendor/boto/boto/services/result.py | 137 + vendor/boto/boto/services/service.py | 161 + vendor/boto/boto/services/servicedef.py | 91 + vendor/boto/boto/services/sonofmmm.cfg | 43 + vendor/boto/boto/services/sonofmmm.py | 81 + vendor/boto/boto/services/submit.py | 88 + vendor/boto/boto/sns/__init__.py | 353 + vendor/boto/boto/sqs/__init__.py | 42 + vendor/boto/boto/sqs/attributes.py | 46 + vendor/boto/boto/sqs/connection.py | 286 + vendor/boto/boto/sqs/jsonmessage.py | 42 + vendor/boto/boto/sqs/message.py | 251 + vendor/boto/boto/sqs/queue.py | 414 ++ vendor/boto/boto/sqs/regioninfo.py | 40 + vendor/boto/boto/tests/__init__.py | 23 + vendor/boto/boto/tests/devpay_s3.py | 177 + vendor/boto/boto/tests/test.py | 85 + vendor/boto/boto/tests/test_ec2connection.py | 154 + vendor/boto/boto/tests/test_s3connection.py | 175 + vendor/boto/boto/tests/test_s3versioning.py | 143 + vendor/boto/boto/tests/test_sdbconnection.py | 104 + vendor/boto/boto/tests/test_sqsconnection.py | 142 + vendor/boto/boto/utils.py | 561 ++ vendor/boto/boto/vpc/__init__.py | 473 ++ vendor/boto/boto/vpc/customergateway.py | 54 + vendor/boto/boto/vpc/dhcpoptions.py | 69 + vendor/boto/boto/vpc/subnet.py | 54 + vendor/boto/boto/vpc/vpc.py | 54 + vendor/boto/boto/vpc/vpnconnection.py | 60 + vendor/boto/boto/vpc/vpngateway.py | 80 + vendor/boto/cq.py | 82 + vendor/boto/docs/Makefile | 89 + vendor/boto/docs/make.bat | 113 + .../boto/docs/source/_templates/layout.html | 3 + vendor/boto/docs/source/autoscale_tut.rst | 140 + .../docs/source/boto_theme/static/boto.css_t | 239 + .../source/boto_theme/static/pygments.css | 61 + vendor/boto/docs/source/boto_theme/theme.conf | 3 + vendor/boto/docs/source/conf.py | 30 + vendor/boto/docs/source/documentation.rst | 59 + vendor/boto/docs/source/ec2_tut.rst | 420 ++ vendor/boto/docs/source/elb_tut.rst | 202 + vendor/boto/docs/source/index.rst | 52 + vendor/boto/docs/source/ref/boto.rst | 47 + vendor/boto/docs/source/ref/cloudfront.rst | 108 + vendor/boto/docs/source/ref/contrib.rst | 32 + vendor/boto/docs/source/ref/ec2.rst | 223 + vendor/boto/docs/source/ref/fps.rst | 19 + vendor/boto/docs/source/ref/index.rst | 25 + vendor/boto/docs/source/ref/manage.rst | 47 + vendor/boto/docs/source/ref/mapreduce.rst | 38 + vendor/boto/docs/source/ref/mashups.rst | 40 + vendor/boto/docs/source/ref/mturk.rst | 47 + vendor/boto/docs/source/ref/pyami.rst | 103 + vendor/boto/docs/source/ref/rds.rst | 
47 + vendor/boto/docs/source/ref/s3.rst | 54 + vendor/boto/docs/source/ref/sdb.rst | 144 + vendor/boto/docs/source/ref/services.rst | 61 + vendor/boto/docs/source/ref/sqs.rst | 54 + vendor/boto/docs/source/ref/vpc.rst | 54 + vendor/boto/docs/source/s3_tut.rst | 213 + vendor/boto/docs/source/sqs_tut.rst | 230 + vendor/boto/docs/source/vpc_tut.rst | 88 + vendor/boto/pylintrc | 305 + vendor/boto/setup.py | 56 + vendor/carrot/__init__.py | 7 + vendor/carrot/backends/__init__.py | 54 + vendor/carrot/backends/base.py | 185 + vendor/carrot/backends/pikachu.py | 209 + vendor/carrot/backends/pyamqplib.py | 328 + vendor/carrot/backends/pystomp.py | 192 + vendor/carrot/backends/queue.py | 76 + vendor/carrot/connection.py | 229 + vendor/carrot/messaging.py | 981 +++ vendor/carrot/serialization.py | 253 + vendor/carrot/utils.py | 56 + vendor/lockfile/2.4.diff | 99 + vendor/lockfile/ACKS | 6 + vendor/lockfile/LICENSE | 21 + vendor/lockfile/MANIFEST | 19 + vendor/lockfile/PKG-INFO | 47 + vendor/lockfile/README | 23 + vendor/lockfile/RELEASE-NOTES | 42 + vendor/lockfile/doc/Makefile | 73 + vendor/lockfile/doc/conf.py | 179 + vendor/lockfile/doc/glossary.rst | 15 + vendor/lockfile/doc/index.rst | 22 + vendor/lockfile/doc/lockfile.rst | 257 + vendor/lockfile/lockfile/__init__.py | 286 + vendor/lockfile/lockfile/linklockfile.py | 71 + vendor/lockfile/lockfile/mkdirlockfile.py | 79 + vendor/lockfile/lockfile/pidlockfile.py | 181 + vendor/lockfile/lockfile/sqlitelockfile.py | 142 + vendor/lockfile/setup.py | 32 + vendor/lockfile/test/compliancetest.py | 228 + vendor/lockfile/test/test_lockfile.py | 30 + vendor/pymox/COPYING | 202 + vendor/pymox/MANIFEST.in | 5 + vendor/pymox/README | 56 + vendor/pymox/mox.py | 1729 +++++ vendor/pymox/mox_test.py | 1853 ++++++ vendor/pymox/mox_test_helper.py | 95 + vendor/pymox/setup.py | 14 + vendor/pymox/stubout.py | 142 + vendor/pymox/stubout_test.py | 47 + vendor/pymox/stubout_testee.py | 2 + vendor/python-daemon/ChangeLog | 187 + vendor/python-daemon/LICENSE.GPL-2 | 339 + vendor/python-daemon/LICENSE.PSF-2 | 48 + vendor/python-daemon/MANIFEST.in | 4 + vendor/python-daemon/PKG-INFO | 37 + vendor/python-daemon/README.nova | 4 + vendor/python-daemon/daemon/__init__.py | 47 + vendor/python-daemon/daemon/daemon.py | 776 +++ vendor/python-daemon/daemon/pidlockfile.py | 195 + vendor/python-daemon/daemon/runner.py | 229 + .../python-daemon/daemon/version/__init__.py | 36 + .../daemon/version/version_info.py | 23 + .../python_daemon.egg-info/PKG-INFO | 37 + .../python_daemon.egg-info/SOURCES.txt | 22 + .../dependency_links.txt | 1 + .../python_daemon.egg-info/not-zip-safe | 1 + .../python_daemon.egg-info/requires.txt | 2 + .../python_daemon.egg-info/top_level.txt | 1 + vendor/python-daemon/setup.cfg | 5 + vendor/python-daemon/setup.py | 64 + vendor/python-gflags/AUTHORS | 2 + vendor/python-gflags/COPYING | 28 + vendor/python-gflags/ChangeLog | 5 + vendor/python-gflags/README | 23 + vendor/python-gflags/debian/README | 7 + vendor/python-gflags/debian/changelog | 11 + vendor/python-gflags/debian/compat | 1 + vendor/python-gflags/debian/control | 26 + vendor/python-gflags/debian/copyright | 41 + vendor/python-gflags/debian/docs | 2 + vendor/python-gflags/debian/rules | 62 + vendor/python-gflags/gflags.py | 2340 +++++++ vendor/python-gflags/gflags2man.py | 536 ++ vendor/python-gflags/gflags_helpxml_test.py | 563 ++ vendor/python-gflags/gflags_unittest.py | 1679 +++++ vendor/python-gflags/setup.py | 44 + vendor/python-gflags/test_module_bar.py | 135 + 
vendor/python-gflags/test_module_foo.py | 120 + vendor/redis-py/.gitignore | 5 + vendor/redis-py/CHANGES | 58 + vendor/redis-py/INSTALL | 6 + vendor/redis-py/LICENSE | 22 + vendor/redis-py/MANIFEST.in | 4 + vendor/redis-py/README.md | 33 + vendor/redis-py/redis/__init__.py | 10 + vendor/redis-py/redis/client.py | 1259 ++++ vendor/redis-py/redis/exceptions.py | 20 + vendor/redis-py/setup.py | 42 + vendor/redis-py/tests/__init__.py | 11 + vendor/redis-py/tests/connection_pool.py | 53 + vendor/redis-py/tests/pipeline.py | 61 + vendor/redis-py/tests/server_commands.py | 1092 ++++ vendor/tornado/MANIFEST.in | 2 + vendor/tornado/README | 27 + vendor/tornado/demos/appengine/README | 48 + vendor/tornado/demos/appengine/app.yaml | 11 + vendor/tornado/demos/appengine/blog.py | 169 + vendor/tornado/demos/appengine/markdown.py | 1877 ++++++ .../tornado/demos/appengine/static/blog.css | 153 + .../demos/appengine/templates/archive.html | 31 + .../demos/appengine/templates/base.html | 29 + .../demos/appengine/templates/compose.html | 42 + .../demos/appengine/templates/entry.html | 5 + .../demos/appengine/templates/feed.xml | 26 + .../demos/appengine/templates/home.html | 8 + .../appengine/templates/modules/entry.html | 8 + vendor/tornado/demos/auth/authdemo.py | 79 + vendor/tornado/demos/blog/README | 57 + vendor/tornado/demos/blog/blog.py | 195 + vendor/tornado/demos/blog/markdown.py | 1877 ++++++ vendor/tornado/demos/blog/schema.sql | 44 + vendor/tornado/demos/blog/static/blog.css | 153 + .../tornado/demos/blog/templates/archive.html | 31 + vendor/tornado/demos/blog/templates/base.html | 27 + .../tornado/demos/blog/templates/compose.html | 42 + .../tornado/demos/blog/templates/entry.html | 5 + vendor/tornado/demos/blog/templates/feed.xml | 26 + vendor/tornado/demos/blog/templates/home.html | 8 + .../demos/blog/templates/modules/entry.html | 8 + vendor/tornado/demos/chat/chatdemo.py | 156 + vendor/tornado/demos/chat/static/chat.css | 56 + vendor/tornado/demos/chat/static/chat.js | 135 + .../tornado/demos/chat/templates/index.html | 37 + .../tornado/demos/chat/templates/message.html | 1 + vendor/tornado/demos/facebook/README | 8 + vendor/tornado/demos/facebook/facebook.py | 127 + .../demos/facebook/static/facebook.css | 97 + .../tornado/demos/facebook/static/facebook.js | 0 .../facebook/templates/modules/post.html | 29 + .../demos/facebook/templates/stream.html | 22 + vendor/tornado/demos/facebook/uimodules.py | 22 + vendor/tornado/demos/helloworld/helloworld.py | 43 + vendor/tornado/setup.py | 44 + vendor/tornado/tornado/__init__.py | 17 + vendor/tornado/tornado/auth.py | 883 +++ vendor/tornado/tornado/autoreload.py | 95 + vendor/tornado/tornado/database.py | 180 + vendor/tornado/tornado/epoll.c | 112 + vendor/tornado/tornado/escape.py | 112 + vendor/tornado/tornado/httpclient.py | 465 ++ vendor/tornado/tornado/httpserver.py | 450 ++ vendor/tornado/tornado/ioloop.py | 483 ++ vendor/tornado/tornado/iostream.py | 229 + vendor/tornado/tornado/locale.py | 457 ++ vendor/tornado/tornado/options.py | 386 ++ vendor/tornado/tornado/s3server.py | 255 + vendor/tornado/tornado/template.py | 576 ++ vendor/tornado/tornado/test/README | 4 + vendor/tornado/tornado/test/test_ioloop.py | 38 + vendor/tornado/tornado/web.py | 1445 +++++ vendor/tornado/tornado/websocket.py | 138 + vendor/tornado/tornado/win32_support.py | 123 + vendor/tornado/tornado/wsgi.py | 311 + vendor/tornado/website/app.yaml | 15 + vendor/tornado/website/index.yaml | 0 vendor/tornado/website/markdown/__init__.py | 603 ++ 
.../tornado/website/markdown/blockparser.py | 95 + .../website/markdown/blockprocessors.py | 460 ++ .../tornado/website/markdown/commandline.py | 96 + .../tornado/website/markdown/etree_loader.py | 33 + .../website/markdown/extensions/__init__.py | 0 .../website/markdown/extensions/toc.py | 140 + vendor/tornado/website/markdown/html4.py | 274 + .../website/markdown/inlinepatterns.py | 371 ++ vendor/tornado/website/markdown/odict.py | 162 + .../website/markdown/postprocessors.py | 77 + .../tornado/website/markdown/preprocessors.py | 214 + .../website/markdown/treeprocessors.py | 329 + vendor/tornado/website/static/base.css | 120 + vendor/tornado/website/static/facebook.png | Bin 0 -> 7457 bytes vendor/tornado/website/static/friendfeed.png | Bin 0 -> 7906 bytes vendor/tornado/website/static/robots.txt | 2 + .../tornado/website/static/tornado-0.1.tar.gz | Bin 0 -> 106878 bytes .../tornado/website/static/tornado-0.2.tar.gz | Bin 0 -> 200680 bytes vendor/tornado/website/static/tornado.png | Bin 0 -> 7101 bytes vendor/tornado/website/static/twitter.png | Bin 0 -> 7197 bytes vendor/tornado/website/templates/base.html | 27 + .../website/templates/documentation.html | 9 + .../website/templates/documentation.txt | 866 +++ vendor/tornado/website/templates/index.html | 51 + vendor/tornado/website/website.py | 63 + 1877 files changed, 430845 insertions(+) create mode 100644 CA/.gitignore create mode 100644 CA/INTER/.gitignore create mode 100755 CA/geninter.sh create mode 100755 CA/genrootca.sh create mode 100644 CA/newcerts/.placeholder create mode 100644 CA/openssl.cnf.tmpl create mode 100644 CA/private/.placeholder create mode 100644 CA/reqs/.gitignore create mode 100644 HACKING create mode 100644 LICENSE create mode 100755 bin/nova-api create mode 100755 bin/nova-compute create mode 100755 bin/nova-manage create mode 100755 bin/nova-objectstore create mode 100755 bin/nova-volume create mode 100644 debian/changelog create mode 100644 debian/compat create mode 100644 debian/control create mode 100644 debian/nova-api.init create mode 100644 debian/nova-api.install create mode 100644 debian/nova-common.install create mode 100644 debian/nova-compute.init create mode 100644 debian/nova-compute.install create mode 100644 debian/nova-objectstore.init create mode 100644 debian/nova-objectstore.install create mode 100644 debian/nova-volume.init create mode 100644 debian/nova-volume.install create mode 100644 debian/pycompat create mode 100644 debian/pyversions create mode 100755 debian/rules create mode 100644 docs/.gitignore create mode 100644 docs/Makefile create mode 100644 docs/_build/.gitignore create mode 100644 docs/_static/.gitignore create mode 100644 docs/_templates/.gitignore create mode 100644 docs/architecture.rst create mode 100644 docs/auth.rst create mode 100644 docs/binaries.rst create mode 100644 docs/compute.rst create mode 100644 docs/conf.py create mode 100644 docs/endpoint.rst create mode 100644 docs/fakes.rst create mode 100644 docs/getting.started.rst create mode 100644 docs/index.rst create mode 100644 docs/modules.rst create mode 100644 docs/network.rst create mode 100644 docs/nova.rst create mode 100644 docs/objectstore.rst create mode 100644 docs/packages.rst create mode 100644 docs/storage.rst create mode 100644 docs/volume.rst create mode 100644 nova/__init__.py create mode 100644 nova/adminclient.py create mode 100644 nova/auth/__init__.py create mode 100644 nova/auth/access.py create mode 100644 nova/auth/fakeldap.py create mode 100644 nova/auth/novarc.template create mode 
100644 nova/auth/rbac.ldif create mode 100644 nova/auth/signer.py create mode 100755 nova/auth/slap.sh create mode 100755 nova/auth/users.py create mode 100644 nova/compute/__init__.py create mode 100644 nova/compute/disk.py create mode 100644 nova/compute/exception.py create mode 100644 nova/compute/fakevirtinstance.xml create mode 100644 nova/compute/libvirt.xml.template create mode 100644 nova/compute/linux_net.py create mode 100644 nova/compute/model.py create mode 100644 nova/compute/network.py create mode 100644 nova/compute/node.py create mode 100644 nova/crypto.py create mode 100644 nova/datastore.py create mode 100644 nova/endpoint/__init__.py create mode 100644 nova/endpoint/admin.py create mode 100755 nova/endpoint/api.py create mode 100644 nova/endpoint/cloud.py create mode 100644 nova/endpoint/images.py create mode 100644 nova/exception.py create mode 100644 nova/fakerabbit.py create mode 100644 nova/fakevirt.py create mode 100644 nova/flags.py create mode 100644 nova/objectstore/__init__.py create mode 100644 nova/objectstore/bucket.py create mode 100644 nova/objectstore/handler.py create mode 100644 nova/objectstore/image.py create mode 100644 nova/objectstore/stored.py create mode 100644 nova/process.py create mode 100644 nova/rpc.py create mode 100644 nova/server.py create mode 100644 nova/test.py create mode 100644 nova/tests/CA/cacert.pem create mode 100644 nova/tests/CA/private/cakey.pem create mode 100644 nova/tests/__init__.py create mode 100644 nova/tests/access_unittest.py create mode 100644 nova/tests/api_integration.py create mode 100644 nova/tests/api_unittest.py create mode 100644 nova/tests/bundle/1mb.manifest.xml create mode 100644 nova/tests/bundle/1mb.part.0 create mode 100644 nova/tests/bundle/1mb.part.1 create mode 100644 nova/tests/cloud_unittest.py create mode 100644 nova/tests/datastore_unittest.py create mode 100644 nova/tests/fake_flags.py create mode 100644 nova/tests/future_unittest.py create mode 100644 nova/tests/keeper_unittest.py create mode 100644 nova/tests/network_unittest.py create mode 100644 nova/tests/node_unittest.py create mode 100644 nova/tests/objectstore_unittest.py create mode 100644 nova/tests/real_flags.py create mode 100644 nova/tests/storage_unittest.py create mode 100644 nova/tests/users_unittest.py create mode 100644 nova/twistd.py create mode 100644 nova/utils.py create mode 100644 nova/vendor.py create mode 100644 nova/volume/__init__.py create mode 100644 nova/volume/storage.py create mode 100644 run_tests.py create mode 100644 setup.py create mode 100644 vendor/IPy.py create mode 100644 vendor/Twisted-10.0.0/INSTALL create mode 100644 vendor/Twisted-10.0.0/LICENSE create mode 100644 vendor/Twisted-10.0.0/NEWS create mode 100644 vendor/Twisted-10.0.0/README create mode 100644 vendor/Twisted-10.0.0/bin/.twistd.swp create mode 100755 vendor/Twisted-10.0.0/bin/conch/cftp create mode 100755 vendor/Twisted-10.0.0/bin/conch/ckeygen create mode 100755 vendor/Twisted-10.0.0/bin/conch/conch create mode 100755 vendor/Twisted-10.0.0/bin/conch/tkconch create mode 100755 vendor/Twisted-10.0.0/bin/lore/lore create mode 100755 vendor/Twisted-10.0.0/bin/mail/mailmail create mode 100755 vendor/Twisted-10.0.0/bin/manhole create mode 100755 vendor/Twisted-10.0.0/bin/mktap create mode 100755 vendor/Twisted-10.0.0/bin/pyhtmlizer create mode 100755 vendor/Twisted-10.0.0/bin/tap2deb create mode 100755 vendor/Twisted-10.0.0/bin/tap2rpm create mode 100755 vendor/Twisted-10.0.0/bin/tapconvert create mode 100755 vendor/Twisted-10.0.0/bin/trial create 
mode 100755 vendor/Twisted-10.0.0/bin/twistd create mode 100644 vendor/Twisted-10.0.0/doc/conch/benchmarks/README create mode 100755 vendor/Twisted-10.0.0/doc/conch/benchmarks/buffering_mixin.py create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/demo.tac create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/demo_draw.tac create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/demo_insults.tac create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/demo_manhole.tac create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/demo_recvline.tac create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/demo_scroll.tac create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/index.html create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/sshsimpleclient.py create mode 100755 vendor/Twisted-10.0.0/doc/conch/examples/sshsimpleserver.py create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/telnet_echo.tac create mode 100644 vendor/Twisted-10.0.0/doc/conch/examples/window.tac create mode 100644 vendor/Twisted-10.0.0/doc/conch/howto/conch_client.html create mode 100644 vendor/Twisted-10.0.0/doc/conch/howto/index.html create mode 100644 vendor/Twisted-10.0.0/doc/conch/index.html create mode 100644 vendor/Twisted-10.0.0/doc/conch/man/cftp-man.html create mode 100644 vendor/Twisted-10.0.0/doc/conch/man/cftp.1 create mode 100644 vendor/Twisted-10.0.0/doc/conch/man/ckeygen-man.html create mode 100644 vendor/Twisted-10.0.0/doc/conch/man/ckeygen.1 create mode 100644 vendor/Twisted-10.0.0/doc/conch/man/conch-man.html create mode 100644 vendor/Twisted-10.0.0/doc/conch/man/conch.1 create mode 100644 vendor/Twisted-10.0.0/doc/conch/man/tkconch-man.html create mode 100644 vendor/Twisted-10.0.0/doc/conch/man/tkconch.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/banana.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/deferreds.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/failure.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/linereceiver.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/task.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/timer.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/tpclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/tpclient_nt.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/tpserver.py create mode 100644 vendor/Twisted-10.0.0/doc/core/benchmarks/tpserver_nt.py create mode 100644 vendor/Twisted-10.0.0/doc/core/development/index.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/listings/new_module_template.py create mode 100644 vendor/Twisted-10.0.0/doc/core/development/naming.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/philosophy.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/policy/coding-standard.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/policy/doc-standard.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/policy/index.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/policy/svn-dev.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/policy/test-standard.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/policy/writing-standard.html create mode 100644 vendor/Twisted-10.0.0/doc/core/development/security.html create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/ampclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/ampserver.py 
create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/bananabench.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/chatserver.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/courier.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/cred.py create mode 100755 vendor/Twisted-10.0.0/doc/core/examples/dbcred.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/echoclient.py create mode 100755 vendor/Twisted-10.0.0/doc/core/examples/echoclient_ssl.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/echoclient_udp.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/echoserv.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/echoserv_ssl.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/echoserv_udp.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/filewatch.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/ftpclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/ftpserver.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/gpsfix.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/index.html create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/longex.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/longex2.py create mode 100755 vendor/Twisted-10.0.0/doc/core/examples/mouse.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pb_exceptions.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbbenchclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbbenchserver.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbecho.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbechoclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbgtk2.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbgtk2login.glade create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbinterop.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbsimple.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pbsimpleclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/postfix.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/ptyserv.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/pyui_bg.png create mode 100755 vendor/Twisted-10.0.0/doc/core/examples/pyuidemo.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/rotatinglog.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/row_example.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/row_schema.sql create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/row_util.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/server.pem create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/shaper.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/shoutcast.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/simple.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/simpleclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/simpleserv.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/stdin.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/stdiodemo.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/testlogging.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/classes.nib create mode 100644 
vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/info.nib create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/keyedobjects.nib create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/README.txt create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/Twistzilla.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/setup.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/threadedselect/README create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/threadedselect/blockingdemo.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/threadedselect/pygamedemo.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/twistd-logging.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/wxacceptance.py create mode 100644 vendor/Twisted-10.0.0/doc/core/examples/wxdemo.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/application.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/basics.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/book.tex create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/choosing-reactor.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/clients.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/components.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/cred.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/debug-with-emacs.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/defer.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/deferredindepth.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/design.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/dirdbm.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/gendefer.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/glossary.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/howto.tidyrc create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/index.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/internet-overview.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/__init__.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/pbquote.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/pbquoteclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quoteproto.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quoters.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotes.txt create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotetap.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotetap2.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/webquote.rpy create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/application/service.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex1a.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex1b.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex2.py create mode 100755 
vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex3.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex4.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex5.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex6.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex7.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex8.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/synch-validation.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_classes.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_receiver.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_sender.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/chatclient.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/chatserver.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_classes.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_receiver.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_sender.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy_receiver.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy_sender.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/exc_client.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/exc_server.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb1client.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb1server.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb2client.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb2server.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb3client.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb3server.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb4client.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb5client.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb5server.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6client1.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6client2.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6server.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb7client.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pbAnonClient.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pbAnonServer.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/trap_client.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/pb/trap_server.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/listings/process/process.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/process/quotes.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/process/trueandfalse.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/udp/MulticastClient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/listings/udp/MulticastServer.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/logging.html create mode 100644 
vendor/Twisted-10.0.0/doc/core/howto/options.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/overview.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/pb-copyable.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/pb-cred.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/pb-intro.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/pb-usage.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/pb.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/pclients.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/plugin.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/process.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/producers.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/quotes.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/rdbms.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/reactor-basics.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/row.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/servers.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/ssl.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/stylesheet-unprocessed.css create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/stylesheet.css create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tap.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/telnet.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/template.tpl create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/testing.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/threading.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/time.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/backends.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/client.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/components.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/configuration.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/factory.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/index.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/intro.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/library.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/etc.users create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/__init__.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/finger.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/tap.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger01.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger02.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger03.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger04.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger05.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger06.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger07.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger08.py create mode 100755 
vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger09.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger10.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger11.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger12.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger13.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger14.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger15.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger16.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger17.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger18.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19a.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19a_changes.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19b.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19b_changes.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19c.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19c_changes.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger20.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger21.tac create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger22.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerPBclient.py create mode 100755 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerXRclient.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger_config.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerproxy.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/organized-finger.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/simple-finger.tac create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/twisted/plugins/finger_tutorial.py create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/pb.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/protocol.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/style.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/tutorial/web.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/udp.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/upgrading.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/vision.html create mode 100644 vendor/Twisted-10.0.0/doc/core/howto/website-template.tpl create mode 100644 vendor/Twisted-10.0.0/doc/core/img/TwistedLogo.bmp create mode 100644 vendor/Twisted-10.0.0/doc/core/img/cred-login.dia create mode 100644 vendor/Twisted-10.0.0/doc/core/img/cred-login.png create mode 100644 vendor/Twisted-10.0.0/doc/core/img/deferred-attach.dia create mode 100644 vendor/Twisted-10.0.0/doc/core/img/deferred-attach.png create mode 100644 
vendor/Twisted-10.0.0/doc/core/img/deferred-process.dia create mode 100644 vendor/Twisted-10.0.0/doc/core/img/deferred-process.png create mode 100644 vendor/Twisted-10.0.0/doc/core/img/deferred.dia create mode 100644 vendor/Twisted-10.0.0/doc/core/img/deferred.png create mode 100644 vendor/Twisted-10.0.0/doc/core/img/twisted-overview.dia create mode 100644 vendor/Twisted-10.0.0/doc/core/img/twisted-overview.png create mode 100644 vendor/Twisted-10.0.0/doc/core/index.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/manhole-man.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/manhole.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/man/mktap-man.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/mktap.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/man/pyhtmlizer-man.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/pyhtmlizer.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/man/tap2deb-man.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/tap2deb.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/man/tap2rpm-man.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/tap2rpm.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/man/tapconvert-man.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/tapconvert.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/man/trial-man.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/trial.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/man/twistd-man.html create mode 100644 vendor/Twisted-10.0.0/doc/core/man/twistd.1 create mode 100644 vendor/Twisted-10.0.0/doc/core/specifications/banana.html create mode 100644 vendor/Twisted-10.0.0/doc/core/specifications/index.html create mode 100644 vendor/Twisted-10.0.0/doc/core/upgrades/2.0/components.html create mode 100644 vendor/Twisted-10.0.0/doc/core/upgrades/2.0/index.html create mode 100644 vendor/Twisted-10.0.0/doc/core/upgrades/2.0/split.html create mode 100644 vendor/Twisted-10.0.0/doc/core/upgrades/index.html create mode 100644 vendor/Twisted-10.0.0/doc/fun/Twisted.Quotes create mode 100644 vendor/Twisted-10.0.0/doc/fun/lightbulb create mode 100644 vendor/Twisted-10.0.0/doc/fun/register.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2002/ipc10/twisted-network-framework/errata.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2002/ipc10/twisted-network-framework/index.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/europython/doanddont.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/europython/index.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/europython/lore.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/europython/slides-template.tpl create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/europython/tw-deploy.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/europython/twisted.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/europython/webclients.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/haifux/haifux.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/haifux/notes.html create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/applications/applications create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/applications/applications.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/applications/pynfo-chart.png create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/conch/conch create mode 100644 
vendor/Twisted-10.0.0/doc/historic/2003/pycon/conch/conch.html create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/conch/conchtalk.txt create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/conch/smalltwisted.png create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/conch/twistedlogo.png create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-bad-adding.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-chaining.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-complex-failure.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-complex-raise.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-forwarding.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-listing0.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-listing1.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-listing2.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-simple-failure.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex-simple-raise.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferex.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/deferex/deferexex.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/intrinsics-lightning/intrinsics-lightning create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/lore/lore-presentation create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/lore/lore-slides.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/lore/lore.html create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/pb/pb-client1.py create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/pb/pb-server1.py create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/pb/pb-slides.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/pb/pb.html create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/releasing/releasing-twisted create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/releasing/releasing.html create mode 100755 vendor/Twisted-10.0.0/doc/historic/2003/pycon/tw-deploy/tw-deploy create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/tw-deploy/twisted-overview.png create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/tw-deploy/twistedlogo.png create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/twisted-internet/twisted-internet.py create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/twisted-reality/componentized.svg create mode 100644 vendor/Twisted-10.0.0/doc/historic/2003/pycon/twisted-reality/twisted-reality.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/2004/ibm/talk.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/index.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/ipc10errata.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/ipc10paper.html create mode 100644 vendor/Twisted-10.0.0/doc/historic/stylesheet.css create mode 100644 vendor/Twisted-10.0.0/doc/historic/template-notoc.tpl create mode 100644 vendor/Twisted-10.0.0/doc/historic/template.tpl create mode 100644 vendor/Twisted-10.0.0/doc/historic/twisted-debian.html create mode 100644 vendor/Twisted-10.0.0/doc/lore/examples/example.html create mode 100644 
vendor/Twisted-10.0.0/doc/lore/examples/index.html create mode 100644 vendor/Twisted-10.0.0/doc/lore/examples/slides-template.tpl create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/extend-lore.html create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/index.html create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/listings/lore/1st_example.html create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/listings/lore/a_lore_plugin.py create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/listings/lore/factory.py-1 create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/listings/lore/factory.py-2 create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/listings/lore/factory.py-3 create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/listings/lore/spitters.py-1 create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/listings/lore/spitters.py-2 create mode 100644 vendor/Twisted-10.0.0/doc/lore/howto/lore.html create mode 100644 vendor/Twisted-10.0.0/doc/lore/img/myhtml-output.png create mode 100644 vendor/Twisted-10.0.0/doc/lore/index.html create mode 100644 vendor/Twisted-10.0.0/doc/lore/man/lore-man.html create mode 100644 vendor/Twisted-10.0.0/doc/lore/man/lore.1 create mode 100644 vendor/Twisted-10.0.0/doc/mail/examples/emailserver.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/examples/imap4client.py create mode 100644 vendor/Twisted-10.0.0/doc/mail/examples/index.html create mode 100644 vendor/Twisted-10.0.0/doc/mail/examples/smtpclient_tls.py create mode 100644 vendor/Twisted-10.0.0/doc/mail/index.html create mode 100644 vendor/Twisted-10.0.0/doc/mail/man/mailmail-man.html create mode 100644 vendor/Twisted-10.0.0/doc/mail/man/mailmail.1 create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-1.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-10.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-11.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-2.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-3.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-4.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-5.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-6.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-7.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-8.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient-9.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpclient/smtpclient.html create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpserver/smtpserver-1.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpserver/smtpserver-2.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpserver/smtpserver-3.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpserver/smtpserver-4.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpserver/smtpserver-5.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpserver/smtpserver-6.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpserver/smtpserver-7.tac create mode 100644 vendor/Twisted-10.0.0/doc/mail/tutorial/smtpserver/smtpserver-8.tac create mode 100755 vendor/Twisted-10.0.0/doc/names/examples/dns-service.py create mode 100755 
vendor/Twisted-10.0.0/doc/names/examples/gethostbyname.py create mode 100644 vendor/Twisted-10.0.0/doc/names/examples/index.html create mode 100644 vendor/Twisted-10.0.0/doc/names/examples/testdns.py create mode 100644 vendor/Twisted-10.0.0/doc/names/howto/index.html create mode 100644 vendor/Twisted-10.0.0/doc/names/howto/listings/names/example-domain.com create mode 100644 vendor/Twisted-10.0.0/doc/names/howto/names.html create mode 100644 vendor/Twisted-10.0.0/doc/names/index.html create mode 100644 vendor/Twisted-10.0.0/doc/pair/examples/index.html create mode 100644 vendor/Twisted-10.0.0/doc/pair/examples/pairudp.py create mode 100644 vendor/Twisted-10.0.0/doc/pair/howto/index.html create mode 100644 vendor/Twisted-10.0.0/doc/pair/howto/twisted-pair.html create mode 100644 vendor/Twisted-10.0.0/doc/pair/index.html create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/advogato.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/dlpage.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/fortune.rpy.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/getpage.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/google.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/hello.rpy.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/httpclient.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/index.html create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/lj.rpy.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/proxy.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/report.rpy.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/rootscript.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/silly-web.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/simple.rtl create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/soap.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/users.rpy.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/vhost.rpy.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/web.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/webguard.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/xmlrpc.py create mode 100644 vendor/Twisted-10.0.0/doc/web/examples/xmlrpcclient.py create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/client.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/formindepth.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/glossary.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/index.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/listings/client/request.py create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/listings/client/response.py create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/listings/client/sendbody.py create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/listings/client/stringprod.py create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/listings/soap.rpy create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/listings/webquote.rtl create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/listings/xmlAndSoapQuote.py create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/listings/xmlquote.rpy create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/resource-templates.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/using-twistedweb.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-development.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/asynchronous-deferred.html create mode 100644 
vendor/Twisted-10.0.0/doc/web/howto/web-in-60/asynchronous.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/custom-codes.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/dynamic-content.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/dynamic-dispatch.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/error-handling.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/handling-posts.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/http-auth.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/index.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/interrupted.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/logging-errors.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/rpy-scripts.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/session-basics.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/session-endings.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/session-store.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/static-content.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/static-dispatch.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-in-60/wsgi.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/web-overview.html create mode 100644 vendor/Twisted-10.0.0/doc/web/howto/xmlrpc.html create mode 100644 vendor/Twisted-10.0.0/doc/web/img/controller.png create mode 100644 vendor/Twisted-10.0.0/doc/web/img/livepage.png create mode 100644 vendor/Twisted-10.0.0/doc/web/img/model.png create mode 100644 vendor/Twisted-10.0.0/doc/web/img/plone_root_model.png create mode 100644 vendor/Twisted-10.0.0/doc/web/img/view.png create mode 100644 vendor/Twisted-10.0.0/doc/web/img/web-overview.dia create mode 100644 vendor/Twisted-10.0.0/doc/web/img/web-overview.png create mode 100644 vendor/Twisted-10.0.0/doc/web/img/web-process.png create mode 100644 vendor/Twisted-10.0.0/doc/web/img/web-process.svg create mode 100644 vendor/Twisted-10.0.0/doc/web/img/web-session.png create mode 100644 vendor/Twisted-10.0.0/doc/web/img/web-widgets.dia create mode 100644 vendor/Twisted-10.0.0/doc/web/img/web-widgets.png create mode 100644 vendor/Twisted-10.0.0/doc/web/index.html create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/aimbot.py create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/cursesclient.py create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/index.html create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/ircLogBot.py create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/jabber_client.py create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/minchat.py create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/msn_example.py create mode 100755 vendor/Twisted-10.0.0/doc/words/examples/oscardemo.py create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/pb_client.py create mode 100644 vendor/Twisted-10.0.0/doc/words/examples/xmpp_client.py create mode 100644 vendor/Twisted-10.0.0/doc/words/howto/im.html create mode 100644 vendor/Twisted-10.0.0/doc/words/howto/index.html create mode 100644 vendor/Twisted-10.0.0/doc/words/index.html create mode 100644 vendor/Twisted-10.0.0/doc/words/man/im-man.html create mode 100644 vendor/Twisted-10.0.0/doc/words/man/im.1 create mode 100755 vendor/Twisted-10.0.0/setup.py create mode 100644 
vendor/Twisted-10.0.0/twisted/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/application/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/application/app.py create mode 100644 vendor/Twisted-10.0.0/twisted/application/internet.py create mode 100644 vendor/Twisted-10.0.0/twisted/application/reactors.py create mode 100644 vendor/Twisted-10.0.0/twisted/application/service.py create mode 100644 vendor/Twisted-10.0.0/twisted/application/strports.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/avatar.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/checkers.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/client/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/client/agent.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/client/connect.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/client/default.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/client/direct.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/client/knownhosts.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/client/options.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/error.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/insults/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/insults/client.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/insults/colors.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/insults/helper.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/insults/insults.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/insults/text.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/insults/window.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/interfaces.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ls.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/manhole.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/manhole_ssh.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/manhole_tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/mixin.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/openssh_compat/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/openssh_compat/factory.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/openssh_compat/primes.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/recvline.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/scripts/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/scripts/cftp.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/scripts/ckeygen.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/scripts/conch.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/scripts/tkconch.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/agent.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/asn1.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/channel.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/common.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/connection.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/factory.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/filetransfer.py create mode 100755 
vendor/Twisted-10.0.0/twisted/conch/ssh/forwarding.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/keys.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/service.py create mode 100755 vendor/Twisted-10.0.0/twisted/conch/ssh/session.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/sexpy.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/transport.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ssh/userauth.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/stdio.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/telnet.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/keydata.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_agent.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_cftp.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_channel.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_checkers.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_ckeygen.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_conch.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_connection.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_default.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_filetransfer.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_helper.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_insults.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_keys.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_knownhosts.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_manhole.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_mixin.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_openssh_compat.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_recvline.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_session.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_ssh.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_telnet.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_text.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_transport.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_userauth.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/test/test_window.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/conch/topfiles/README create mode 100644 vendor/Twisted-10.0.0/twisted/conch/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ttymodes.py create mode 100755 vendor/Twisted-10.0.0/twisted/conch/ui/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ui/ansi.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/ui/tkvt100.py create mode 100644 vendor/Twisted-10.0.0/twisted/conch/unix.py create mode 100644 vendor/Twisted-10.0.0/twisted/copyright.py create mode 100644 vendor/Twisted-10.0.0/twisted/cred/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/cred/_digest.py create mode 100644 vendor/Twisted-10.0.0/twisted/cred/checkers.py create mode 100644 
vendor/Twisted-10.0.0/twisted/cred/credentials.py create mode 100644 vendor/Twisted-10.0.0/twisted/cred/error.py create mode 100644 vendor/Twisted-10.0.0/twisted/cred/pamauth.py create mode 100644 vendor/Twisted-10.0.0/twisted/cred/portal.py create mode 100644 vendor/Twisted-10.0.0/twisted/cred/strcred.py create mode 100644 vendor/Twisted-10.0.0/twisted/cred/util.py create mode 100644 vendor/Twisted-10.0.0/twisted/enterprise/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/enterprise/adbapi.py create mode 100644 vendor/Twisted-10.0.0/twisted/enterprise/reflector.py create mode 100644 vendor/Twisted-10.0.0/twisted/enterprise/row.py create mode 100644 vendor/Twisted-10.0.0/twisted/enterprise/sqlreflector.py create mode 100644 vendor/Twisted-10.0.0/twisted/enterprise/util.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_baseprocess.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_dumbwin32proc.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_javaserialport.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_pollingfile.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_posixserialport.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_posixstdio.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_sslverify.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_threadedselect.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_win32serialport.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/_win32stdio.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/abstract.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/address.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/base.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfsupport/cfdate.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfsupport/cfdecl.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfsupport/cfrunloop.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfsupport/cfsocket.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfsupport/cfsupport.c create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfsupport/cfsupport.pyx create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfsupport/python.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/cfsupport/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/default.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/defer.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/epollreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/error.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/fdesc.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/glib2reactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/gtk2reactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/gtkreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/interfaces.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/abstract.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/build.bat create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/const.py create mode 100644 
vendor/Twisted-10.0.0/twisted/internet/iocpreactor/interfaces.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/iocpsupport/acceptex.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/iocpsupport/connectex.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/iocpsupport/iocpsupport.c create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/iocpsupport/iocpsupport.pyx create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/iocpsupport/winsock_pointers.c create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/iocpsupport/winsock_pointers.h create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/iocpsupport/wsarecv.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/iocpsupport/wsasend.pxi create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/notes.txt create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/reactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/tcp.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/iocpreactor/udp.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/kqreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/main.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/pollreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/posixbase.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/process.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/protocol.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/pyuisupport.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/qtreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/reactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/selectreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/serialport.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/ssl.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/stdio.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/task.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/tcp.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/inlinecb_tests.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/process_helper.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/reactormixins.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_base.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_baseprocess.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_core.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_fdset.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_inlinecb.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_iocp.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_pollingfile.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_posixbase.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_process.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_qtreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_tcp.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_threads.py create mode 
100644 vendor/Twisted-10.0.0/twisted/internet/test/test_time.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_tls.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/test/test_unix.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/threads.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/tksupport.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/udp.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/unix.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/utils.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/win32eventreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/wxreactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/internet/wxsupport.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/default.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/docbook.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/htmlbook.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/indexer.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/latex.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/lint.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/lmath.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/man2lore.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/numberer.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/process.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/scripts/__init__.py create mode 100755 vendor/Twisted-10.0.0/twisted/lore/scripts/lore.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/slides.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/template.mgp create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/lore_index_file_out.html create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/lore_index_file_out_multiple.html create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/lore_index_file_unnumbered_out.html create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/lore_index_test.xhtml create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/lore_index_test2.xhtml create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/lore_numbering_test_out.html create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/lore_numbering_test_out2.html create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/simple.html create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/simple3.html create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/simple4.html create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/template.tpl create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/test_docbook.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/test_latex.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/test_lint.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/test_lmath.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/test_lore.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/test_man2lore.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/test/test_slides.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/texi.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/lore/topfiles/README create mode 100644 
vendor/Twisted-10.0.0/twisted/lore/topfiles/setup.py create mode 100755 vendor/Twisted-10.0.0/twisted/lore/tree.py create mode 100644 vendor/Twisted-10.0.0/twisted/lore/xhtml-lat1.ent create mode 100644 vendor/Twisted-10.0.0/twisted/lore/xhtml-special.ent create mode 100644 vendor/Twisted-10.0.0/twisted/lore/xhtml-symbol.ent create mode 100644 vendor/Twisted-10.0.0/twisted/lore/xhtml1-strict.dtd create mode 100644 vendor/Twisted-10.0.0/twisted/lore/xhtml1-transitional.dtd create mode 100644 vendor/Twisted-10.0.0/twisted/mail/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/alias.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/bounce.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/imap4.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/mail.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/maildir.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/pb.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/pop3.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/pop3client.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/protocols.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/relay.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/relaymanager.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/scripts/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/scripts/mailmail.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/smtp.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/pop3testserver.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/rfc822.message create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/test_bounce.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/test_imap.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/test_mail.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/test_mailmail.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/test_options.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/test_pop3.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/test_pop3client.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/test/test_smtp.py create mode 100644 vendor/Twisted-10.0.0/twisted/mail/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/mail/topfiles/README create mode 100644 vendor/Twisted-10.0.0/twisted/mail/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/_inspectro.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/explorer.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/gladereactor.glade create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/gladereactor.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/inspectro.glade create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/logview.glade create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/service.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/telnet.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/ui/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/ui/gtk2manhole.glade create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/ui/gtk2manhole.py create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/ui/test/__init__.py 
create mode 100644 vendor/Twisted-10.0.0/twisted/manhole/ui/test/test_gtk2manhole.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/authority.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/cache.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/client.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/common.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/dns.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/error.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/hosts.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/resolve.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/root.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/secondary.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/server.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/srvconnect.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/test/test_cache.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/test/test_client.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/test/test_common.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/test/test_dns.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/test/test_names.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/test/test_rootresolve.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/test/test_srvconnect.py create mode 100644 vendor/Twisted-10.0.0/twisted/names/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/names/topfiles/README create mode 100644 vendor/Twisted-10.0.0/twisted/names/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/database.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/news.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/nntp.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/test/test_news.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/test/test_nntp.py create mode 100644 vendor/Twisted-10.0.0/twisted/news/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/news/topfiles/README create mode 100644 vendor/Twisted-10.0.0/twisted/news/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/ethernet.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/ip.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/raw.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/rawudp.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/test/test_ethernet.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/test/test_ip.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/test/test_rawudp.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/pair/topfiles/README create mode 100644 
vendor/Twisted-10.0.0/twisted/pair/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/pair/tuntap.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/aot.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/crefutil.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/dirdbm.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/journal/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/journal/base.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/journal/picklelog.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/journal/rowjournal.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/sob.py create mode 100644 vendor/Twisted-10.0.0/twisted/persisted/styles.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugin.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/cred_anonymous.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/cred_file.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/cred_memory.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/cred_unix.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_conch.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_ftp.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_inet.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_lore.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_mail.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_manhole.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_names.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_news.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_portforward.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_qtstub.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_reactors.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_socks.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_telnet.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_trial.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_web.py create mode 100644 vendor/Twisted-10.0.0/twisted/plugins/twisted_words.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/_c_urlarg.c create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/amp.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/basic.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/dict.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/finger.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/ftp.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/gps/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/gps/nmea.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/gps/rockwell.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/htb.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/ident.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/loopback.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/memcache.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/mice/__init__.py create mode 100644 
vendor/Twisted-10.0.0/twisted/protocols/mice/mouseman.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/pcp.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/policies.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/portforward.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/postfix.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/shoutcast.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/sip.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/socks.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/stateful.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/telnet.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/test/test_tls.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/tls.py create mode 100644 vendor/Twisted-10.0.0/twisted/protocols/wire.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/_epoll.c create mode 100644 vendor/Twisted-10.0.0/twisted/python/_epoll.pyx create mode 100644 vendor/Twisted-10.0.0/twisted/python/_initgroups.c create mode 100644 vendor/Twisted-10.0.0/twisted/python/_release.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/_twisted_zsh_stub create mode 100644 vendor/Twisted-10.0.0/twisted/python/compat.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/components.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/context.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/deprecate.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/dispatch.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/dist.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/dxprofile.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/failure.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/fakepwd.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/filepath.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/finalize.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/formmethod.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/hashlib.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/hook.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/htmlizer.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/lockfile.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/log.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/logfile.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/modules.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/monkey.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/otp.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/procutils.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/randbytes.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/rebuild.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/reflect.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/release.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/roots.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/runtime.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/shortcut.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/syslog.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/deprecatedattributes.py 
create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_components.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_deprecate.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_dist.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_fakepwd.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_hashlib.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_htmlizer.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_release.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_runtime.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_syslog.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_util.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_versions.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_win32.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/test/test_zipstream.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/text.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/threadable.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/threadpool.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/timeoutqueue.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/urlpath.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/usage.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/util.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/versions.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/win32.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/zippath.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/zipstream.py create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/README create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_cftp create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_ckeygen create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_conch create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_lore create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_manhole create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_mktap create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_pyhtmlizer create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_tap2deb create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_tap2rpm create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_tapconvert create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_tkconch create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_tkmktap create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_trial create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_twistd create mode 100644 vendor/Twisted-10.0.0/twisted/python/zsh/_websetroot create mode 100644 vendor/Twisted-10.0.0/twisted/python/zshcomp.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/inetd.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/inetdconf.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/inetdtap.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/portmap.c create mode 100644 vendor/Twisted-10.0.0/twisted/runner/procmon.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/procutils.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/test/__init__.py create mode 100644 
vendor/Twisted-10.0.0/twisted/runner/test/test_procmon.py create mode 100644 vendor/Twisted-10.0.0/twisted/runner/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/runner/topfiles/README create mode 100644 vendor/Twisted-10.0.0/twisted/runner/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/_twistd_unix.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/_twistw.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/htmlizer.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/manhole.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/mktap.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/tap2deb.py create mode 100755 vendor/Twisted-10.0.0/twisted/scripts/tap2rpm.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/tapconvert.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/test/test_mktap.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/tkunzip.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/trial.py create mode 100644 vendor/Twisted-10.0.0/twisted/scripts/twistd.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/banana.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/flavors.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/interfaces.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/jelly.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/pb.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/publish.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/refpath.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/ui/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/ui/gtk2util.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/ui/login2.glade create mode 100644 vendor/Twisted-10.0.0/twisted/spread/ui/tktree.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/ui/tkutil.py create mode 100644 vendor/Twisted-10.0.0/twisted/spread/util.py create mode 100644 vendor/Twisted-10.0.0/twisted/tap/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/tap/ftp.py create mode 100644 vendor/Twisted-10.0.0/twisted/tap/manhole.py create mode 100644 vendor/Twisted-10.0.0/twisted/tap/portforward.py create mode 100644 vendor/Twisted-10.0.0/twisted/tap/socks.py create mode 100644 vendor/Twisted-10.0.0/twisted/tap/telnet.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/crash_test_dummy.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/generator_failure_tests.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/iosim.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/mock_win32process.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/myrebuilder1.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/myrebuilder2.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/plugin_basic.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/plugin_extra1.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/plugin_extra2.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_cmdline.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_echoer.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_fds.py create mode 100644 
vendor/Twisted-10.0.0/twisted/test/process_linger.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_reader.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_signal.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_stdinreader.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_tester.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_tty.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/process_twisted.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/proto_helpers.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/raiser.c create mode 100644 vendor/Twisted-10.0.0/twisted/test/raiser.pyx create mode 100644 vendor/Twisted-10.0.0/twisted/test/reflect_helper_IE.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/reflect_helper_VE.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/reflect_helper_ZDE.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/server.pem create mode 100644 vendor/Twisted-10.0.0/twisted/test/ssl_helpers.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/stdio_test_consumer.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/stdio_test_hostpeer.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/stdio_test_lastwrite.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/stdio_test_loseconn.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/stdio_test_producer.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/stdio_test_write.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/stdio_test_writeseq.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_abstract.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_adbapi.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_amp.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_application.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_banana.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_compat.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_context.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_cooperator.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_defer.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_defgen.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_dict.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_digestauth.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_dirdbm.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_doc.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_enterprise.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_epoll.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_error.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_explorer.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_extensions.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_factories.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_failure.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_fdesc.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_finger.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_formmethod.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_ftp.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_hook.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_htb.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_ident.py create mode 100644 
vendor/Twisted-10.0.0/twisted/test/test_import.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_internet.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_iutils.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_jelly.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_journal.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_lockfile.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_log.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_logfile.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_loopback.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_manhole.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_memcache.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_modules.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_monkey.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_newcred.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_nmea.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_paths.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_pb.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_pbfailure.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_pcp.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_persisted.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_plugin.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_policies.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_postfix.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_process.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_protocols.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_randbytes.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_rebuild.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_reflect.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_reflector.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_roots.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_shortcut.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_sip.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_sob.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_socks.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_ssl.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_sslverify.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_stateful.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_stdio.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_strcred.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_strerror.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_stringtransport.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_strports.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_task.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_tcp.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_tcp_internals.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_text.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_threadable.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_threadpool.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_threads.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_timehelpers.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_timeoutqueue.py 
create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_tpfile.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_twistd.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_udp.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_unix.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_usage.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/test_zshcomp.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/testutils.py create mode 100644 vendor/Twisted-10.0.0/twisted/test/time_helpers.py create mode 100644 vendor/Twisted-10.0.0/twisted/topfiles/4335.misc create mode 100644 vendor/Twisted-10.0.0/twisted/topfiles/CREDITS create mode 100644 vendor/Twisted-10.0.0/twisted/topfiles/ChangeLog.Old create mode 100644 vendor/Twisted-10.0.0/twisted/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/topfiles/README create mode 100644 vendor/Twisted-10.0.0/twisted/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/itrial.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/reporter.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/runner.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/detests.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/erroneous.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/mockcustomsuite.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/mockcustomsuite2.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/mockcustomsuite3.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/mockdoctest.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/moduleself.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/moduletest.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/notpython create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/novars.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/packages.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/sample.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/scripttest.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/suppression.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_assertions.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_deferred.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_doctest.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_keyboard.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_loader.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_log.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_output.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_plugins.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_pyunitcompat.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_reporter.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_runner.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_script.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_test_visitor.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_testcase.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_tests.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/test_util.py create mode 
100644 vendor/Twisted-10.0.0/twisted/trial/test/test_warning.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/test/weird.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/unittest.py create mode 100644 vendor/Twisted-10.0.0/twisted/trial/util.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/_auth/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/_auth/basic.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/_auth/digest.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/_auth/wrapper.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/_newclient.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/client.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/demo.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/distrib.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/domhelpers.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/error.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/google.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/guard.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/html.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/http.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/http_headers.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/iweb.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/microdom.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/proxy.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/resource.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/rewrite.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/script.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/server.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/soap.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/static.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/sux.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/_util.py create mode 100755 vendor/Twisted-10.0.0/twisted/web/test/test_cgi.py create mode 100755 vendor/Twisted-10.0.0/twisted/web/test/test_distrib.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_domhelpers.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_error.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_http.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_http_headers.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_httpauth.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_newclient.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_proxy.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_resource.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_script.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_soap.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_static.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_vhost.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_web.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_webclient.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_wsgi.py create mode 100644 
vendor/Twisted-10.0.0/twisted/web/test/test_xml.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/test/test_xmlrpc.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/web/topfiles/README create mode 100644 vendor/Twisted-10.0.0/twisted/web/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/trp.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/twcgi.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/util.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/vhost.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/wsgi.py create mode 100644 vendor/Twisted-10.0.0/twisted/web/xmlrpc.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/_version.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/ewords.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/baseaccount.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/basechat.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/basesupport.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/instancemessenger.glade create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/interfaces.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/ircsupport.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/locals.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/pbsupport.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/proxyui.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/im/tocsupport.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/iwords.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/irc.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/client.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/component.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/error.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/ijabber.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/jid.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/jstrports.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/sasl.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/sasl_mechanisms.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/xmlstream.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/jabber/xmpp_stringprep.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/msn.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/oscar.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/protocols/toc.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/service.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_basesupport.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_domish.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_irc.py create mode 
100644 vendor/Twisted-10.0.0/twisted/words/test/test_irc_service.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_jabberclient.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_jabbercomponent.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_jabbererror.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_jabberjid.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_jabbersasl.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_jabbersaslmechanisms.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_jabberxmlstream.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_jabberxmppstringprep.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_msn.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_oscar.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_service.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_tap.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_toc.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_xishutil.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_xmlstream.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_xmpproutertap.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/test/test_xpath.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/toctap.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/topfiles/NEWS create mode 100644 vendor/Twisted-10.0.0/twisted/words/topfiles/README create mode 100644 vendor/Twisted-10.0.0/twisted/words/topfiles/setup.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/xish/__init__.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/xish/domish.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/xish/utility.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/xish/xmlstream.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/xish/xpath.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/xish/xpathparser.g create mode 100644 vendor/Twisted-10.0.0/twisted/words/xish/xpathparser.py create mode 100644 vendor/Twisted-10.0.0/twisted/words/xmpproutertap.py create mode 100644 vendor/amqplib/__init__.py create mode 100644 vendor/amqplib/client_0_8/__init__.py create mode 100644 vendor/amqplib/client_0_8/abstract_channel.py create mode 100644 vendor/amqplib/client_0_8/basic_message.py create mode 100644 vendor/amqplib/client_0_8/channel.py create mode 100644 vendor/amqplib/client_0_8/connection.py create mode 100644 vendor/amqplib/client_0_8/exceptions.py create mode 100644 vendor/amqplib/client_0_8/method_framing.py create mode 100644 vendor/amqplib/client_0_8/serialization.py create mode 100644 vendor/amqplib/client_0_8/transport.py create mode 100644 vendor/anyjson/__init__.py create mode 100644 vendor/boto/README create mode 100755 vendor/boto/bin/bundle_image create mode 100644 vendor/boto/bin/cfadmin create mode 100755 vendor/boto/bin/elbadmin create mode 100755 vendor/boto/bin/fetch_file create mode 100644 vendor/boto/bin/kill_instance create mode 100755 vendor/boto/bin/launch_instance create mode 100755 vendor/boto/bin/list_instances create mode 100755 vendor/boto/bin/pyami_sendmail create mode 100755 vendor/boto/bin/s3put create mode 100755 vendor/boto/bin/sdbadmin create mode 100755 vendor/boto/bin/taskadmin create mode 100644 vendor/boto/boto/__init__.py create mode 100644 vendor/boto/boto/cloudfront/__init__.py 
create mode 100644 vendor/boto/boto/cloudfront/distribution.py create mode 100644 vendor/boto/boto/cloudfront/exception.py create mode 100644 vendor/boto/boto/cloudfront/identity.py create mode 100644 vendor/boto/boto/cloudfront/logging.py create mode 100644 vendor/boto/boto/cloudfront/object.py create mode 100644 vendor/boto/boto/cloudfront/signers.py create mode 100644 vendor/boto/boto/connection.py create mode 100644 vendor/boto/boto/contrib/__init__.py create mode 100644 vendor/boto/boto/contrib/m2helpers.py create mode 100644 vendor/boto/boto/contrib/ymlmessage.py create mode 100644 vendor/boto/boto/ec2/__init__.py create mode 100644 vendor/boto/boto/ec2/address.py create mode 100644 vendor/boto/boto/ec2/autoscale/__init__.py create mode 100644 vendor/boto/boto/ec2/autoscale/activity.py create mode 100644 vendor/boto/boto/ec2/autoscale/group.py create mode 100644 vendor/boto/boto/ec2/autoscale/instance.py create mode 100644 vendor/boto/boto/ec2/autoscale/launchconfig.py create mode 100644 vendor/boto/boto/ec2/autoscale/request.py create mode 100644 vendor/boto/boto/ec2/autoscale/trigger.py create mode 100644 vendor/boto/boto/ec2/blockdevicemapping.py create mode 100644 vendor/boto/boto/ec2/bundleinstance.py create mode 100644 vendor/boto/boto/ec2/buyreservation.py create mode 100644 vendor/boto/boto/ec2/cloudwatch/__init__.py create mode 100644 vendor/boto/boto/ec2/cloudwatch/datapoint.py create mode 100644 vendor/boto/boto/ec2/cloudwatch/metric.py create mode 100644 vendor/boto/boto/ec2/connection.py create mode 100644 vendor/boto/boto/ec2/ec2object.py create mode 100644 vendor/boto/boto/ec2/elb/__init__.py create mode 100644 vendor/boto/boto/ec2/elb/healthcheck.py create mode 100644 vendor/boto/boto/ec2/elb/instancestate.py create mode 100644 vendor/boto/boto/ec2/elb/listelement.py create mode 100644 vendor/boto/boto/ec2/elb/listener.py create mode 100644 vendor/boto/boto/ec2/elb/loadbalancer.py create mode 100644 vendor/boto/boto/ec2/image.py create mode 100644 vendor/boto/boto/ec2/instance.py create mode 100644 vendor/boto/boto/ec2/instanceinfo.py create mode 100644 vendor/boto/boto/ec2/keypair.py create mode 100644 vendor/boto/boto/ec2/launchspecification.py create mode 100644 vendor/boto/boto/ec2/regioninfo.py create mode 100644 vendor/boto/boto/ec2/reservedinstance.py create mode 100644 vendor/boto/boto/ec2/securitygroup.py create mode 100644 vendor/boto/boto/ec2/snapshot.py create mode 100644 vendor/boto/boto/ec2/spotdatafeedsubscription.py create mode 100644 vendor/boto/boto/ec2/spotinstancerequest.py create mode 100644 vendor/boto/boto/ec2/spotpricehistory.py create mode 100644 vendor/boto/boto/ec2/volume.py create mode 100644 vendor/boto/boto/ec2/zone.py create mode 100644 vendor/boto/boto/emr/__init__.py create mode 100644 vendor/boto/boto/emr/connection.py create mode 100644 vendor/boto/boto/emr/emrobject.py create mode 100644 vendor/boto/boto/emr/jobflow.py create mode 100644 vendor/boto/boto/emr/step.py create mode 100644 vendor/boto/boto/exception.py create mode 100644 vendor/boto/boto/fps/__init__.py create mode 100644 vendor/boto/boto/fps/connection.py create mode 100644 vendor/boto/boto/handler.py create mode 100644 vendor/boto/boto/manage/__init__.py create mode 100644 vendor/boto/boto/manage/cmdshell.py create mode 100644 vendor/boto/boto/manage/propget.py create mode 100644 vendor/boto/boto/manage/server.py create mode 100644 vendor/boto/boto/manage/task.py create mode 100644 vendor/boto/boto/manage/test_manage.py create mode 100644 
vendor/boto/boto/manage/volume.py create mode 100644 vendor/boto/boto/mapreduce/__init__.py create mode 100644 vendor/boto/boto/mapreduce/lqs.py create mode 100644 vendor/boto/boto/mapreduce/partitiondb.py create mode 100644 vendor/boto/boto/mapreduce/pdb_delete create mode 100755 vendor/boto/boto/mapreduce/pdb_describe create mode 100755 vendor/boto/boto/mapreduce/pdb_revert create mode 100755 vendor/boto/boto/mapreduce/pdb_upload create mode 100644 vendor/boto/boto/mapreduce/queuetools.py create mode 100644 vendor/boto/boto/mashups/__init__.py create mode 100644 vendor/boto/boto/mashups/interactive.py create mode 100644 vendor/boto/boto/mashups/iobject.py create mode 100644 vendor/boto/boto/mashups/order.py create mode 100644 vendor/boto/boto/mashups/server.py create mode 100644 vendor/boto/boto/mturk/__init__.py create mode 100644 vendor/boto/boto/mturk/connection.py create mode 100644 vendor/boto/boto/mturk/notification.py create mode 100644 vendor/boto/boto/mturk/price.py create mode 100644 vendor/boto/boto/mturk/qualification.py create mode 100644 vendor/boto/boto/mturk/question.py create mode 100644 vendor/boto/boto/mturk/test/all_tests.py create mode 100644 vendor/boto/boto/mturk/test/cleanup_tests.py create mode 100644 vendor/boto/boto/mturk/test/create_free_text_question_regex.doctest create mode 100644 vendor/boto/boto/mturk/test/create_hit.doctest create mode 100644 vendor/boto/boto/mturk/test/create_hit_binary.doctest create mode 100644 vendor/boto/boto/mturk/test/create_hit_external.py create mode 100644 vendor/boto/boto/mturk/test/create_hit_from_hit_type.doctest create mode 100644 vendor/boto/boto/mturk/test/create_hit_with_qualifications.py create mode 100644 vendor/boto/boto/mturk/test/reviewable_hits.doctest create mode 100644 vendor/boto/boto/mturk/test/search_hits.doctest create mode 100644 vendor/boto/boto/pyami/__init__.py create mode 100644 vendor/boto/boto/pyami/bootstrap.py create mode 100644 vendor/boto/boto/pyami/config.py create mode 100644 vendor/boto/boto/pyami/copybot.cfg create mode 100644 vendor/boto/boto/pyami/copybot.py create mode 100644 vendor/boto/boto/pyami/helloworld.py create mode 100644 vendor/boto/boto/pyami/installers/__init__.py create mode 100644 vendor/boto/boto/pyami/installers/ubuntu/__init__.py create mode 100644 vendor/boto/boto/pyami/installers/ubuntu/apache.py create mode 100644 vendor/boto/boto/pyami/installers/ubuntu/ebs.py create mode 100644 vendor/boto/boto/pyami/installers/ubuntu/installer.py create mode 100644 vendor/boto/boto/pyami/installers/ubuntu/mysql.py create mode 100644 vendor/boto/boto/pyami/installers/ubuntu/trac.py create mode 100755 vendor/boto/boto/pyami/launch_ami.py create mode 100644 vendor/boto/boto/pyami/scriptbase.py create mode 100644 vendor/boto/boto/pyami/startup.py create mode 100644 vendor/boto/boto/rds/__init__.py create mode 100644 vendor/boto/boto/rds/dbinstance.py create mode 100644 vendor/boto/boto/rds/dbsecuritygroup.py create mode 100644 vendor/boto/boto/rds/dbsnapshot.py create mode 100644 vendor/boto/boto/rds/event.py create mode 100644 vendor/boto/boto/rds/parametergroup.py create mode 100644 vendor/boto/boto/resultset.py create mode 100644 vendor/boto/boto/s3/__init__.py create mode 100644 vendor/boto/boto/s3/acl.py create mode 100644 vendor/boto/boto/s3/bucket.py create mode 100644 vendor/boto/boto/s3/bucketlistresultset.py create mode 100644 vendor/boto/boto/s3/connection.py create mode 100644 vendor/boto/boto/s3/deletemarker.py create mode 100644 vendor/boto/boto/s3/key.py create mode 100644 
vendor/boto/boto/s3/prefix.py create mode 100644 vendor/boto/boto/s3/user.py create mode 100644 vendor/boto/boto/sdb/__init__.py create mode 100644 vendor/boto/boto/sdb/connection.py create mode 100644 vendor/boto/boto/sdb/db/__init__.py create mode 100644 vendor/boto/boto/sdb/db/blob.py create mode 100644 vendor/boto/boto/sdb/db/key.py create mode 100644 vendor/boto/boto/sdb/db/manager/__init__.py create mode 100644 vendor/boto/boto/sdb/db/manager/pgmanager.py create mode 100644 vendor/boto/boto/sdb/db/manager/sdbmanager.py create mode 100644 vendor/boto/boto/sdb/db/manager/xmlmanager.py create mode 100644 vendor/boto/boto/sdb/db/model.py create mode 100644 vendor/boto/boto/sdb/db/property.py create mode 100644 vendor/boto/boto/sdb/db/query.py create mode 100644 vendor/boto/boto/sdb/db/sequence.py create mode 100644 vendor/boto/boto/sdb/db/test_db.py create mode 100644 vendor/boto/boto/sdb/domain.py create mode 100644 vendor/boto/boto/sdb/item.py create mode 100644 vendor/boto/boto/sdb/persist/__init__.py create mode 100644 vendor/boto/boto/sdb/persist/checker.py create mode 100644 vendor/boto/boto/sdb/persist/object.py create mode 100644 vendor/boto/boto/sdb/persist/property.py create mode 100644 vendor/boto/boto/sdb/persist/test_persist.py create mode 100644 vendor/boto/boto/sdb/queryresultset.py create mode 100644 vendor/boto/boto/sdb/regioninfo.py create mode 100644 vendor/boto/boto/services/__init__.py create mode 100755 vendor/boto/boto/services/bs.py create mode 100644 vendor/boto/boto/services/message.py create mode 100644 vendor/boto/boto/services/result.py create mode 100644 vendor/boto/boto/services/service.py create mode 100644 vendor/boto/boto/services/servicedef.py create mode 100644 vendor/boto/boto/services/sonofmmm.cfg create mode 100644 vendor/boto/boto/services/sonofmmm.py create mode 100644 vendor/boto/boto/services/submit.py create mode 100644 vendor/boto/boto/sns/__init__.py create mode 100644 vendor/boto/boto/sqs/__init__.py create mode 100644 vendor/boto/boto/sqs/attributes.py create mode 100644 vendor/boto/boto/sqs/connection.py create mode 100644 vendor/boto/boto/sqs/jsonmessage.py create mode 100644 vendor/boto/boto/sqs/message.py create mode 100644 vendor/boto/boto/sqs/queue.py create mode 100644 vendor/boto/boto/sqs/regioninfo.py create mode 100644 vendor/boto/boto/tests/__init__.py create mode 100644 vendor/boto/boto/tests/devpay_s3.py create mode 100755 vendor/boto/boto/tests/test.py create mode 100644 vendor/boto/boto/tests/test_ec2connection.py create mode 100644 vendor/boto/boto/tests/test_s3connection.py create mode 100644 vendor/boto/boto/tests/test_s3versioning.py create mode 100644 vendor/boto/boto/tests/test_sdbconnection.py create mode 100644 vendor/boto/boto/tests/test_sqsconnection.py create mode 100644 vendor/boto/boto/utils.py create mode 100644 vendor/boto/boto/vpc/__init__.py create mode 100644 vendor/boto/boto/vpc/customergateway.py create mode 100644 vendor/boto/boto/vpc/dhcpoptions.py create mode 100644 vendor/boto/boto/vpc/subnet.py create mode 100644 vendor/boto/boto/vpc/vpc.py create mode 100644 vendor/boto/boto/vpc/vpnconnection.py create mode 100644 vendor/boto/boto/vpc/vpngateway.py create mode 100755 vendor/boto/cq.py create mode 100644 vendor/boto/docs/Makefile create mode 100644 vendor/boto/docs/make.bat create mode 100644 vendor/boto/docs/source/_templates/layout.html create mode 100644 vendor/boto/docs/source/autoscale_tut.rst create mode 100644 vendor/boto/docs/source/boto_theme/static/boto.css_t create mode 100644 
vendor/boto/docs/source/boto_theme/static/pygments.css create mode 100644 vendor/boto/docs/source/boto_theme/theme.conf create mode 100644 vendor/boto/docs/source/conf.py create mode 100644 vendor/boto/docs/source/documentation.rst create mode 100644 vendor/boto/docs/source/ec2_tut.rst create mode 100644 vendor/boto/docs/source/elb_tut.rst create mode 100644 vendor/boto/docs/source/index.rst create mode 100644 vendor/boto/docs/source/ref/boto.rst create mode 100644 vendor/boto/docs/source/ref/cloudfront.rst create mode 100644 vendor/boto/docs/source/ref/contrib.rst create mode 100644 vendor/boto/docs/source/ref/ec2.rst create mode 100644 vendor/boto/docs/source/ref/fps.rst create mode 100644 vendor/boto/docs/source/ref/index.rst create mode 100644 vendor/boto/docs/source/ref/manage.rst create mode 100644 vendor/boto/docs/source/ref/mapreduce.rst create mode 100644 vendor/boto/docs/source/ref/mashups.rst create mode 100644 vendor/boto/docs/source/ref/mturk.rst create mode 100644 vendor/boto/docs/source/ref/pyami.rst create mode 100644 vendor/boto/docs/source/ref/rds.rst create mode 100644 vendor/boto/docs/source/ref/s3.rst create mode 100644 vendor/boto/docs/source/ref/sdb.rst create mode 100644 vendor/boto/docs/source/ref/services.rst create mode 100644 vendor/boto/docs/source/ref/sqs.rst create mode 100644 vendor/boto/docs/source/ref/vpc.rst create mode 100644 vendor/boto/docs/source/s3_tut.rst create mode 100644 vendor/boto/docs/source/sqs_tut.rst create mode 100644 vendor/boto/docs/source/vpc_tut.rst create mode 100644 vendor/boto/pylintrc create mode 100644 vendor/boto/setup.py create mode 100644 vendor/carrot/__init__.py create mode 100644 vendor/carrot/backends/__init__.py create mode 100644 vendor/carrot/backends/base.py create mode 100644 vendor/carrot/backends/pikachu.py create mode 100644 vendor/carrot/backends/pyamqplib.py create mode 100644 vendor/carrot/backends/pystomp.py create mode 100644 vendor/carrot/backends/queue.py create mode 100644 vendor/carrot/connection.py create mode 100644 vendor/carrot/messaging.py create mode 100644 vendor/carrot/serialization.py create mode 100644 vendor/carrot/utils.py create mode 100644 vendor/lockfile/2.4.diff create mode 100644 vendor/lockfile/ACKS create mode 100644 vendor/lockfile/LICENSE create mode 100644 vendor/lockfile/MANIFEST create mode 100644 vendor/lockfile/PKG-INFO create mode 100644 vendor/lockfile/README create mode 100644 vendor/lockfile/RELEASE-NOTES create mode 100644 vendor/lockfile/doc/Makefile create mode 100644 vendor/lockfile/doc/conf.py create mode 100644 vendor/lockfile/doc/glossary.rst create mode 100644 vendor/lockfile/doc/index.rst create mode 100644 vendor/lockfile/doc/lockfile.rst create mode 100644 vendor/lockfile/lockfile/__init__.py create mode 100644 vendor/lockfile/lockfile/linklockfile.py create mode 100644 vendor/lockfile/lockfile/mkdirlockfile.py create mode 100644 vendor/lockfile/lockfile/pidlockfile.py create mode 100644 vendor/lockfile/lockfile/sqlitelockfile.py create mode 100644 vendor/lockfile/setup.py create mode 100644 vendor/lockfile/test/compliancetest.py create mode 100644 vendor/lockfile/test/test_lockfile.py create mode 100644 vendor/pymox/COPYING create mode 100644 vendor/pymox/MANIFEST.in create mode 100644 vendor/pymox/README create mode 100755 vendor/pymox/mox.py create mode 100755 vendor/pymox/mox_test.py create mode 100755 vendor/pymox/mox_test_helper.py create mode 100755 vendor/pymox/setup.py create mode 100644 vendor/pymox/stubout.py create mode 100644 vendor/pymox/stubout_test.py 
create mode 100644 vendor/pymox/stubout_testee.py create mode 100644 vendor/python-daemon/ChangeLog create mode 100644 vendor/python-daemon/LICENSE.GPL-2 create mode 100644 vendor/python-daemon/LICENSE.PSF-2 create mode 100644 vendor/python-daemon/MANIFEST.in create mode 100644 vendor/python-daemon/PKG-INFO create mode 100644 vendor/python-daemon/README.nova create mode 100644 vendor/python-daemon/daemon/__init__.py create mode 100644 vendor/python-daemon/daemon/daemon.py create mode 100644 vendor/python-daemon/daemon/pidlockfile.py create mode 100644 vendor/python-daemon/daemon/runner.py create mode 100644 vendor/python-daemon/daemon/version/__init__.py create mode 100644 vendor/python-daemon/daemon/version/version_info.py create mode 100644 vendor/python-daemon/python_daemon.egg-info/PKG-INFO create mode 100644 vendor/python-daemon/python_daemon.egg-info/SOURCES.txt create mode 100644 vendor/python-daemon/python_daemon.egg-info/dependency_links.txt create mode 100644 vendor/python-daemon/python_daemon.egg-info/not-zip-safe create mode 100644 vendor/python-daemon/python_daemon.egg-info/requires.txt create mode 100644 vendor/python-daemon/python_daemon.egg-info/top_level.txt create mode 100644 vendor/python-daemon/setup.cfg create mode 100644 vendor/python-daemon/setup.py create mode 100644 vendor/python-gflags/AUTHORS create mode 100644 vendor/python-gflags/COPYING create mode 100644 vendor/python-gflags/ChangeLog create mode 100644 vendor/python-gflags/README create mode 100644 vendor/python-gflags/debian/README create mode 100644 vendor/python-gflags/debian/changelog create mode 100644 vendor/python-gflags/debian/compat create mode 100644 vendor/python-gflags/debian/control create mode 100644 vendor/python-gflags/debian/copyright create mode 100644 vendor/python-gflags/debian/docs create mode 100755 vendor/python-gflags/debian/rules create mode 100644 vendor/python-gflags/gflags.py create mode 100755 vendor/python-gflags/gflags2man.py create mode 100755 vendor/python-gflags/gflags_helpxml_test.py create mode 100755 vendor/python-gflags/gflags_unittest.py create mode 100755 vendor/python-gflags/setup.py create mode 100755 vendor/python-gflags/test_module_bar.py create mode 100755 vendor/python-gflags/test_module_foo.py create mode 100755 vendor/redis-py/.gitignore create mode 100755 vendor/redis-py/CHANGES create mode 100755 vendor/redis-py/INSTALL create mode 100755 vendor/redis-py/LICENSE create mode 100755 vendor/redis-py/MANIFEST.in create mode 100755 vendor/redis-py/README.md create mode 100755 vendor/redis-py/redis/__init__.py create mode 100755 vendor/redis-py/redis/client.py create mode 100755 vendor/redis-py/redis/exceptions.py create mode 100755 vendor/redis-py/setup.py create mode 100755 vendor/redis-py/tests/__init__.py create mode 100755 vendor/redis-py/tests/connection_pool.py create mode 100755 vendor/redis-py/tests/pipeline.py create mode 100755 vendor/redis-py/tests/server_commands.py create mode 100644 vendor/tornado/MANIFEST.in create mode 100644 vendor/tornado/README create mode 100644 vendor/tornado/demos/appengine/README create mode 100644 vendor/tornado/demos/appengine/app.yaml create mode 100644 vendor/tornado/demos/appengine/blog.py create mode 100644 vendor/tornado/demos/appengine/markdown.py create mode 100644 vendor/tornado/demos/appengine/static/blog.css create mode 100644 vendor/tornado/demos/appengine/templates/archive.html create mode 100644 vendor/tornado/demos/appengine/templates/base.html create mode 100644 
vendor/tornado/demos/appengine/templates/compose.html create mode 100644 vendor/tornado/demos/appengine/templates/entry.html create mode 100644 vendor/tornado/demos/appengine/templates/feed.xml create mode 100644 vendor/tornado/demos/appengine/templates/home.html create mode 100644 vendor/tornado/demos/appengine/templates/modules/entry.html create mode 100755 vendor/tornado/demos/auth/authdemo.py create mode 100644 vendor/tornado/demos/blog/README create mode 100755 vendor/tornado/demos/blog/blog.py create mode 100644 vendor/tornado/demos/blog/markdown.py create mode 100644 vendor/tornado/demos/blog/schema.sql create mode 100644 vendor/tornado/demos/blog/static/blog.css create mode 100644 vendor/tornado/demos/blog/templates/archive.html create mode 100644 vendor/tornado/demos/blog/templates/base.html create mode 100644 vendor/tornado/demos/blog/templates/compose.html create mode 100644 vendor/tornado/demos/blog/templates/entry.html create mode 100644 vendor/tornado/demos/blog/templates/feed.xml create mode 100644 vendor/tornado/demos/blog/templates/home.html create mode 100644 vendor/tornado/demos/blog/templates/modules/entry.html create mode 100755 vendor/tornado/demos/chat/chatdemo.py create mode 100644 vendor/tornado/demos/chat/static/chat.css create mode 100644 vendor/tornado/demos/chat/static/chat.js create mode 100644 vendor/tornado/demos/chat/templates/index.html create mode 100644 vendor/tornado/demos/chat/templates/message.html create mode 100644 vendor/tornado/demos/facebook/README create mode 100755 vendor/tornado/demos/facebook/facebook.py create mode 100644 vendor/tornado/demos/facebook/static/facebook.css create mode 100644 vendor/tornado/demos/facebook/static/facebook.js create mode 100644 vendor/tornado/demos/facebook/templates/modules/post.html create mode 100644 vendor/tornado/demos/facebook/templates/stream.html create mode 100644 vendor/tornado/demos/facebook/uimodules.py create mode 100755 vendor/tornado/demos/helloworld/helloworld.py create mode 100644 vendor/tornado/setup.py create mode 100644 vendor/tornado/tornado/__init__.py create mode 100644 vendor/tornado/tornado/auth.py create mode 100644 vendor/tornado/tornado/autoreload.py create mode 100644 vendor/tornado/tornado/database.py create mode 100644 vendor/tornado/tornado/epoll.c create mode 100644 vendor/tornado/tornado/escape.py create mode 100644 vendor/tornado/tornado/httpclient.py create mode 100644 vendor/tornado/tornado/httpserver.py create mode 100644 vendor/tornado/tornado/ioloop.py create mode 100644 vendor/tornado/tornado/iostream.py create mode 100644 vendor/tornado/tornado/locale.py create mode 100644 vendor/tornado/tornado/options.py create mode 100644 vendor/tornado/tornado/s3server.py create mode 100644 vendor/tornado/tornado/template.py create mode 100644 vendor/tornado/tornado/test/README create mode 100755 vendor/tornado/tornado/test/test_ioloop.py create mode 100644 vendor/tornado/tornado/web.py create mode 100644 vendor/tornado/tornado/websocket.py create mode 100644 vendor/tornado/tornado/win32_support.py create mode 100644 vendor/tornado/tornado/wsgi.py create mode 100644 vendor/tornado/website/app.yaml create mode 100644 vendor/tornado/website/index.yaml create mode 100644 vendor/tornado/website/markdown/__init__.py create mode 100644 vendor/tornado/website/markdown/blockparser.py create mode 100644 vendor/tornado/website/markdown/blockprocessors.py create mode 100644 vendor/tornado/website/markdown/commandline.py create mode 100644 vendor/tornado/website/markdown/etree_loader.py create 
mode 100644 vendor/tornado/website/markdown/extensions/__init__.py create mode 100644 vendor/tornado/website/markdown/extensions/toc.py create mode 100644 vendor/tornado/website/markdown/html4.py create mode 100644 vendor/tornado/website/markdown/inlinepatterns.py create mode 100644 vendor/tornado/website/markdown/odict.py create mode 100644 vendor/tornado/website/markdown/postprocessors.py create mode 100644 vendor/tornado/website/markdown/preprocessors.py create mode 100644 vendor/tornado/website/markdown/treeprocessors.py create mode 100644 vendor/tornado/website/static/base.css create mode 100755 vendor/tornado/website/static/facebook.png create mode 100755 vendor/tornado/website/static/friendfeed.png create mode 100644 vendor/tornado/website/static/robots.txt create mode 100644 vendor/tornado/website/static/tornado-0.1.tar.gz create mode 100644 vendor/tornado/website/static/tornado-0.2.tar.gz create mode 100644 vendor/tornado/website/static/tornado.png create mode 100755 vendor/tornado/website/static/twitter.png create mode 100644 vendor/tornado/website/templates/base.html create mode 100644 vendor/tornado/website/templates/documentation.html create mode 100644 vendor/tornado/website/templates/documentation.txt create mode 100644 vendor/tornado/website/templates/index.html create mode 100644 vendor/tornado/website/website.py diff --git a/CA/.gitignore b/CA/.gitignore new file mode 100644 index 000000000000..fae0922bf949 --- /dev/null +++ b/CA/.gitignore @@ -0,0 +1,11 @@ +index.txt +index.txt.old +index.txt.attr +index.txt.attr.old +cacert.pem +serial +serial.old +openssl.cnf +private/* +newcerts/* + diff --git a/CA/INTER/.gitignore b/CA/INTER/.gitignore new file mode 100644 index 000000000000..72e8ffc0db8a --- /dev/null +++ b/CA/INTER/.gitignore @@ -0,0 +1 @@ +* diff --git a/CA/geninter.sh b/CA/geninter.sh new file mode 100755 index 000000000000..ad3332ad9235 --- /dev/null +++ b/CA/geninter.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ARG is the id of the user + +mkdir INTER/$1 +cd INTER/$1 +cp ../../openssl.cnf.tmpl openssl.cnf +sed -i -e s/%USERNAME%/$1/g openssl.cnf +mkdir certs crl newcerts private +echo "10" > serial +touch index.txt +openssl genrsa -out private/cakey.pem 1024 -config ./openssl.cnf -batch -nodes +openssl req -new -sha1 -key private/cakey.pem -out ../../reqs/inter$1.csr -batch -subj "/C=US/ST=California/L=Mountain View/O=Anso Labs/OU=Nova Dev/CN=customer-intCA-$1" +cd ../../ +openssl ca -extensions v3_ca -days 365 -out INTER/$1/cacert.pem -in reqs/inter$1.csr -config openssl.cnf -batch \ No newline at end of file diff --git a/CA/genrootca.sh b/CA/genrootca.sh new file mode 100755 index 000000000000..e21f48d77ab9 --- /dev/null +++ b/CA/genrootca.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [ -f "cacert.pem" ]; +then + echo "Not installing, it's already done." +else + cp openssl.cnf.tmpl openssl.cnf + sed -i -e s/%USERNAME%/ROOT/g openssl.cnf + openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes + touch index.txt + echo "10" > serial +fi diff --git a/CA/newcerts/.placeholder b/CA/newcerts/.placeholder new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/CA/openssl.cnf.tmpl b/CA/openssl.cnf.tmpl new file mode 100644 index 000000000000..b06f1cca0b78 --- /dev/null +++ b/CA/openssl.cnf.tmpl @@ -0,0 +1,87 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# OpenSSL configuration file. +# + +# Establish working directory. + +dir = . + +[ ca ] +default_ca = CA_default +unique_subject = no + +[ CA_default ] +serial = $dir/serial +database = $dir/index.txt +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem +private_key = $dir/private/cakey.pem +default_days = 365 +default_md = md5 +preserve = no +email_in_dn = no +nameopt = default_ca +certopt = default_ca +policy = policy_match + +[ policy_match ] +countryName = match +stateOrProvinceName = match +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + + +[ req ] +default_bits = 1024 # Size of keys +default_keyfile = key.pem # name of generated keys +default_md = md5 # message digest algorithm +string_mask = nombstr # permitted characters +distinguished_name = req_distinguished_name + +[ req_distinguished_name ] +# Variable name Prompt string +#---------------------- ---------------------------------- +0.organizationName = Organization Name (company) +organizationalUnitName = Organizational Unit Name (department, division) +emailAddress = Email Address +emailAddress_max = 40 +localityName = Locality Name (city, district) +stateOrProvinceName = State or Province Name (full name) +countryName = Country Name (2 letter code) +countryName_min = 2 +countryName_max = 2 +commonName = Common Name (hostname, IP, or your name) +commonName_max = 64 + +# Default values for the above, for consistency and less typing. 
+# Variable name Value +#------------------------------ ------------------------------ +0.organizationName_default = NOVA %USERNAME% +localityName_default = Mountain View +stateOrProvinceName_default = California +countryName_default = US + +[ v3_ca ] +basicConstraints = CA:TRUE +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer:always + +[ v3_req ] +basicConstraints = CA:FALSE +subjectKeyIdentifier = hash diff --git a/CA/private/.placeholder b/CA/private/.placeholder new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/CA/reqs/.gitignore b/CA/reqs/.gitignore new file mode 100644 index 000000000000..72e8ffc0db8a --- /dev/null +++ b/CA/reqs/.gitignore @@ -0,0 +1 @@ +* diff --git a/HACKING b/HACKING new file mode 100644 index 000000000000..77e42b8e6f10 --- /dev/null +++ b/HACKING @@ -0,0 +1,53 @@ +Nova Style Commandments +======================= + +Step 1: Read http://www.python.org/dev/peps/pep-0008/ +Step 2: Read http://www.python.org/dev/peps/pep-0008/ again +Step 3: Read on + +Imports +------- +- thou shalt not import objects, only modules +- thou shalt not import more than one module per line +- thou shalt not make relative imports +- thou shalt "from nova import vendor" before importing third party code +- thou shalt organize your imports according to the following template + +:: + # vim: tabstop=4 shiftwidth=4 softtabstop=4 + {{stdlib imports in human alphabetical order}} + \n + from nova import vendor + {{vendor imports in human alphabetical order}} + \n + {{nova imports in human alphabetical order}} + \n + \n + {{begin your code}} + + +General +------- +- thou shalt put two newlines twixt toplevel code (funcs, classes, etc) +- thou shalt put one newline twixt methods in classes and anywhere else +- thou shalt not write "except:", use "except Exception:" at the very least +- thou shalt include your name with TODOs as in "TODO(termie)" +- thou shalt not name anything the same name as a builtin or reserved word +- thou shalt not violate causality in our time cone, or else + + +Human Alphabetical Order Examples +--------------------------------- +:: + import httplib + import logging + import random + import StringIO + import time + import unittest + + from nova import flags + from nova import test + from nova.auth import users + from nova.endpoint import api + from nova.endpoint import cloud diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000000..68c771a09995 --- /dev/null +++ b/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/bin/nova-api b/bin/nova-api new file mode 100755 index 000000000000..8fea1da4d5ce --- /dev/null +++ b/bin/nova-api @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + Tornado daemon for the main API endpoint. 
+""" + +import logging + +from nova import vendor +from tornado import httpserver +from tornado import ioloop + +from nova import flags +from nova import rpc +from nova import server +from nova import utils +from nova.auth import users +from nova.endpoint import admin +from nova.endpoint import api +from nova.endpoint import cloud + +FLAGS = flags.FLAGS + + +def main(_argv): + user_manager = users.UserManager() + controllers = { + 'Cloud': cloud.CloudController(), + 'Admin': admin.AdminController(user_manager) + } + _app = api.APIServerApplication(user_manager, controllers) + + conn = rpc.Connection.instance() + consumer = rpc.AdapterConsumer(connection=conn, + topic=FLAGS.cloud_topic, + proxy=controllers['Cloud']) + + io_inst = ioloop.IOLoop.instance() + _injected = consumer.attach_to_tornado(io_inst) + + http_server = httpserver.HTTPServer(_app) + http_server.listen(FLAGS.cc_port) + logging.debug('Started HTTP server on %s', FLAGS.cc_port) + io_inst.start() + + +if __name__ == '__main__': + utils.default_flagfile() + server.serve('nova-api', main) diff --git a/bin/nova-compute b/bin/nova-compute new file mode 100755 index 000000000000..bd3648d206c8 --- /dev/null +++ b/bin/nova-compute @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + Twistd daemon for the nova compute nodes. + Receives messages via AMQP, manages pool of worker threads + for async tasks. +""" + +import logging +import os +import sys + +# NOTE(termie): kludge so that we can run this from the bin directory in the +# checkout without having to screw with paths +NOVA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'nova') +if os.path.exists(NOVA_PATH): + sys.path.insert(0, os.path.dirname(NOVA_PATH)) + +from nova import vendor +from carrot import connection +from carrot import messaging +from twisted.internet import task +from twisted.application import service + +from nova import flags +from nova import rpc +from nova import twistd +from nova.compute import node + + +FLAGS = flags.FLAGS +# NOTE(termie): This file will necessarily be re-imported under different +# context when the twistd.serve() call is made below so any +# flags we define here will have to be conditionally defined, +# flags defined by imported modules are safe. 
+if 'node_report_state_interval' not in FLAGS: + flags.DEFINE_integer('node_report_state_interval', 10, + 'seconds between nodes reporting state to cloud', + lower_bound=1) +logging.getLogger().setLevel(logging.DEBUG) + +def main(): + logging.warn('Starting compute node') + n = node.NetworkNode() + d = n.adopt_instances() + d.addCallback(lambda x: logging.info('Adopted %d instances', x)) + + conn = rpc.Connection.instance() + consumer_all = rpc.AdapterConsumer( + connection=conn, + topic='%s' % FLAGS.compute_topic, + proxy=n) + + consumer_node = rpc.AdapterConsumer( + connection=conn, + topic='%s.%s' % (FLAGS.compute_topic, FLAGS.node_name), + proxy=n) + + # heartbeat = task.LoopingCall(n.report_state) + # heartbeat.start(interval=FLAGS.node_report_state_interval, now=False) + + injected = consumer_all.attach_to_twisted() + injected = consumer_node.attach_to_twisted() + + # This is the parent service that twistd will be looking for when it + # parses this file, return it so that we can get it into globals below + application = service.Application('nova-compute') + n.setServiceParent(application) + return application + + +# NOTE(termie): When this script is executed from the commandline what it will +# actually do is tell the twistd application runner that it +# should run this file as a twistd application (see below). +if __name__ == '__main__': + twistd.serve(__file__) + +# NOTE(termie): When this script is loaded by the twistd application runner +# this code path will be executed and twistd will expect a +# variable named 'application' to be available, it will then +# handle starting it and stopping it. +if __name__ == '__builtin__': + application = main() diff --git a/bin/nova-manage b/bin/nova-manage new file mode 100755 index 000000000000..d2108626b982 --- /dev/null +++ b/bin/nova-manage @@ -0,0 +1,158 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + CLI interface for nova management. + Connects to the running ADMIN api in the api daemon. 
+""" + +import sys + +from nova import flags +from nova import utils +from nova.auth import users +from nova.compute import model +from nova.endpoint import cloud +import time + +FLAGS = flags.FLAGS + + +class UserCommands(object): + def __init__(self): + self.manager = users.UserManager.instance() + + def __print_export(self, user): + print 'export EC2_ACCESS_KEY=%s' % user.access + print 'export EC2_SECRET_KEY=%s' % user.secret + + def admin(self, name, access=None, secret=None): + """creates a new admin and prints exports + arguments: name [access] [secret]""" + user = self.manager.create_user(name, access, secret, True) + self.__print_export(user) + + def create(self, name, access=None, secret=None): + """creates a new user and prints exports + arguments: name [access] [secret]""" + user = self.manager.create_user(name, access, secret, False) + self.__print_export(user) + + def delete(self, name): + """deletes an existing user + arguments: name""" + self.manager.delete_user(name) + + def exports(self, name): + """prints access and secrets for user in export format + arguments: name""" + user = self.manager.get_user(name) + if user: + self.__print_export(user) + else: + print "User %s doesn't exist" % name + + def list(self): + """lists all users + arguments: """ + for user in self.manager.get_users(): + print user.name + + def zip(self, name, filename='nova.zip'): + """exports credentials for user to a zip file + arguments: name [filename='nova.zip]""" + user = self.manager.get_user(name) + if user: + with open(filename, 'w') as f: + f.write(user.get_credentials()) + else: + print "User %s doesn't exist" % name + + +def usage(script_name): + print script_name + " category action []" + + +categories = [ + ('user', UserCommands), +] + + +def lazy_match(name, key_value_tuples): + """finds all objects that have a key that case insensitively contains [name] + key_value_tuples is a list of tuples of the form (key, value) + returns a list of tuples of the form (key, value)""" + return [(k, v) for (k, v) in key_value_tuples if k.lower().find(name.lower()) == 0] + + +def methods_of(obj): + """get all callable methods of an object that don't start with underscore + returns a list of tuples of the form (method_name, method)""" + return [(i, getattr(obj, i)) for i in dir(obj) if callable(getattr(obj, i)) and not i.startswith('_')] + + +if __name__ == '__main__': + utils.default_flagfile() + argv = FLAGS(sys.argv) + script_name = argv.pop(0) + if len(argv) < 1: + usage(script_name) + print "Available categories:" + for k, v in categories: + print "\t%s" % k + sys.exit(2) + category = argv.pop(0) + matches = lazy_match(category, categories) + if len(matches) == 0: + print "%s does not match any categories:" % category + for k, v in categories: + print "\t%s" % k + sys.exit(2) + if len(matches) > 1: + print "%s matched multiple categories:" % category + for k, v in matches: + print "\t%s" % k + sys.exit(2) + # instantiate the command group object + category, fn = matches[0] + command_object = fn() + actions = methods_of(command_object) + if len(argv) < 1: + usage(script_name) + print "Available actions for %s category:" % category + for k, v in actions: + print "\t%s" % k + sys.exit(2) + action = argv.pop(0) + matches = lazy_match(action, actions) + if len(matches) == 0: + print "%s does not match any actions" % action + for k, v in actions: + print "\t%s" % k + sys.exit(2) + if len(matches) > 1: + print "%s matched multiple actions:" % action + for k, v in matches: + print "\t%s" % k + sys.exit(2) + 
action, fn = matches[0] + # call the action with the remaining arguments + try: + fn(*argv) + except TypeError: + print "Wrong number of arguments supplied" + print "%s %s: %s" % (category, action, fn.__doc__) + diff --git a/bin/nova-objectstore b/bin/nova-objectstore new file mode 100755 index 000000000000..38a23f1ffd0f --- /dev/null +++ b/bin/nova-objectstore @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + Tornado daemon for nova objectstore. Supports S3 API. +""" + +import logging + +from nova import vendor +from tornado import httpserver +from tornado import ioloop + +from nova import flags +from nova import server +from nova import utils +from nova.auth import users +from nova.objectstore import handler + + +FLAGS = flags.FLAGS + + +def main(argv): + # FIXME: if this log statement isn't here, no logging + # appears from other files and app won't start daemonized + logging.debug('Started HTTP server on %s' % (FLAGS.s3_internal_port)) + app = handler.Application(users.UserManager()) + server = httpserver.HTTPServer(app) + server.listen(FLAGS.s3_internal_port) + ioloop.IOLoop.instance().start() + + +if __name__ == '__main__': + utils.default_flagfile() + server.serve('nova-objectstore', main) diff --git a/bin/nova-volume b/bin/nova-volume new file mode 100755 index 000000000000..e36954cd373c --- /dev/null +++ b/bin/nova-volume @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop + +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + Tornado Storage daemon manages AoE volumes via AMQP messaging. 
+""" + +import logging + +from nova import vendor +from tornado import ioloop + +from nova import flags +from nova import rpc +from nova import server +from nova import utils +from nova.volume import storage + + +FLAGS = flags.FLAGS +flags.DEFINE_integer('storage_report_state_interval', 10, + 'seconds between broadcasting state to cloud', + lower_bound=1) + + +def main(argv): + bs = storage.BlockStore() + + conn = rpc.Connection.instance() + consumer_all = rpc.AdapterConsumer( + connection=conn, + topic='%s' % FLAGS.storage_topic, + proxy=bs) + + consumer_node = rpc.AdapterConsumer( + connection=conn, + topic='%s.%s' % (FLAGS.storage_topic, FLAGS.node_name), + proxy=bs) + + io_inst = ioloop.IOLoop.instance() + scheduler = ioloop.PeriodicCallback( + lambda: bs.report_state(), + FLAGS.storage_report_state_interval * 1000, + io_loop=io_inst) + + injected = consumer_all.attachToTornado(io_inst) + injected = consumer_node.attachToTornado(io_inst) + scheduler.start() + io_inst.start() + + +if __name__ == '__main__': + utils.default_flagfile() + server.serve('nova-volume', main) + diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 000000000000..2b226e048dc7 --- /dev/null +++ b/debian/changelog @@ -0,0 +1,6 @@ +nova (0.3.0-1) UNRELEASED; urgency=low + + * initial release + + -- Jesse Andrews Thur, 27 May 2010 12:28:00 -0700 + diff --git a/debian/compat b/debian/compat new file mode 100644 index 000000000000..7f8f011eb73d --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +7 diff --git a/debian/control b/debian/control new file mode 100644 index 000000000000..81af9f4e9e1a --- /dev/null +++ b/debian/control @@ -0,0 +1,40 @@ +Source: nova +Section: net +Priority: extra +Maintainer: Jesse Andrews +Build-Depends: debhelper (>= 7) +Build-Depends-Indep: python-support +Standards-Version: 3.8.4 +XS-Python-Version: 2.6 + +Package: nova-common +Architecture: all +Depends: ${python:Depends}, aoetools, vlan, python-ipy, python-boto, python-m2crypto, python-pycurl, python-twisted, python-daemon, python-redis, python-carrot, python-lockfile, python-gflags, python-tornado, ${misc:Depends} +Provides: ${python:Provides} +Conflicts: nova +Description: Nova is a cloud + +Package: nova-compute +Architecture: all +Depends: nova-common (= ${binary:Version}), kpartx, kvm, python-libvirt, libvirt-bin (>= 0.8.1), ${python:Depends}, ${misc:Depends} +Description: Nova compute + +Package: nova-volume +Architecture: all +Depends: nova-common (= ${binary:Version}), vblade, vblade-persist, ${python:Depends}, ${misc:Depends} +Description: Nova volume + +Package: nova-api +Architecture: all +Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends} +Description: Nova api + +Package: nova-objectstore +Architecture: all +Depends: nova-common (= ${binary:Version}), ${python:Depends}, ${misc:Depends} +Description: Nova object store + +Package: nova-tools +Architecture: all +Depends: python-boto, ${python:Depends}, ${misc:Depends} +Description: CLI tools to access nova diff --git a/debian/nova-api.init b/debian/nova-api.init new file mode 100644 index 000000000000..925c92c5e3fb --- /dev/null +++ b/debian/nova-api.init @@ -0,0 +1,69 @@ +#! 
/bin/sh +### BEGIN INIT INFO +# Provides: nova-api +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: nova-api +# Description: nova-api +### END INIT INFO + + +set -e + +DAEMON=/usr/bin/nova-api +DAEMON_ARGS="--flagfile=/etc/nova.conf" +PIDFILE=/var/run/nova-api.pid + +ENABLED=false + +if test -f /etc/default/nova-api; then + . /etc/default/nova-api +fi + +. /lib/lsb/init-functions + +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" + +case "$1" in + start) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Starting nova api" "nova-api" + cd /var/run + if $DAEMON $DAEMON_ARGS start; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + stop) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Stopping nova api" "nova-api" + cd /var/run + if $DAEMON $DAEMON_ARGS stop; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + restart|force-reload) + test "$ENABLED" = "true" || exit 1 + cd /var/run + if $DAEMON $DAEMON_ARGS restart; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + status) + test "$ENABLED" = "true" || exit 0 + status_of_proc -p $PIDFILE $DAEMON nova-api && exit 0 || exit $? + ;; + *) + log_action_msg "Usage: /etc/init.d/nova-api {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/debian/nova-api.install b/debian/nova-api.install new file mode 100644 index 000000000000..757235b11110 --- /dev/null +++ b/debian/nova-api.install @@ -0,0 +1 @@ +bin/nova-api usr/bin diff --git a/debian/nova-common.install b/debian/nova-common.install new file mode 100644 index 000000000000..c9358ac419ba --- /dev/null +++ b/debian/nova-common.install @@ -0,0 +1,4 @@ +bin/nova-manage usr/bin +nova/auth/novarc.template usr/lib/pymodules/python2.6/nova/auth +nova/compute/libvirt.xml.template usr/lib/pymodules/python2.6/nova/compute +usr/lib/python*/*-packages/nova/* diff --git a/debian/nova-compute.init b/debian/nova-compute.init new file mode 100644 index 000000000000..89d0e5fce640 --- /dev/null +++ b/debian/nova-compute.init @@ -0,0 +1,69 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: nova-compute +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: nova-compute +# Description: nova-compute +### END INIT INFO + + +set -e + +DAEMON=/usr/bin/nova-compute +DAEMON_ARGS="--flagfile=/etc/nova.conf" +PIDFILE=/var/run/nova-compute.pid + +ENABLED=false + +if test -f /etc/default/nova-compute; then + . /etc/default/nova-compute +fi + +. /lib/lsb/init-functions + +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" + +case "$1" in + start) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Starting nova compute" "nova-compute" + cd /var/run + if $DAEMON $DAEMON_ARGS start; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + stop) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Stopping nova compute" "nova-compute" + cd /var/run + if $DAEMON $DAEMON_ARGS stop; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + restart|force-reload) + test "$ENABLED" = "true" || exit 1 + cd /var/run + if $DAEMON $DAEMON_ARGS restart; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + status) + test "$ENABLED" = "true" || exit 0 + status_of_proc -p $PIDFILE $DAEMON nova-compute && exit 0 || exit $? 
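# Note that each of these init scripts is disabled out of the box (ENABLED
# defaults to false above) and is switched on via the matching file under
# /etc/default. A minimal example for this service would be:
#
#   # /etc/default/nova-compute
#   ENABLED=true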
+ ;; + *) + log_action_msg "Usage: /etc/init.d/nova-compute {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/debian/nova-compute.install b/debian/nova-compute.install new file mode 100644 index 000000000000..6387cef07f79 --- /dev/null +++ b/debian/nova-compute.install @@ -0,0 +1 @@ +bin/nova-compute usr/bin diff --git a/debian/nova-objectstore.init b/debian/nova-objectstore.init new file mode 100644 index 000000000000..be7d32d8e014 --- /dev/null +++ b/debian/nova-objectstore.init @@ -0,0 +1,69 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: nova-objectstore +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: nova-objectstore +# Description: nova-objectstore +### END INIT INFO + + +set -e + +DAEMON=/usr/bin/nova-objectstore +DAEMON_ARGS="--flagfile=/etc/nova.conf" +PIDFILE=/var/run/nova-objectstore.pid + +ENABLED=false + +if test -f /etc/default/nova-objectstore; then + . /etc/default/nova-objectstore +fi + +. /lib/lsb/init-functions + +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" + +case "$1" in + start) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Starting nova objectstore" "nova-objectstore" + cd /var/run + if $DAEMON $DAEMON_ARGS start; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + stop) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Stopping nova objectstore" "nova-objectstore" + cd /var/run + if $DAEMON $DAEMON_ARGS stop; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + restart|force-reload) + test "$ENABLED" = "true" || exit 1 + cd /var/run + if $DAEMON $DAEMON_ARGS restart; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + status) + test "$ENABLED" = "true" || exit 0 + status_of_proc -p $PIDFILE $DAEMON nova-objectstore && exit 0 || exit $? + ;; + *) + log_action_msg "Usage: /etc/init.d/nova-objectstore {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/debian/nova-objectstore.install b/debian/nova-objectstore.install new file mode 100644 index 000000000000..ccc60fcccc5a --- /dev/null +++ b/debian/nova-objectstore.install @@ -0,0 +1 @@ +bin/nova-objectstore usr/bin diff --git a/debian/nova-volume.init b/debian/nova-volume.init new file mode 100644 index 000000000000..80da3f70c613 --- /dev/null +++ b/debian/nova-volume.init @@ -0,0 +1,69 @@ +#! /bin/sh +### BEGIN INIT INFO +# Provides: nova-volume +# Required-Start: $remote_fs $syslog +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: nova-volume +# Description: nova-volume +### END INIT INFO + + +set -e + +DAEMON=/usr/bin/nova-volume +DAEMON_ARGS="--flagfile=/etc/nova.conf" +PIDFILE=/var/run/nova-volume.pid + +ENABLED=false + +if test -f /etc/default/nova-volume; then + . /etc/default/nova-volume +fi + +. 
/lib/lsb/init-functions + +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" + +case "$1" in + start) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Starting nova volume" "nova-volume" + cd /var/run + if $DAEMON $DAEMON_ARGS start; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + stop) + test "$ENABLED" = "true" || exit 0 + log_daemon_msg "Stopping nova volume" "nova-volume" + cd /var/run + if $DAEMON $DAEMON_ARGS stop; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + restart|force-reload) + test "$ENABLED" = "true" || exit 1 + cd /var/run + if $DAEMON $DAEMON_ARGS restart; then + log_end_msg 0 + else + log_end_msg 1 + fi + ;; + status) + test "$ENABLED" = "true" || exit 0 + status_of_proc -p $PIDFILE $DAEMON nova-volume && exit 0 || exit $? + ;; + *) + log_action_msg "Usage: /etc/init.d/nova-volume {start|stop|restart|force-reload|status}" + exit 1 + ;; +esac + +exit 0 diff --git a/debian/nova-volume.install b/debian/nova-volume.install new file mode 100644 index 000000000000..37b535c0343b --- /dev/null +++ b/debian/nova-volume.install @@ -0,0 +1 @@ +bin/nova-volume usr/bin diff --git a/debian/pycompat b/debian/pycompat new file mode 100644 index 000000000000..0cfbf08886fc --- /dev/null +++ b/debian/pycompat @@ -0,0 +1 @@ +2 diff --git a/debian/pyversions b/debian/pyversions new file mode 100644 index 000000000000..0c043f18c333 --- /dev/null +++ b/debian/pyversions @@ -0,0 +1 @@ +2.6- diff --git a/debian/rules b/debian/rules new file mode 100755 index 000000000000..2d33f6ac8992 --- /dev/null +++ b/debian/rules @@ -0,0 +1,4 @@ +#!/usr/bin/make -f + +%: + dh $@ diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 000000000000..88f9974bd731 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1 @@ +_build/* diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000000..b2f74e85aaf0 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,89 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." 
+ +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nova.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nova.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/docs/_build/.gitignore b/docs/_build/.gitignore new file mode 100644 index 000000000000..72e8ffc0db8a --- /dev/null +++ b/docs/_build/.gitignore @@ -0,0 +1 @@ +* diff --git a/docs/_static/.gitignore b/docs/_static/.gitignore new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/_templates/.gitignore b/docs/_templates/.gitignore new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/docs/architecture.rst b/docs/architecture.rst new file mode 100644 index 000000000000..9aab7afbf9db --- /dev/null +++ b/docs/architecture.rst @@ -0,0 +1,46 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +nova System Architecture +======================== + +Nova is built on a shared-nothing, messaging-based architecture. All of the major nova components can be run on multiple servers. This means that most component to component communication must go via message queue. In order to avoid blocking each component while waiting for a response, we use deferred objects, with a callback that gets triggered when a response is received. + +In order to achieve shared-nothing with multiple copies of the same component (especially when the component is an API server that needs to reply with state information in a timely fashion), we need to keep all of our system state in a distributed data system. Updates to system state are written into this system, using atomic transactions when necessary. Requests for state are read out of this system. 
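The deferred/callback pattern described above can be sketched with nothing but the twisted.internet.defer primitives; the actual queue plumbing lives in nova/rpc.py and is not reproduced here::

    from twisted.internet import defer

    def on_reply(response):
        # runs only once the worker's reply arrives off the message queue
        print 'worker replied:', response
        return response

    d = defer.Deferred()       # stands in for a pending RPC round trip
    d.addCallback(on_reply)

    # ... later, when the reply is delivered by the message bus:
    d.callback({'state': 'running'})

Each controller can therefore issue many requests concurrently and let the reactor fire the callbacks as replies trickle in.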
In limited cases, these read calls are memoized within controllers for short periods of time. (Such a limited case would be, for instance, the current list of system users.) + + +Components +---------- + +The diagram below shows the major components and the channels over which they communicate. + +:: + + [ User Manager ] ---- ( LDAP ) + | + | / [ Storage ] - ( ATAoE ) + [ API server ] -> [ Cloud ] < AMQP > + | \ [ Nodes ] - ( libvirt/kvm ) + < HTTP > + | + [ S3 ] + + +* API: receives http requests from boto, converts commands to/from API format, and sends requests to the cloud controller +* Cloud Controller: global state of system, talks to ldap, s3, and node/storage workers through a queue +* Nodes: worker that spawns instances +* S3: tornado based http/s3 server +* User Manager: create/manage users, which are stored in ldap +* Network Controller: allocate and deallocate IPs and VLANs diff --git a/docs/auth.rst b/docs/auth.rst new file mode 100644 index 000000000000..ba001cfeccff --- /dev/null +++ b/docs/auth.rst @@ -0,0 +1,213 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Auth Documentation +================== + +Nova provides RBAC (Role-based access control) of the AWS-type APIs. The roles we define are listed below. + +Role-Based Access Control of AWS-style APIs using SAML Assertions +“Achieving FIPS 199 Moderate certification of a hybrid cloud environment using CloudAudit and declarative C.I.A. classifications” + +Introduction +-------------- + +We will investigate one method for integrating an AWS-style API with US eAuthentication-compatible federated authentication systems, to achieve access controls and limits based on traditional operational roles. +Additionally, we will look at how combining this approach, with an implementation of the CloudAudit APIs, will allow us to achieve a certification under FIPS 199 Moderate classification for a hybrid cloud environment. + +Relationship of US eAuth to RBAC +-------------------------------- + +Typical implementations of US eAuth authentication systems are structured as follows:: + + [ MS Active Directory or other federated LDAP user store ] + --> backends to… + [ SUN Identity Manager or other SAML Policy Controller ] + --> maps URLs to groups… + [ Apache Policy Agent in front of eAuth-secured Web Application ] + +In more ideal implementations, the remainder of the application-specific account information is stored either in extended schema on the LDAP server itself, via the use of a translucent LDAP proxy, or in an independent datastore keyed off of the UID provided via SAML assertion. + +Basic AWS API call structure +---------------------------- + +AWS API calls are traditionally secured via Access and Secret Keys, which are used to sign API calls, along with traditional timestamps to prevent replay attacks. The APIs can be logically grouped into sets that align with five typical roles: + +* System User +* System Administrator +* Network Administrator +* Project Manager +* Cloud Administrator +* (IT-Sec?)
+ +There is an additional, conceptual end-user that may or may not have API access: + +* (EXTERNAL) End-user / Third-party User + +Basic operations are available to any System User: + +* Launch Instance +* Terminate Instance (their own) +* Create keypair +* Delete keypair +* Create, Upload, Delete: Buckets and Keys (Object Store) – their own +* Create, Attach, Delete Volume (Block Store) – their own + +System Administrators: + +* Register/Unregister Machine Image (project-wide) +* Change Machine Image properties (public / private) +* Request / Review CloudAudit Scans + +Network Administrator: + +* Change Firewall Rules, define Security Groups +* Allocate, Associate, Deassociate Public IP addresses + +Project Manager: + +* Launch and Terminate Instances (project-wide) +* CRUD of Object and Block store (project-wide) + +Cloud Administrator: + +* Register / Unregister Kernel and Ramdisk Images +* Register / Unregister Machine Image (any) + +Enhancements +------------ + +* SAML Token passing +* REST interfaces +* SOAP interfaces + +We wrap the SAML token into the API calls, then store the UID (fetched via backchannel) into the instance metadata, providing end-to-end auditability of ownership and responsibility, without PII. + +CloudAudit APIs +--------------- + +* Request formats +* Response formats +* Stateless asynchronous queries + +CloudAudit queries may spawn long-running processes (similar to launching instances, etc.). They need to return a ReservationId in the same fashion, which can be used in further queries for updates. +RBAC of CloudAudit API calls is critical, since detailed system information is a system vulnerability. + +Type declarations +----------------- + +* Data declarations – Volumes and Objects +* System declarations – Instances + +Existing API calls to launch instances specify a single, combined “type” flag. We propose to extend this with three additional type declarations, mapping to the “Confidentiality, Integrity, Availability” classifications of FIPS 199. An example API call would look like:: + + RunInstances type=m1.large number=1 secgroup=default key=mykey confidentiality=low integrity=low availability=low + +These additional parameters would also apply to creation of block storage volumes (along with the existing parameter of ‘size’), and creation of object storage ‘buckets’. (C.I.A. classifications on a bucket would be inherited by the keys within this bucket.) + +Request Brokering +----------------- + + * Cloud Interop + * IMF Registration / PubSub + * Digital C&A + +Establishing declarative semantics for individual API calls will allow the cloud environment to seamlessly proxy these API calls to external, third-party vendors – when the requested CIA levels match. + +See related work within the Infrastructure 2.0 working group for more information on how the IMF Metadata specification could be utilized to manage registration of these vendors and their C&A credentials. + +Dirty Cloud – Hybrid Data Centers +--------------------------------- + +* CloudAudit bridge interfaces +* Anything in the ARP table + +A hybrid cloud environment provides dedicated, potentially co-located physical hardware with a network interconnect to the project or users’ cloud virtual network. + +This interconnect is typically a bridged VPN connection.
Any machines that can be bridged into a hybrid environment in this fashion (at Layer 2) must implement a minimum version of the CloudAudit spec, such that they can be queried to provide a complete picture of the IT-sec runtime environment. + +Network discovery protocols (ARP, CDP) can be applied in this case, and existing protocols (SNMP location data, DNS LOC records) overloaded to provide CloudAudit information. + +The Details +----------- + + * Preliminary Roles Definitions + * Categorization of available API calls + * SAML assertion vocabulary + +System limits +------------- + +The following limits need to be defined and enforced: + +* Total number of instances allowed (user / project) +* Total number of instances, per instance type (user / project) +* Total number of volumes (user / project) +* Maximum size of volume +* Cumulative size of all volumes +* Total use of object storage (GB) +* Total number of Public IPs + + +Further Challenges +------------------ + * Prioritization of users / jobs in shared computing environments + * Incident response planning + * Limit launch of instances to specific security groups based on AMI + * Store AMIs in LDAP for added property control + + + +The :mod:`access` Module +-------------------------- + +.. automodule:: nova.auth.access + :members: + :undoc-members: + :show-inheritance: + +The :mod:`signer` Module +------------------------ + +.. automodule:: nova.auth.signer + :members: + :undoc-members: + :show-inheritance: + +The :mod:`users` Module +----------------------- + +.. automodule:: nova.auth.users + :members: + :undoc-members: + :show-inheritance: + +The :mod:`users_unittest` Module +-------------------------------- + +.. automodule:: nova.tests.users_unittest + :members: + :undoc-members: + :show-inheritance: + +The :mod:`access_unittest` Module +--------------------------------- + +.. automodule:: nova.tests.access_unittest + :members: + :undoc-members: + :show-inheritance: + + diff --git a/docs/binaries.rst b/docs/binaries.rst new file mode 100644 index 000000000000..eee0891642e7 --- /dev/null +++ b/docs/binaries.rst @@ -0,0 +1,29 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Nova Binaries +=============== + +* nova-api +* nova-compute +* nova-manage +* nova-objectstore +* nova-volume + +The configuration of these binaries relies on "flagfiles" using the google +gflags package. If present, the nova.conf file will be used as the flagfile +- otherwise, it must be specified on the command line:: + + $ python node_worker.py --flagfile flagfile \ No newline at end of file diff --git a/docs/compute.rst b/docs/compute.rst new file mode 100644 index 000000000000..e2b32fae0431 --- /dev/null +++ b/docs/compute.rst @@ -0,0 +1,72 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
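Tying the flagfile convention above to the Debian packaging: the init scripts all pass --flagfile=/etc/nova.conf, so a minimal flagfile might look like the following (the values are illustrative, and only flags that appear elsewhere in this tree are shown)::

    # /etc/nova.conf
    --storage_report_state_interval=10
    --node_name=node-01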
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Compute Documentation +===================== + +This page contains the Compute Package documentation. + + +The :mod:`disk` Module +---------------------- + +.. automodule:: nova.compute.disk + :members: + :undoc-members: + :show-inheritance: + +The :mod:`exception` Module +--------------------------- + +.. automodule:: nova.compute.exception + :members: + :undoc-members: + :show-inheritance: + +The :mod:`model` Module +------------------------- + +.. automodule:: nova.compute.model + :members: + :undoc-members: + :show-inheritance: + +The :mod:`network` Module +------------------------- + +.. automodule:: nova.compute.network + :members: + :undoc-members: + :show-inheritance: + +The :mod:`node` Module +---------------------- + +.. automodule:: nova.compute.node + :members: + :undoc-members: + :show-inheritance: + +RELATED TESTS +--------------- + +The :mod:`node_unittest` Module +------------------------------- + +.. automodule:: nova.tests.node_unittest + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000000..9dfdfc8be65c --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +# +# nova documentation build configuration file, created by +# sphinx-quickstart on Sat May 1 15:17:47 2010. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.append(os.path.abspath('.')) +sys.path.append([os.path.abspath('../nova'),os.path.abspath('../'),os.path.abspath('../vendor')]) +from nova import vendor + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig'] +#sphinx_to_github = False +todo_include_todos = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'nova' +copyright = u'2010, Anso Labs, LLC' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.42' +# The full version, including alpha/beta/rc tags. 
+release = '0.42' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['nova.'] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. 
+#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'novadoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'nova.tex', u'nova Documentation', + u'Anso Labs, LLC', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'http://docs.python.org/': None} diff --git a/docs/endpoint.rst b/docs/endpoint.rst new file mode 100644 index 000000000000..86a1a3be08ad --- /dev/null +++ b/docs/endpoint.rst @@ -0,0 +1,89 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Endpoint Documentation +====================== + +This page contains the Endpoint Package documentation. + +The :mod:`admin` Module +----------------------- + +.. automodule:: nova.endpoint.admin + :members: + :undoc-members: + :show-inheritance: + +The :mod:`api` Module +--------------------- + +.. automodule:: nova.endpoint.api + :members: + :undoc-members: + :show-inheritance: + +The :mod:`cloud` Module +----------------------- + +.. automodule:: nova.endpoint.cloud + :members: + :undoc-members: + :show-inheritance: + +The :mod:`images` Module +------------------------ + +.. automodule:: nova.endpoint.images + :members: + :undoc-members: + :show-inheritance: + + +RELATED TESTS +-------------- + +The :mod:`api_unittest` Module +------------------------------ + +.. automodule:: nova.tests.api_unittest + :members: + :undoc-members: + :show-inheritance: + +The :mod:`api_integration` Module +--------------------------------- + +.. automodule:: nova.tests.api_integration + :members: + :undoc-members: + :show-inheritance: + +The :mod:`cloud_unittest` Module +-------------------------------- + +.. 
automodule:: nova.tests.cloud_unittest + :members: + :undoc-members: + :show-inheritance: + +The :mod:`network_unittest` Module +---------------------------------- + +.. automodule:: nova.tests.network_unittest + :members: + :undoc-members: + :show-inheritance: + + diff --git a/docs/fakes.rst b/docs/fakes.rst new file mode 100644 index 000000000000..f105c6b8d668 --- /dev/null +++ b/docs/fakes.rst @@ -0,0 +1,41 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Nova Fakes +========== + +The :mod:`fakevirt` Module +-------------------------- + +.. automodule:: nova.fakevirt + :members: + :undoc-members: + :show-inheritance: + +The :mod:`fakeldap` Module +-------------------------- + +.. automodule:: nova.auth.fakeldap + :members: + :undoc-members: + :show-inheritance: + +The :mod:`fakerabbit` Module +---------------------------- + +.. automodule:: nova.fakerabbit + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/getting.started.rst b/docs/getting.started.rst new file mode 100644 index 000000000000..777cd32e981e --- /dev/null +++ b/docs/getting.started.rst @@ -0,0 +1,70 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +Getting Started with Nova +========================= + + +GOTTA HAVE A nova.pth file added or it WONT WORK (will write setup.py file soon) + +DEPENDENCIES +------------ + +* RabbitMQ: messaging queue, used for all communication between components +* OpenLDAP: users, groups (maybe cut) +* Tornado: scalable non blocking web server for api requests +* Twisted: just for the twisted.internet.defer package +* boto: python api for aws api +* M2Crypto: python library interface for openssl +* IPy: library for managing ip addresses +* ReDIS: Remote Dictionary Store (for fast, shared state data) + +Recommended +----------------- +* euca2ools: python implementation of aws ec2-tools and ami tools +* build tornado to use C module for evented section + + +Installation +-------------- +:: + + # ON ALL SYSTEMS + apt-get install -y python-libvirt libvirt-bin python-setuptools python-dev python-pycurl python-m2crypto python-twisted + apt-get install -y aoetools vlan + modprobe aoe + + # ON THE CLOUD CONTROLLER + apt-get install -y rabbitmq-server dnsmasq + # fix ec2 metadata/userdata uri - where $IP is the IP of the cloud + iptables -t nat -A PREROUTING -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $IP:8773 + iptables --table nat --append POSTROUTING --out-interface $PUBLICIFACE -j MASQUERADE + # setup ldap (slap.sh as root will remove ldap and reinstall it) + auth/slap.sh + /etc/init.d/rabbitmq-server start + + # ON VOLUME NODE: + apt-get install -y vblade-persist + + # ON THE COMPUTE NODE: + apt-get install -y kpartx kvm + + # optional packages + apt-get install -y euca2ools + + # Set up flagfiles with the appropriate hostnames, etc. + # start api_worker, s3_worker, node_worker, storage_worker + # Add yourself to the libvirtd group, log out, and log back in + # Make sure the user who will launch the workers has sudo privileges w/o pass (will fix later) diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 000000000000..b86f14324c45 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,53 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +Welcome to nova's documentation! +================================ + +Nova is a cloud computing fabric controller (the main part of an IaaS system) built to match the popular AWS EC2 and S3 APIs. +It is written in Python, using the Tornado and Twisted frameworks, and relies on the standard AMQP messaging protocol, +and the Redis distributed KVS. +Nova is intended to be easy to extend, and adapt. For example, it currently uses +an LDAP server for users and groups, but also includes a fake LDAP server, +that stores data in Redis. It has extensive test coverage, and uses the +Sphinx toolkit (the same as Python itself) for code and user documentation. +While Nova is currently in Beta use within several organizations, the codebase +is very much under active development - there are bugs! + +Contents: + +.. 
toctree:: + :maxdepth: 2 + + getting.started + architecture + network + storage + auth + compute + endpoint + nova + fakes + binaries + todo + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/docs/modules.rst b/docs/modules.rst new file mode 100644 index 000000000000..f927a52d09bd --- /dev/null +++ b/docs/modules.rst @@ -0,0 +1,32 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Nova Documentation +================== + +This page contains the Nova Modules documentation. + +Modules: +-------- + +.. toctree:: + :maxdepth: 4 + + auth + compute + endpoint + fakes + nova + volume diff --git a/docs/network.rst b/docs/network.rst new file mode 100644 index 000000000000..49e36170d999 --- /dev/null +++ b/docs/network.rst @@ -0,0 +1,86 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +nova Networking +================ + +The nova networking components manage private networks, public IP addressing, VPN connectivity, and firewall rules. + +Components +---------- +There are several key components: + +* NetworkController (Manages address and vlan allocation) +* RoutingNode (NATs public IPs to private IPs, and enforces firewall rules) +* AddressingNode (runs DHCP services for private networks) +* BridgingNode (a subclass of the basic nova ComputeNode) +* TunnelingNode (provides VPN connectivity) + +Component Diagram +----------------- + +Overview:: + + (PUBLIC INTERNET) + | \ + / \ / \ + [RoutingNode] ... [RN] [TunnelingNode] ... [TN] + | \ / | | + | < AMQP > | | + [AddressingNode]-- (VLAN) ... | (VLAN)... (VLAN) --- [AddressingNode] + \ | \ / + / \ / \ / \ / \ + [BridgingNode] ... [BridgingNode] + + + [NetworkController] ... [NetworkController] + \ / + < AMQP > + | + / \ + [CloudController]...[CloudController] + +While this diagram may not make this entirely clear, nodes and controllers communicate exclusively across the message bus (AMQP, currently). + +State Model +----------- +Network State consists of the following facts: + +* VLAN assignment (to a project) +* Private Subnet assignment (to a security group) in a VLAN +* Private IP assignments (to running instances) +* Public IP allocations (to a project) +* Public IP associations (to a private IP / running instance) + +While copies of this state exist in many places (expressed in IPTables rule chains, DHCP hosts files, etc), the controllers rely only on the distributed "fact engine" for state, queried over RPC (currently AMQP). 
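As a rough illustration only: with the Redis-backed datastore used elsewhere in this tree, such facts could be recorded as plain keys and sets along these lines (the key names below are hypothetical; the real schema lives in nova/datastore.py and the network code)::

    import redis   # python-redis, one of the packaged dependencies

    r = redis.Redis(host='127.0.0.1')

    # hypothetical layout of a few of the facts listed above
    r.set('project:anso:vlan', 100)
    r.sadd('project:anso:public_ips', '198.51.100.10')
    print r.smembers('project:anso:public_ips')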
The NetworkController inserts most records into this datastore (allocating addresses, etc) - however, individual nodes update state e.g. when running instances crash. + +The Public Traffic Path +----------------------- + +Public Traffic:: + + (PUBLIC INTERNET) + | + <-- [RoutingNode] + | + [AddressingNode] --> | + ( VLAN ) + | <-- [BridgingNode] + | + + +The RoutingNode is currently implemented using IPTables rules, which implement both NATing of public IP addresses, and the appropriate firewall chains. We are also looking at using Netomata / Clusto to manage NATting within a switch or router, and/or to manage firewall rules within a hardware firewall appliance. + +Similarly, the AddressingNode currently manages running DNSMasq instances for DHCP services. However, we could run an internal DHCP server (using Scapy ala Clusto), or even switch to static addressing by inserting the private address into the disk image the same way we insert the SSH keys. (See compute for more details). \ No newline at end of file diff --git a/docs/nova.rst b/docs/nova.rst new file mode 100644 index 000000000000..7f1feda10c44 --- /dev/null +++ b/docs/nova.rst @@ -0,0 +1,89 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +NOVA Libraries +=============== + +The :mod:`crypto` Module +------------------------ + +.. automodule:: nova.crypto + :members: + :undoc-members: + :show-inheritance: + +The :mod:`adminclient` Module +----------------------------- + +.. automodule:: nova.adminclient + :members: + :undoc-members: + :show-inheritance: + +The :mod:`datastore` Module +--------------------------- + +.. automodule:: nova.datastore + :members: + :undoc-members: + :show-inheritance: + +The :mod:`exception` Module +--------------------------- + +.. automodule:: nova.exception + :members: + :undoc-members: + :show-inheritance: + +The :mod:`flags` Module +--------------------------- + +.. automodule:: nova.flags + :members: + :undoc-members: + :show-inheritance: + +The :mod:`rpc` Module +--------------------------- + +.. automodule:: nova.rpc + :members: + :undoc-members: + :show-inheritance: + +The :mod:`server` Module +--------------------------- + +.. automodule:: nova.server + :members: + :undoc-members: + :show-inheritance: + +The :mod:`test` Module +--------------------------- + +.. automodule:: nova.test + :members: + :undoc-members: + :show-inheritance: + +The :mod:`utils` Module +--------------------------- + +.. automodule:: nova.utils + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/objectstore.rst b/docs/objectstore.rst new file mode 100644 index 000000000000..64122c9b7358 --- /dev/null +++ b/docs/objectstore.rst @@ -0,0 +1,64 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
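The RoutingNode behaviour described above boils down to NAT rule pairs roughly like the following (addresses are placeholders; the real chains are built by the networking code rather than typed by hand)::

    # hypothetical association of public 198.51.100.10 with private 10.0.0.5
    iptables -t nat -A PREROUTING  -d 198.51.100.10 -j DNAT --to-destination 10.0.0.5
    iptables -t nat -A POSTROUTING -s 10.0.0.5 -j SNAT --to-source 198.51.100.10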
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Objectstore Documentation +========================= + +This page contains the Objectstore Package documentation. + + +The :mod:`bucket` Module +------------------------ + +.. automodule:: nova.objectstore.bucket + :members: + :undoc-members: + :show-inheritance: + +The :mod:`handler` Module +------------------------- + +.. automodule:: nova.objectstore.handler + :members: + :undoc-members: + :show-inheritance: + +The :mod:`image` Module +----------------------- + +.. automodule:: nova.objectstore.image + :members: + :undoc-members: + :show-inheritance: + +The :mod:`stored` Module +------------------------ + +.. automodule:: nova.objectstore.stored + :members: + :undoc-members: + :show-inheritance: + +RELATED TESTS +------------- + +The :mod:`objectstore_unittest` Module +-------------------------------------- + +.. automodule:: nova.tests.objectstore_unittest + :members: + :undoc-members: + :show-inheritance: + diff --git a/docs/packages.rst b/docs/packages.rst new file mode 100644 index 000000000000..ad1386f19663 --- /dev/null +++ b/docs/packages.rst @@ -0,0 +1,27 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +nova Packages & Dependencies +============================ + +Nova is being built on Ubuntu Lucid. + +The following packages are required: + + apt-get install python-ipy, python-libvirt, python-boto, python-pycurl, python-twisted, python-daemon, python-redis, python-carrot, python-lockfile + +In addition you need to install python: + + * python-gflags - http://code.google.com/p/python-gflags/ diff --git a/docs/storage.rst b/docs/storage.rst new file mode 100644 index 000000000000..94d7bdeea207 --- /dev/null +++ b/docs/storage.rst @@ -0,0 +1,29 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Storage in the Nova Cloud +========================= + +There are three primary classes of storage in a nova cloud environment: + +* Ephemeral Storage (local disk within an instance) +* Volume Storage (network-attached FS) +* Object Storage (redundant KVS with locality and MR) + +.. 
toctree:: + :maxdepth: 2 + + volume + objectstore \ No newline at end of file diff --git a/docs/volume.rst b/docs/volume.rst new file mode 100644 index 000000000000..18ce70a3ab13 --- /dev/null +++ b/docs/volume.rst @@ -0,0 +1,43 @@ +.. + Copyright [2010] [Anso Labs, LLC] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Volume Documentation +==================== + +Nova uses ata-over-ethernet (AoE) to export storage volumes from multiple storage nodes. These AoE exports are attached (using libvirt) directly to running instances. + +Nova volumes are exported over the primary system VLAN (usually VLAN 1), and not over individual VLANs. + +AoE exports are numbered according to a "shelf and blade" syntax. In order to avoid collisions, we currently perform an AoE-discover of existing exports, and then grab the next unused number. (This obviously has race condition problems, and should be replaced by allocating a shelf-id to each storage node.) + +The underlying volumes are LVM logical volumes, created on demand within a single large volume group. + + +The :mod:`storage` Module +------------------------- + +.. automodule:: nova.volume.storage + :members: + :undoc-members: + :show-inheritance: + +The :mod:`storage_unittest` Module +---------------------------------- + +.. automodule:: nova.tests.storage_unittest + :members: + :undoc-members: + :show-inheritance: + diff --git a/nova/__init__.py b/nova/__init__.py new file mode 100644 index 000000000000..2b25d1628d46 --- /dev/null +++ b/nova/__init__.py @@ -0,0 +1,30 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:mod:`nova` -- Cloud IaaS Platform +=================================== + +.. automodule:: nova + :platform: Unix + :synopsis: Infrastructure-as-a-Service Cloud platform. +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" + +from exception import * \ No newline at end of file diff --git a/nova/adminclient.py b/nova/adminclient.py new file mode 100644 index 000000000000..2cc592b9f367 --- /dev/null +++ b/nova/adminclient.py @@ -0,0 +1,113 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
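Returning to the volume layout described in docs/volume.rst above: exporting one logical volume under the shelf-and-blade scheme looks roughly like this (the volume group name and shelf/slot numbers are illustrative; in Nova the equivalent steps are driven from the volume worker rather than run by hand)::

    # carve a logical volume out of the shared volume group
    lvcreate -L 10G -n vol-00000001 nova-volumes

    # export it over AoE as shelf 0, slot 1 on the primary VLAN interface
    vblade 0 1 eth0 /dev/nova-volumes/vol-00000001 &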
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Nova User API client library. +""" + +import boto +from boto.ec2.regioninfo import RegionInfo +import base64 + +class UserInfo(object): + """ Information about a Nova user + fields include: + username + accesskey + secretkey + + and an optional field containing a zip with X509 cert & rc + file + """ + + def __init__(self, connection=None, username=None, endpoint=None): + self.connection = connection + self.username = username + self.endpoint = endpoint + + def __repr__(self): + return 'UserInfo:%s' % self.username + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'username': + self.username = str(value) + elif name == 'file': + self.file = base64.b64decode(str(value)) + elif name == 'accesskey': + self.accesskey = str(value) + elif name == 'secretkey': + self.secretkey = str(value) + + +class NovaAdminClient(object): + def __init__(self, clc_ip='127.0.0.1', region='nova', access_key='admin', + secret_key='admin', **kwargs): + self.clc_ip = clc_ip + self.region = region + self.access = access_key + self.secret = secret_key + self.apiconn = boto.connect_ec2(aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=False, + region=RegionInfo(None, region, clc_ip), + port=8773, + path='/services/Admin', + **kwargs) + self.apiconn.APIVersion = 'nova' + + def connection_for(self, username, **kwargs): + """ + Returns a boto ec2 connection for the given username. + """ + user = self.get_user(username) + return boto.connect_ec2( + aws_access_key_id=user.accesskey, + aws_secret_access_key=user.secretkey, + is_secure=False, + region=RegionInfo(None, self.region, self.clc_ip), + port=8773, + path='/services/Cloud', + **kwargs + ) + + def get_users(self): + """ grabs the list of all users """ + return self.apiconn.get_list('DescribeUsers', {}, (['item', UserInfo])) + + def get_user(self, name): + """ grab a single user by name """ + user = self.apiconn.get_object('DescribeUser', {'Name': name}, UserInfo) + + if user.username != None: + return user + + def has_user(self, username): + """ determine if user exists """ + return self.get_user(username) != None + + def create_user(self, username): + """ creates a new user, returning the userinfo object with access/secret """ + return self.apiconn.get_object('RegisterUser', {'Name': username}, UserInfo) + + def delete_user(self, username): + """ deletes a user """ + return self.apiconn.get_object('DeregisterUser', {'Name': username}, UserInfo) + + def get_zip(self, username): + """ returns the content of a zip file containing novarc and access credentials. """ + return self.apiconn.get_object('GenerateX509ForUser', {'Name': username}, UserInfo).file + diff --git a/nova/auth/__init__.py b/nova/auth/__init__.py new file mode 100644 index 000000000000..7cd6c618dbcd --- /dev/null +++ b/nova/auth/__init__.py @@ -0,0 +1,25 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:mod:`nova.auth` -- Authentication and Access Control +===================================================== + +.. automodule:: nova.auth + :platform: Unix + :synopsis: User-and-Project based RBAC using LDAP, SAML. +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +""" \ No newline at end of file diff --git a/nova/auth/access.py b/nova/auth/access.py new file mode 100644 index 000000000000..2c780626d2ef --- /dev/null +++ b/nova/auth/access.py @@ -0,0 +1,69 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Simple base set of RBAC rules which map API endpoints to LDAP groups. +For testing accounts, users will always have PM privileges. +""" + + +# This is logically a RuleSet or some such. + +def allow_describe_images(user, project, target_object): + return True + +def allow_describe_instances(user, project, target_object): + return True + +def allow_describe_addresses(user, project, target_object): + return True + +def allow_run_instances(user, project, target_object): + # target_object is a reservation, not an instance + # it needs to include count, type, image, etc. + + # First, is the project allowed to use this image + + # Second, is this user allowed to launch within this project + + # Third, is the count or type within project quota + + return True + +def allow_terminate_instances(user, project, target_object): + # In a project, the PMs and Sysadmins can terminate + return True + +def allow_get_console_output(user, project, target_object): + # If the user launched the instance, + # Or is a sysadmin in the project, + return True + +def allow_allocate_address(user, project, target_object): + # There's no security concern in allocation, + # but it can get expensive. Limit to PM and NE. + return True + +def allow_associate_address(user, project, target_object): + # project NE only + # In future, will perform a CloudAudit scan first + # (Pass / Fail gate) + return True + +def allow_register(user, project, target_object): + return False + +def is_allowed(action, user, project, target_object): + return globals()['allow_%s' % action](user, project, target_object) + diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py new file mode 100644 index 000000000000..c223b250ced5 --- /dev/null +++ b/nova/auth/fakeldap.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" + Fake LDAP server for test harnesses. +""" + +import logging + +from nova import datastore + +SCOPE_SUBTREE = 1 + + +class NO_SUCH_OBJECT(Exception): + pass + + +def initialize(uri): + return FakeLDAP(uri) + + +class FakeLDAP(object): + def __init__(self, _uri): + self.keeper = datastore.Keeper('fakeldap') + if self.keeper['objects'] is None: + self.keeper['objects'] = {} + + def simple_bind_s(self, dn, password): + pass + + def unbind_s(self): + pass + + def search_s(self, dn, scope, query=None, fields=None): + logging.debug("searching for %s" % dn) + filtered = {} + d = self.keeper['objects'] or {} + for cn, attrs in d.iteritems(): + if cn[-len(dn):] == dn: + filtered[cn] = attrs + if query: + k,v = query[1:-1].split('=') + objects = {} + for cn, attrs in filtered.iteritems(): + if attrs.has_key(k) and (v in attrs[k] or + v == attrs[k]): + objects[cn] = attrs + if objects == {}: + raise NO_SUCH_OBJECT() + return objects.items() + + def add_s(self, cn, attr): + logging.debug("adding %s" % cn) + stored = {} + for k, v in attr: + if type(v) is list: + stored[k] = v + else: + stored[k] = [v] + d = self.keeper['objects'] + d[cn] = stored + self.keeper['objects'] = d + + def delete_s(self, cn): + logging.debug("creating for %s" % cn) + d = self.keeper['objects'] or {} + del d[cn] + self.keeper['objects'] = d diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template new file mode 100644 index 000000000000..a993d18829de --- /dev/null +++ b/nova/auth/novarc.template @@ -0,0 +1,26 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
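+#
+# Note: the lines below are a Python %-format template rather than a literal
+# rc file; the %(access)s, %(secret)s, %(ec2)s, %(s3)s, %(key)s, %(cert)s and
+# %(nova)s placeholders are filled in when nova generates a user's credentials
+# (the rendered novarc ships in the credential zip alongside the X509 cert and
+# key). The result is meant to be sourced from bash, e.g. ". novarc", which is
+# why NOVA_KEY_DIR is derived from $BASH_SOURCE.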
+ +NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null) +export EC2_ACCESS_KEY="%(access)s" +export EC2_SECRET_KEY="%(secret)s" +export EC2_URL="%(ec2)s" +export S3_URL="%(s3)s" +export EC2_USER_ID=42 # nova does not use user id, but bundling requires it +export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/%(key)s +export EC2_CERT=${NOVA_KEY_DIR}/%(cert)s +export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s +export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set +alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" +alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" diff --git a/nova/auth/rbac.ldif b/nova/auth/rbac.ldif new file mode 100644 index 000000000000..3878d2c1be8d --- /dev/null +++ b/nova/auth/rbac.ldif @@ -0,0 +1,60 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# LDIF fragment to create group branch under root + +#dn: ou=Groups,dc=example,dc=com +#objectclass:organizationalunit +#ou: groups +#description: generic groups branch + +# create the itpeople entry + +dn: cn=sysadmins,ou=Groups,dc=example,dc=com +objectclass: groupofnames +cn: itpeople +description: IT admin group +# add the group members all of which are +# assumed to exist under Users +#member: cn=micky mouse,ou=people,dc=example,dc=com +member: cn=admin,ou=Users,dc=example,dc=com + +dn: cn=netadmins,ou=Groups,dc=example,dc=com +objectclass: groupofnames +cn: netadmins +description: Network admin group +member: cn=admin,ou=Users,dc=example,dc=com + +dn: cn=cloudadmins,ou=Groups,dc=example,dc=com +objectclass: groupofnames +cn: cloudadmins +description: Cloud admin group +member: cn=admin,ou=Users,dc=example,dc=com + +dn: cn=itsec,ou=Groups,dc=example,dc=com +objectclass: groupofnames +cn: itsec +description: IT security users group +member: cn=admin,ou=Users,dc=example,dc=com + +# Example Project Group to demonstrate members +# and project members + +dn: cn=myproject,ou=Groups,dc=example,dc=com +objectclass: groupofnames +objectclass: novaProject +cn: myproject +description: My Project Group +member: cn=admin,ou=Users,dc=example,dc=com +projectManager: cn=admin,ou=Users,dc=example,dc=com diff --git a/nova/auth/signer.py b/nova/auth/signer.py new file mode 100644 index 000000000000..00aa066fb0a7 --- /dev/null +++ b/nova/auth/signer.py @@ -0,0 +1,127 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# PORTIONS OF THIS FILE ARE FROM: +# http://code.google.com/p/boto +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Utility class for parsing signed AMI manifests. +""" + +import logging +import hashlib +import hmac +import urllib +import base64 +from nova.exception import Error + +_log = logging.getLogger('signer') +logging.getLogger('signer').setLevel(logging.WARN) + +class Signer(object): + """ hacked up code from boto/connection.py """ + + def __init__(self, secret_key): + self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1) + if hashlib.sha256: + self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256) + + def generate(self, params, verb, server_string, path): + if params['SignatureVersion'] == '0': + t = self._calc_signature_0(params) + elif params['SignatureVersion'] == '1': + t = self._calc_signature_1(params) + elif params['SignatureVersion'] == '2': + t = self._calc_signature_2(params, verb, server_string, path) + else: + raise Error('Unknown Signature Version: %s' % self.SignatureVersion) + return t + + def _get_utf8_value(self, value): + if not isinstance(value, str) and not isinstance(value, unicode): + value = str(value) + if isinstance(value, unicode): + return value.encode('utf-8') + else: + return value + + def _calc_signature_0(self, params): + s = params['Action'] + params['Timestamp'] + self.hmac.update(s) + keys = params.keys() + keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower())) + pairs = [] + for key in keys: + val = self._get_utf8_value(params[key]) + pairs.append(key + '=' + urllib.quote(val)) + return base64.b64encode(self.hmac.digest()) + + def _calc_signature_1(self, params): + keys = params.keys() + keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower())) + pairs = [] + for key in keys: + self.hmac.update(key) + val = self._get_utf8_value(params[key]) + self.hmac.update(val) + pairs.append(key + '=' + urllib.quote(val)) + return base64.b64encode(self.hmac.digest()) + + def _calc_signature_2(self, params, verb, server_string, path): + _log.debug('using _calc_signature_2') + string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path) + if self.hmac_256: + hmac = self.hmac_256 + params['SignatureMethod'] = 'HmacSHA256' + else: + hmac = self.hmac + params['SignatureMethod'] = 'HmacSHA1' + keys = params.keys() + keys.sort() + pairs = [] + for key in keys: + val = 
self._get_utf8_value(params[key]) + pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~')) + qs = '&'.join(pairs) + _log.debug('query string: %s' % qs) + string_to_sign += qs + _log.debug('string_to_sign: %s' % string_to_sign) + hmac.update(string_to_sign) + b64 = base64.b64encode(hmac.digest()) + _log.debug('len(b64)=%d' % len(b64)) + _log.debug('base64 encoded digest: %s' % b64) + return b64 + +if __name__ == '__main__': + print Signer('foo').generate({"SignatureMethod": 'HmacSHA256', 'SignatureVersion': '2'}, "get", "server", "/foo") diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh new file mode 100755 index 000000000000..a0df4e0ae6ec --- /dev/null +++ b/nova/auth/slap.sh @@ -0,0 +1,226 @@ +#!/usr/bin/env bash +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS + +apt-get install -y slapd ldap-utils python-ldap + +cat >/etc/ldap/schema/openssh-lpk_openldap.schema < +# +# Based on the proposal of : Mark Ruijter +# + + +# octetString SYNTAX +attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' + DESC 'MANDATORY: OpenSSH Public key' + EQUALITY octetStringMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) + +# printableString SYNTAX yes|no +objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY + DESC 'MANDATORY: OpenSSH LPK objectclass' + MAY ( sshPublicKey $ uid ) + ) +LPK_SCHEMA_EOF + +cat >/etc/ldap/schema/nova.schema < +# +# + +# using internet experimental oid arc as per BP64 3.1 +objectidentifier novaSchema 1.3.6.1.3.1.666.666 +objectidentifier novaAttrs novaSchema:3 +objectidentifier novaOCs novaSchema:4 + +attributetype ( + novaAttrs:1 + NAME 'accessKey' + DESC 'Key for accessing data' + EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:2 + NAME 'secretKey' + DESC 'Secret key' + EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:3 + NAME 'keyFingerprint' + DESC 'Fingerprint of private key' + EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:4 + NAME 'isAdmin' + DESC 'Is user an administrator?' 
+ EQUALITY booleanMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 + SINGLE-VALUE + ) + +attributetype ( + novaAttrs:5 + NAME 'projectManager' + DESC 'Project Managers of a project' + SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 + ) + +objectClass ( + novaOCs:1 + NAME 'novaUser' + DESC 'access and secret keys' + AUXILIARY + MUST ( uid ) + MAY ( accessKey $ secretKey $ isAdmin ) + ) + +objectClass ( + novaOCs:2 + NAME 'novaKeyPair' + DESC 'Key pair for User' + SUP top + STRUCTURAL + MUST ( cn $ sshPublicKey $ keyFingerprint ) + ) + +objectClass ( + novaOCs:3 + NAME 'novaProject' + DESC 'Container for project' + SUP groupofnames + STRUCTURAL + MUST ( cn $ projectManager ) + ) + +NOVA_SCHEMA_EOF + +mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig +cat >/etc/ldap/slapd.conf </etc/ldap/ldap.conf </etc/ldap/base.ldif < +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" \ No newline at end of file diff --git a/nova/compute/disk.py b/nova/compute/disk.py new file mode 100644 index 000000000000..d3eeb951fe89 --- /dev/null +++ b/nova/compute/disk.py @@ -0,0 +1,122 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Utility methods to resize, repartition, and modify disk images. +Includes injection of SSH PGP keys into authorized_keys file. +""" + +import logging +import os +import tempfile + +from nova.exception import Error +from nova.utils import execute + +def partition(infile, outfile, local_bytes=0, local_type='ext2'): + """Takes a single partition represented by infile and writes a bootable drive image into outfile. + The first 63 sectors (0-62) of the resulting image is a master boot record. + Infile becomes the first primary partition. + If local bytes is specified, a second primary partition is created and formatted as ext2. + In the diagram below, dashes represent drive sectors. + 0 a b c d e + +-----+------. . .-------+------. . .------+ + | mbr | primary partiton | local partition | + +-----+------. . .-------+------. . 
.------+ + """ + sector_size = 512 + file_size = os.path.getsize(infile) + if file_size % sector_size != 0: + logging.warn("Input partition size not evenly divisible by sector size: %d / %d" (file_size, sector_size)) + primary_sectors = file_size / sector_size + if local_bytes % sector_size != 0: + logging.warn("Bytes for local storage not evenly divisible by sector size: %d / %d" (local_bytes, sector_size)) + local_sectors = local_bytes / sector_size + + mbr_last = 62 # a + primary_first = mbr_last + 1 # b + primary_last = primary_first + primary_sectors # c + local_first = primary_last + 1 # d + local_last = local_first + local_sectors # e + last_sector = local_last # e + + # create an empty file + execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' % (outfile, last_sector, sector_size)) + + # make mbr partition + execute('parted --script %s mklabel msdos' % outfile) + + # make primary partition + execute('parted --script %s mkpart primary %ds %ds' % (outfile, primary_first, primary_last)) + + # make local partition + if local_bytes > 0: + execute('parted --script %s mkpartfs primary %s %ds %ds' % (outfile, local_type, local_first, local_last)) + + # copy file into partition + execute('dd if=%s of=%s bs=%d seek=%d conv=notrunc,fsync' % (infile, outfile, sector_size, primary_first)) + + +def inject_key(key, image, partition=None): + """Injects a ssh key into a disk image. + It adds the specified key to /root/.ssh/authorized_keys + it will mount the image as a fully partitioned disk and attempt to inject into the specified partition number. + If partition is not specified it mounts the image as a single partition. + """ + out, err = execute('sudo losetup -f --show %s' % image) + if err: + raise Error('Could not attach image to loopback: %s' % err) + device = out.strip() + try: + if not partition is None: + # create partition + out, err = execute('sudo kpartx -a %s' % device) + if err: + raise Error('Failed to load partition: %s' % err) + mapped_device = '/dev/mapper/%sp%s' % ( device.split('/')[-1] , partition ) + else: + mapped_device = device + out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) + + tmpdir = tempfile.mkdtemp() + try: + # mount loopback to dir + out, err = execute('sudo mount %s %s' % (mapped_device, tmpdir)) + if err: + raise Error('Failed to mount filesystem: %s' % err) + + try: + # inject key file + _inject_into_fs(key, tmpdir) + finally: + # unmount device + execute('sudo umount %s' % mapped_device) + finally: + # remove temporary directory + os.rmdir(tmpdir) + if not partition is None: + # remove partitions + execute('sudo kpartx -d %s' % device) + finally: + # remove loopback + execute('sudo losetup -d %s' % device) + +def _inject_into_fs(key, fs): + sshdir = os.path.join(os.path.join(fs, 'root'), '.ssh') + execute('sudo mkdir %s' % sshdir) #error on existing dir doesn't matter + execute('sudo chown root %s' % sshdir) + execute('sudo chmod 700 %s' % sshdir) + keyfile = os.path.join(sshdir, 'authorized_keys') + execute('sudo bash -c "cat >> %s"' % keyfile, '\n' + key + '\n') + diff --git a/nova/compute/exception.py b/nova/compute/exception.py new file mode 100644 index 000000000000..6fe8e381fe14 --- /dev/null +++ b/nova/compute/exception.py @@ -0,0 +1,35 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Exceptions for Compute Node errors, mostly network addressing. +""" + +from nova.exception import Error + +class NoMoreAddresses(Error): + pass + +class AddressNotAllocated(Error): + pass + +class AddressAlreadyAssociated(Error): + pass + +class AddressNotAssociated(Error): + pass + +class NotValidNetworkSize(Error): + pass + diff --git a/nova/compute/fakevirtinstance.xml b/nova/compute/fakevirtinstance.xml new file mode 100644 index 000000000000..6036516bbe2b --- /dev/null +++ b/nova/compute/fakevirtinstance.xml @@ -0,0 +1,43 @@ + + + i-A9B8C7D6 + 12a345bc-67c8-901d-2e34-56f7g89012h3 + 524288 + 524288 + 1 + + + + + + destroy + restart + destroy + + /usr/bin/kvm + + + + + + + + + + + + \ No newline at end of file diff --git a/nova/compute/libvirt.xml.template b/nova/compute/libvirt.xml.template new file mode 100644 index 000000000000..4cf6e8b104fa --- /dev/null +++ b/nova/compute/libvirt.xml.template @@ -0,0 +1,46 @@ + + + %(name)s + + hvm + %(basepath)s/kernel + %(basepath)s/ramdisk + root=/dev/vda1 console=ttyS0 + + + + + %(memory_kb)s + %(vcpus)s + + /usr/bin/kvm + + + + + + + + + + + + + + + %(nova)s + diff --git a/nova/compute/linux_net.py b/nova/compute/linux_net.py new file mode 100644 index 000000000000..0983241f9517 --- /dev/null +++ b/nova/compute/linux_net.py @@ -0,0 +1,146 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +import signal +import os +import nova.utils +import subprocess + +# todo(ja): does the definition of network_path belong here? 
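+#
+# This module shells out (via nova.utils / subprocess) to vconfig, brctl,
+# ifconfig, iptables and dnsmasq to set up VLANs, bridges, iptables rules and
+# DHCP for instances.  When FLAGS.fake_network is set, commands are printed
+# rather than executed.
+
+import logging  # start_dnsmasq() below logs via logging.debug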
+ +from nova import flags +FLAGS=flags.FLAGS + +def execute(cmd): + if FLAGS.fake_network: + print "FAKE NET: %s" % cmd + return "fake", 0 + else: + nova.utils.execute(cmd) + +def runthis(desc, cmd): + if FLAGS.fake_network: + execute(cmd) + else: + nova.utils.runthis(desc,cmd) + +def Popen(cmd): + if FLAGS.fake_network: + execute(' '.join(cmd)) + else: + subprocess.Popen(cmd) + + +def device_exists(device): + (out, err) = execute("ifconfig %s" % device) + return not err + +def confirm_rule(cmd): + execute("sudo iptables --delete %s" % (cmd)) + execute("sudo iptables -I %s" % (cmd)) + +def remove_rule(cmd): + execute("sudo iptables --delete %s" % (cmd)) + +def bind_public_ip(ip, interface): + runthis("Binding IP to interface: %s", "sudo ip addr add %s dev %s" % (ip, interface)) + +def vlan_create(net): + """ create a vlan on on a bridge device unless vlan already exists """ + if not device_exists("vlan%s" % net.vlan): + execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") + execute("sudo vconfig add %s %s" % (net.bridge_dev, net.vlan)) + execute("sudo ifconfig vlan%s up" % (net.vlan)) + +def bridge_create(net): + """ create a bridge on a vlan unless it already exists """ + if not device_exists(net.bridge_name): + execute("sudo brctl addbr %s" % (net.bridge_name)) + # execute("sudo brctl setfd %s 0" % (net.bridge_name)) + # execute("sudo brctl setageing %s 10" % (net.bridge_name)) + execute("sudo brctl stp %s off" % (net.bridge_name)) + execute("sudo brctl addif %s vlan%s" % (net.bridge_name, net.vlan)) + if net.bridge_gets_ip: + execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \ + (net.bridge_name, net.gateway, net.broadcast, net.netmask)) + confirm_rule("FORWARD --in-interface %s -j ACCEPT" % (net.bridge_name)) + else: + execute("sudo ifconfig %s up" % net.bridge_name) + +def dnsmasq_cmd(net): + cmd = ['sudo dnsmasq', + ' --strict-order', + ' --bind-interfaces', + ' --conf-file=', + ' --pid-file=%s' % dhcp_file(net.vlan, 'pid'), + ' --listen-address=%s' % net.dhcp_listen_address, + ' --except-interface=lo', + ' --dhcp-range=%s,%s,120s' % (net.dhcp_range_start, net.dhcp_range_end), + ' --dhcp-lease-max=61', + ' --dhcp-hostsfile=%s' % dhcp_file(net.vlan, 'conf'), + ' --dhcp-leasefile=%s' % dhcp_file(net.vlan, 'leases')] + return ''.join(cmd) + +def hostDHCP(network, host): + idx = host['address'].split(".")[-1] # Logically, the idx of instances they've launched in this net + return "%s,%s-%s-%s.novalocal,%s" % \ + (host['mac'], host['user_id'], network.vlan, idx, host['address']) + +# todo(ja): if the system has restarted or pid numbers have wrapped +# then you cannot be certain that the pid refers to the +# dnsmasq. As well, sending a HUP only reloads the hostfile, +# so any configuration options (like dchp-range, vlan, ...) 
+# aren't reloaded +def start_dnsmasq(network): + """ (re)starts a dnsmasq server for a given network + + if a dnsmasq instance is already running then send a HUP + signal causing it to reload, otherwise spawn a new instance + """ + with open(dhcp_file(network.vlan, 'conf'), 'w') as f: + for host_name in network.hosts: + f.write("%s\n" % hostDHCP(network, network.hosts[host_name])) + + pid = dnsmasq_pid_for(network) + + # if dnsmasq is already running, then tell it to reload + if pid: + # todo(ja): use "/proc/%d/cmdline" % (pid) to determine if pid refers + # correct dnsmasq process + try: + os.kill(pid, signal.SIGHUP) + return + except Exception, e: + logging.debug("Killing dnsmasq threw %s", e) + + # otherwise delete the existing leases file and start dnsmasq + lease_file = dhcp_file(network.vlan, 'leases') + if os.path.exists(lease_file): + os.unlink(lease_file) + + Popen(dnsmasq_cmd(network).split(" ")) + +def stop_dnsmasq(network): + """ stops the dnsmasq instance for a given network """ + pid = dnsmasq_pid_for(network) + + if pid: + os.kill(pid, signal.SIGTERM) + +def dhcp_file(vlan, kind): + """ return path to a pid, leases or conf file for a vlan """ + + return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, vlan, kind)) + +def dnsmasq_pid_for(network): + """ the pid for prior dnsmasq instance for a vlan, + returns None if no pid file exists + + if machine has rebooted pid might be incorrect (caller should check) + """ + + pid_file = dhcp_file(network.vlan, 'pid') + + if os.path.exists(pid_file): + with open(pid_file, 'r') as f: + return int(f.read()) + diff --git a/nova/compute/model.py b/nova/compute/model.py new file mode 100644 index 000000000000..78ed3a101225 --- /dev/null +++ b/nova/compute/model.py @@ -0,0 +1,203 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 expandtab +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Datastore Model objects for Compute Instances, with +InstanceDirectory manager. + +# Create a new instance? 
+>>> InstDir = InstanceDirectory() +>>> inst = InstDir.new() +>>> inst.destroy() +True +>>> inst = InstDir['i-123'] +>>> inst['ip'] = "192.168.0.3" +>>> inst['owner_id'] = "projectA" +>>> inst.save() +True + +>>> InstDir['i-123'] + +>>> InstDir.all.next() + + +>>> inst.destroy() +True +""" + +from nova import vendor + +from nova import datastore +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS + + +# TODO(ja): singleton instance of the directory +class InstanceDirectory(object): + """an api for interacting with the global state of instances """ + def __init__(self): + self.keeper = datastore.Keeper(FLAGS.instances_prefix) + + def get(self, instance_id): + """ returns an instance object for a given id """ + return Instance(instance_id) + + def __getitem__(self, item): + return self.get(item) + + def by_project(self, project): + """ returns a list of instance objects for a project """ + for instance_id in self.keeper['project:%s:instances' % project]: + yield Instance(instance_id) + + def by_node(self, node_id): + """ returns a list of instances for a node """ + for instance in self.all: + if instance['node_name'] == node_id: + yield instance + + def by_ip(self, ip_address): + """ returns an instance object that is using the IP """ + for instance in self.all: + if instance['private_dns_name'] == ip_address: + return instance + return None + + def by_volume(self, volume_id): + """ returns the instance a volume is attached to """ + pass + + def exists(self, instance_id): + if instance_id in self.keeper['instances']: + return True + return False + + @property + def all(self): + """ returns a list of all instances """ + instances = self.keeper['instances'] + if instances != None: + for instance_id in self.keeper['instances']: + yield Instance(instance_id) + + def new(self): + """ returns an empty Instance object, with ID """ + instance_id = utils.generate_uid('i') + return self.get(instance_id) + + + +class Instance(object): + """ Wrapper around stored properties of an instance """ + + def __init__(self, instance_id): + """ loads an instance from the datastore if exists """ + self.keeper = datastore.Keeper(FLAGS.instances_prefix) + self.instance_id = instance_id + self.initial_state = {} + self.state = self.keeper[self.__redis_key] + if self.state: + self.initial_state = self.state + else: + self.state = {'state' : 'pending', + 'instance_id' : instance_id, + 'node_name' : 'unassigned', + 'owner_id' : 'unassigned' } + + @property + def __redis_key(self): + """ Magic string for instance keys """ + return 'instance:%s' % self.instance_id + + def __repr__(self): + return "" % self.instance_id + + def get(self, item, default): + return self.state.get(item, default) + + def __getitem__(self, item): + return self.state[item] + + def __setitem__(self, item, val): + self.state[item] = val + return self.state[item] + + def __delitem__(self, item): + """ We don't support this """ + raise Exception("Silly monkey, Instances NEED all their properties.") + + def save(self): + """ update the directory with the state from this instance + make sure you've set the owner_id before you call save + for the first time. 
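+        (The first successful save is also what adds the instance id to the
+        global 'instances' set and to 'project:<owner_id>:instances', which
+        is why owner_id has to be set beforehand.)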
+ """ + # TODO(ja): implement hmset in redis-py and use it + # instead of multiple calls to hset + state = self.keeper[self.__redis_key] + if not state: + state = {} + for key, val in self.state.iteritems(): + # if (not self.initial_state.has_key(key) + # or self.initial_state[key] != val): + state[key] = val + self.keeper[self.__redis_key] = state + if self.initial_state == {}: + self.keeper.set_add('project:%s:instances' % self.state['owner_id'], + self.instance_id) + self.keeper.set_add('instances', self.instance_id) + self.initial_state = self.state + return True + + def destroy(self): + """ deletes all related records from datastore. + does NOT do anything to running libvirt state. + """ + self.keeper.set_remove('project:%s:instances' % self.state['owner_id'], + self.instance_id) + del self.keeper[self.__redis_key] + self.keeper.set_remove('instances', self.instance_id) + return True + + @property + def volumes(self): + """ returns a list of attached volumes """ + pass + + @property + def reservation(self): + """ Returns a reservation object """ + pass + +# class Reservation(object): +# """ ORM wrapper for a batch of launched instances """ +# def __init__(self): +# pass +# +# def userdata(self): +# """ """ +# pass +# +# +# class NodeDirectory(object): +# def __init__(self): +# pass +# + +if __name__ == "__main__": + import doctest + doctest.testmod() diff --git a/nova/compute/network.py b/nova/compute/network.py new file mode 100644 index 000000000000..612295f272e4 --- /dev/null +++ b/nova/compute/network.py @@ -0,0 +1,520 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Classes for network control, including VLANs, DHCP, and IP allocation. 
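+
+The classes below build on one another: Network hands out addresses from an
+IPy-backed subnet, Vlan adds a VLAN interface on top of bridge_dev,
+VirtNetwork adds a bridge (and can emit libvirt network XML), and DHCPNetwork
+runs dnsmasq for its hosts.  PrivateNetwork (one per user) and PublicNetwork
+(NAT'd public addresses that can be associated with instances) are the
+variants handed out by NetworkController, which persists them through the
+datastore keeper and allocates VLANs and subnets via VlanPool and NetworkPool.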
+""" + +import json +import logging +import os + +# TODO(termie): clean up these imports +from nova import vendor +import IPy + +from nova import datastore +import nova.exception +from nova.compute import exception +from nova import flags +from nova import utils +from nova.auth import users + +import linux_net + +FLAGS = flags.FLAGS +flags.DEFINE_string('net_libvirt_xml_template', + utils.abspath('compute/net.libvirt.xml.template'), + 'Template file for libvirt networks') +flags.DEFINE_string('networks_path', utils.abspath('../networks'), + 'Location to keep network config files') +flags.DEFINE_integer('public_vlan', 1, 'VLAN for public IP addresses') +flags.DEFINE_string('public_interface', 'vlan1', 'Interface for public IP addresses') +flags.DEFINE_string('bridge_dev', 'eth1', + 'network device for bridges') +flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') +flags.DEFINE_integer('vlan_end', 4093, 'Last VLAN for private networks') +flags.DEFINE_integer('network_size', 256, 'Number of addresses in each private subnet') +flags.DEFINE_string('public_range', '4.4.4.0/24', 'Public IP address block') +flags.DEFINE_string('private_range', '10.0.0.0/8', 'Private IP address block') + + +# HACK(vish): to delay _get_keeper() loading +def _get_keeper(): + if _get_keeper.keeper == None: + _get_keeper.keeper = datastore.Keeper(prefix="net") + return _get_keeper.keeper +_get_keeper.keeper = None + +logging.getLogger().setLevel(logging.DEBUG) + +# CLEANUP: +# TODO(ja): use singleton for usermanager instead of self.manager in vlanpool et al +# TODO(ja): does vlanpool "keeper" need to know the min/max - shouldn't FLAGS always win? + +class Network(object): + def __init__(self, *args, **kwargs): + self.bridge_gets_ip = False + try: + os.makedirs(FLAGS.networks_path) + except Exception, err: + pass + self.load(**kwargs) + + def to_dict(self): + return {'vlan': self.vlan, + 'network': self.network_str, + 'hosts': self.hosts} + + def load(self, **kwargs): + self.network_str = kwargs.get('network', "192.168.100.0/24") + self.hosts = kwargs.get('hosts', {}) + self.vlan = kwargs.get('vlan', 100) + self.name = "nova-%s" % (self.vlan) + self.network = IPy.IP(self.network_str) + self.gateway = self.network[1] + self.netmask = self.network.netmask() + self.broadcast = self.network.broadcast() + self.bridge_name = "br%s" % (self.vlan) + + def __str__(self): + return json.dumps(self.to_dict()) + + def __unicode__(self): + return json.dumps(self.to_dict()) + + @classmethod + def from_dict(cls, args): + for arg in args.keys(): + value = args[arg] + del args[arg] + args[str(arg)] = value + self = cls(**args) + return self + + @classmethod + def from_json(cls, json_string): + parsed = json.loads(json_string) + return cls.from_dict(parsed) + + def range(self): + for idx in range(3, len(self.network)-2): + yield self.network[idx] + + def allocate_ip(self, user_id, mac): + for ip in self.range(): + address = str(ip) + if not address in self.hosts.keys(): + logging.debug("Allocating IP %s to %s" % (address, user_id)) + self.hosts[address] = { + "address" : address, "user_id" : user_id, 'mac' : mac + } + self.express(address=address) + return address + raise exception.NoMoreAddresses() + + def deallocate_ip(self, ip_str): + if not ip_str in self.hosts.keys(): + raise exception.AddressNotAllocated() + del self.hosts[ip_str] + # TODO(joshua) SCRUB from the leases file somehow + self.deexpress(address=ip_str) + + def list_addresses(self): + for address in self.hosts.values(): + yield address + + def 
express(self, address=None): + pass + + def deexpress(self, address=None): + pass + + +class Vlan(Network): + """ + VLAN configuration, that when expressed creates the vlan + + properties: + + vlan - integer (example: 42) + bridge_dev - string (example: eth0) + """ + + def __init__(self, *args, **kwargs): + super(Vlan, self).__init__(*args, **kwargs) + self.bridge_dev = FLAGS.bridge_dev + + def express(self, address=None): + super(Vlan, self).express(address=address) + try: + logging.debug("Starting VLAN inteface for %s network" % (self.vlan)) + linux_net.vlan_create(self) + except: + pass + + +class VirtNetwork(Vlan): + """ + Virtual Network that can export libvirt configuration or express itself to + create a bridge (with or without an IP address/netmask/gateway) + + properties: + bridge_name - string (example value: br42) + vlan - integer (example value: 42) + bridge_gets_ip - boolean used during bridge creation + + if bridge_gets_ip then network address for bridge uses the properties: + gateway + broadcast + netmask + """ + + def __init__(self, *args, **kwargs): + super(VirtNetwork, self).__init__(*args, **kwargs) + + def virtXML(self): + """ generate XML for libvirt network """ + + libvirt_xml = open(FLAGS.net_libvirt_xml_template).read() + xml_info = {'name' : self.name, + 'bridge_name' : self.bridge_name, + 'device' : "vlan%s" % (self.vlan), + 'gateway' : self.gateway, + 'netmask' : self.netmask, + } + libvirt_xml = libvirt_xml % xml_info + return libvirt_xml + + def express(self, address=None): + """ creates a bridge device on top of the Vlan """ + super(VirtNetwork, self).express(address=address) + try: + logging.debug("Starting Bridge inteface for %s network" % (self.vlan)) + linux_net.bridge_create(self) + except: + pass + +class DHCPNetwork(VirtNetwork): + """ + properties: + dhcp_listen_address: the ip of the gateway / dhcp host + dhcp_range_start: the first ip to give out + dhcp_range_end: the last ip to give out + """ + def __init__(self, *args, **kwargs): + super(DHCPNetwork, self).__init__(*args, **kwargs) + logging.debug("Initing DHCPNetwork object...") + self.bridge_gets_ip = True + self.dhcp_listen_address = self.network[1] + self.dhcp_range_start = self.network[3] + self.dhcp_range_end = self.network[-2] + + def express(self, address=None): + super(DHCPNetwork, self).express(address=address) + if len(self.hosts.values()) > 0: + logging.debug("Starting dnsmasq server for network with vlan %s" % self.vlan) + linux_net.start_dnsmasq(self) + else: + logging.debug("Not launching dnsmasq cause I don't think we have any hosts.") + + def deexpress(self, address=None): + # if this is the last address, stop dns + super(DHCPNetwork, self).deexpress(address=address) + if len(self.hosts.values()) == 0: + linux_net.stop_dnsmasq(self) + else: + linux_net.start_dnsmasq(self) + + +class PrivateNetwork(DHCPNetwork): + def __init__(self, **kwargs): + super(PrivateNetwork, self).__init__(**kwargs) + # self.express() + + def to_dict(self): + return {'vlan': self.vlan, + 'network': self.network_str, + 'hosts': self.hosts} + + def express(self, *args, **kwargs): + super(PrivateNetwork, self).express(*args, **kwargs) + + + +class PublicNetwork(Network): + def __init__(self, network="192.168.216.0/24", **kwargs): + super(PublicNetwork, self).__init__(network=network, **kwargs) + self.express() + + def allocate_ip(self, user_id, mac): + for ip in self.range(): + address = str(ip) + if not address in self.hosts.keys(): + logging.debug("Allocating IP %s to %s" % (address, user_id)) + 
self.hosts[address] = { + "address" : address, "user_id" : user_id, 'mac' : mac + } + self.express(address=address) + return address + raise exception.NoMoreAddresses() + + def deallocate_ip(self, ip_str): + if not ip_str in self.hosts: + raise exception.AddressNotAllocated() + del self.hosts[ip_str] + # TODO(joshua) SCRUB from the leases file somehow + self.deexpress(address=ip_str) + + def associate_address(self, public_ip, private_ip, instance_id): + if not public_ip in self.hosts: + raise exception.AddressNotAllocated() + for addr in self.hosts.values(): + if addr.has_key('private_ip') and addr['private_ip'] == private_ip: + raise exception.AddressAlreadyAssociated() + if self.hosts[public_ip].has_key('private_ip'): + raise exception.AddressAlreadyAssociated() + self.hosts[public_ip]['private_ip'] = private_ip + self.hosts[public_ip]['instance_id'] = instance_id + self.express(address=public_ip) + + def disassociate_address(self, public_ip): + if not public_ip in self.hosts: + raise exception.AddressNotAllocated() + if not self.hosts[public_ip].has_key('private_ip'): + raise exception.AddressNotAssociated() + self.deexpress(public_ip) + del self.hosts[public_ip]['private_ip'] + del self.hosts[public_ip]['instance_id'] + # TODO Express the removal + + def deexpress(self, address): + addr = self.hosts[address] + public_ip = addr['address'] + private_ip = addr['private_ip'] + linux_net.remove_rule("PREROUTING -t nat -d %s -j DNAT --to %s" % (public_ip, private_ip)) + linux_net.remove_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" % (private_ip, public_ip)) + linux_net.remove_rule("FORWARD -d %s -p icmp -j ACCEPT" % (private_ip)) + for (protocol, port) in [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]: + linux_net.remove_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" % (private_ip, protocol, port)) + + def express(self, address=None): + logging.debug("Todo - need to create IPTables natting entries for this net.") + addresses = self.hosts.values() + if address: + addresses = [self.hosts[address]] + for addr in addresses: + if not addr.has_key('private_ip'): + continue + public_ip = addr['address'] + private_ip = addr['private_ip'] + linux_net.bind_public_ip(public_ip, FLAGS.public_interface) + linux_net.confirm_rule("PREROUTING -t nat -d %s -j DNAT --to %s" % (public_ip, private_ip)) + linux_net.confirm_rule("POSTROUTING -t nat -s %s -j SNAT --to %s" % (private_ip, public_ip)) + # TODO: Get these from the secgroup datastore entries + linux_net.confirm_rule("FORWARD -d %s -p icmp -j ACCEPT" % (private_ip)) + for (protocol, port) in [("tcp",80), ("tcp",22), ("udp",1194), ("tcp",443)]: + linux_net.confirm_rule("FORWARD -d %s -p %s --dport %s -j ACCEPT" % (private_ip, protocol, port)) + + +class NetworkPool(object): + # TODO - Allocations need to be system global + + def __init__(self): + self.network = IPy.IP(FLAGS.private_range) + netsize = FLAGS.network_size + if not netsize in [4,8,16,32,64,128,256,512,1024]: + raise exception.NotValidNetworkSize() + self.netsize = netsize + self.startvlan = FLAGS.vlan_start + + def get_from_vlan(self, vlan): + start = (vlan-self.startvlan) * self.netsize + net_str = "%s-%s" % (self.network[start], self.network[start + self.netsize - 1]) + logging.debug("Allocating %s" % net_str) + return net_str + + +class VlanPool(object): + def __init__(self, **kwargs): + self.start = FLAGS.vlan_start + self.end = FLAGS.vlan_end + self.vlans = kwargs.get('vlans', {}) + self.vlanpool = {} + self.manager = users.UserManager.instance() + for user_id, vlan in 
self.vlans.iteritems(): + self.vlanpool[vlan] = user_id + + def to_dict(self): + return {'vlans': self.vlans} + + def __str__(self): + return json.dumps(self.to_dict()) + + def __unicode__(self): + return json.dumps(self.to_dict()) + + @classmethod + def from_dict(cls, args): + for arg in args.keys(): + value = args[arg] + del args[arg] + args[str(arg)] = value + self = cls(**args) + return self + + @classmethod + def from_json(cls, json_string): + parsed = json.loads(json_string) + return cls.from_dict(parsed) + + def assign_vlan(self, user_id, vlan): + logging.debug("Assigning vlan %s to user %s" % (vlan, user_id)) + self.vlans[user_id] = vlan + self.vlanpool[vlan] = user_id + return self.vlans[user_id] + + def next(self, user_id): + for old_user_id, vlan in self.vlans.iteritems(): + if not self.manager.get_user(old_user_id): + _get_keeper()["%s-default" % old_user_id] = {} + del _get_keeper()["%s-default" % old_user_id] + del self.vlans[old_user_id] + return self.assign_vlan(user_id, vlan) + vlans = self.vlanpool.keys() + vlans.append(self.start) + nextvlan = max(vlans) + 1 + if nextvlan == self.end: + raise exception.AddressNotAllocated("Out of VLANs") + return self.assign_vlan(user_id, nextvlan) + + +class NetworkController(object): + """ The network controller is in charge of network connections """ + + def __init__(self, **kwargs): + logging.debug("Starting up the network controller.") + self.manager = users.UserManager.instance() + self._pubnet = None + if not _get_keeper()['vlans']: + _get_keeper()['vlans'] = {} + if not _get_keeper()['public']: + _get_keeper()['public'] = {'vlan': FLAGS.public_vlan, 'network' : FLAGS.public_range} + self.express() + + def reset(self): + _get_keeper()['public'] = {'vlan': FLAGS.public_vlan, 'network': FLAGS.public_range } + _get_keeper()['vlans'] = {} + # TODO : Get rid of old interfaces, bridges, and IPTables rules. + + @property + def public_net(self): + if not self._pubnet: + self._pubnet = PublicNetwork.from_dict(_get_keeper()['public']) + self._pubnet.load(**_get_keeper()['public']) + return self._pubnet + + @property + def vlan_pool(self): + return VlanPool.from_dict(_get_keeper()['vlans']) + + def get_network_from_name(self, network_name): + net_dict = _get_keeper()[network_name] + if net_dict: + return PrivateNetwork.from_dict(net_dict) + return None + + def get_public_ip_for_instance(self, instance_id): + # FIXME: this should be a lookup - iteration won't scale + for address_record in self.describe_addresses(type=PublicNetwork): + if address_record.get(u'instance_id', 'free') == instance_id: + return address_record[u'address'] + + def get_users_network(self, user_id): + """ get a user's private network, allocating one if needed """ + + user = self.manager.get_user(user_id) + if not user: + raise Exception("User %s doesn't exist, uhoh." 
% user_id) + usernet = self.get_network_from_name("%s-default" % user_id) + if not usernet: + pool = self.vlan_pool + vlan = pool.next(user_id) + private_pool = NetworkPool() + network_str = private_pool.get_from_vlan(vlan) + logging.debug("Constructing network %s and %s for %s" % (network_str, vlan, user_id)) + usernet = PrivateNetwork( + network=network_str, + vlan=vlan) + _get_keeper()["%s-default" % user_id] = usernet.to_dict() + _get_keeper()['vlans'] = pool.to_dict() + return usernet + + def allocate_address(self, user_id, mac=None, type=PrivateNetwork): + ip = None + net_name = None + if type == PrivateNetwork: + net = self.get_users_network(user_id) + ip = net.allocate_ip(user_id, mac) + net_name = net.name + _get_keeper()["%s-default" % user_id] = net.to_dict() + else: + net = self.public_net + ip = net.allocate_ip(user_id, mac) + net_name = net.name + _get_keeper()['public'] = net.to_dict() + return (ip, net_name) + + def deallocate_address(self, address): + if address in self.public_net.network: + net = self.public_net + rv = net.deallocate_ip(str(address)) + _get_keeper()['public'] = net.to_dict() + return rv + for user in self.manager.get_users(): + if address in self.get_users_network(user.id).network: + net = self.get_users_network(user.id) + rv = net.deallocate_ip(str(address)) + _get_keeper()["%s-default" % user.id] = net.to_dict() + return rv + raise exception.AddressNotAllocated() + + def describe_addresses(self, type=PrivateNetwork): + if type == PrivateNetwork: + addresses = [] + for user in self.manager.get_users(): + addresses.extend(self.get_users_network(user.id).list_addresses()) + return addresses + return self.public_net.list_addresses() + + def associate_address(self, address, private_ip, instance_id): + net = self.public_net + rv = net.associate_address(address, private_ip, instance_id) + _get_keeper()['public'] = net.to_dict() + return rv + + def disassociate_address(self, address): + net = self.public_net + rv = net.disassociate_address(address) + _get_keeper()['public'] = net.to_dict() + return rv + + def express(self,address=None): + for user in self.manager.get_users(): + self.get_users_network(user.id).express() + + def report_state(self): + pass + diff --git a/nova/compute/node.py b/nova/compute/node.py new file mode 100644 index 000000000000..a4de0f98ae02 --- /dev/null +++ b/nova/compute/node.py @@ -0,0 +1,549 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Compute Node: + + Runs on each compute node, managing the + hypervisor using libvirt. 
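+
+    The Node service wraps a libvirt connection (or the in-process fake
+    hypervisor when FLAGS.fake_libvirt is set) and handles running,
+    terminating and rebooting instances, fetching console output, and
+    attaching/detaching AoE-exported volumes via virsh.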
+ +""" + +import base64 +import json +import logging +import os +import random +import shutil +import sys + +from nova import vendor +from twisted.internet import defer +from twisted.internet import task +from twisted.application import service + +try: + import libvirt +except Exception, err: + logging.warning('no libvirt found') + +from nova import exception +from nova import fakevirt +from nova import flags +from nova import process +from nova import utils +from nova.compute import disk +from nova.compute import model +from nova.compute import network +from nova.objectstore import image # for image_path flag + +FLAGS = flags.FLAGS +flags.DEFINE_string('libvirt_xml_template', + utils.abspath('compute/libvirt.xml.template'), + 'Network XML Template') +flags.DEFINE_bool('use_s3', True, + 'whether to get images from s3 or use local copy') +flags.DEFINE_string('instances_path', utils.abspath('../instances'), + 'where instances are stored on disk') +flags.DEFINE_string('instances_prefix', 'compute-', + 'prefix for keepers for instances') + +INSTANCE_TYPES = {} +INSTANCE_TYPES['m1.tiny'] = {'memory_mb': 512, 'vcpus': 1, 'local_gb': 0} +INSTANCE_TYPES['m1.small'] = {'memory_mb': 1024, 'vcpus': 1, 'local_gb': 10} +INSTANCE_TYPES['m1.medium'] = {'memory_mb': 2048, 'vcpus': 2, 'local_gb': 10} +INSTANCE_TYPES['m1.large'] = {'memory_mb': 4096, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['m1.xlarge'] = {'memory_mb': 8192, 'vcpus': 4, 'local_gb': 10} +INSTANCE_TYPES['c1.medium'] = {'memory_mb': 2048, 'vcpus': 4, 'local_gb': 10} + +# The number of processes to start in our process pool +# TODO(termie): this should probably be a flag and the pool should probably +# be a singleton +PROCESS_POOL_SIZE = 4 + + +class Node(object, service.Service): + """ + Manages the running instances. 
+ """ + def __init__(self): + """ load configuration options for this node and connect to libvirt """ + super(Node, self).__init__() + self._instances = {} + self._conn = self._get_connection() + self._pool = process.Pool(PROCESS_POOL_SIZE) + self.instdir = model.InstanceDirectory() + # TODO(joshua): This needs to ensure system state, specifically: modprobe aoe + + def _get_connection(self): + """ returns a libvirt connection object """ + # TODO(termie): maybe lazy load after initial check for permissions + # TODO(termie): check whether we can be disconnected + if FLAGS.fake_libvirt: + conn = fakevirt.FakeVirtConnection.instance() + else: + auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], + 'root', + None] + conn = libvirt.openAuth('qemu:///system', auth, 0) + if conn == None: + logging.error('Failed to open connection to the hypervisor') + sys.exit(1) + return conn + + def noop(self): + """ simple test of an AMQP message call """ + return defer.succeed('PONG') + + def get_instance(self, instance_id): + # inst = self.instdir.get(instance_id) + # return inst + if self.instdir.exists(instance_id): + return Instance.fromName(self._conn, self._pool, instance_id) + return None + + @exception.wrap_exception + def adopt_instances(self): + """ if there are instances already running, adopt them """ + return defer.succeed(0) + instance_names = [self._conn.lookupByID(x).name() + for x in self._conn.listDomainsID()] + for name in instance_names: + try: + new_inst = Instance.fromName(self._conn, self._pool, name) + new_inst.update_state() + except: + pass + return defer.succeed(len(self._instances)) + + @exception.wrap_exception + def describe_instances(self): + retval = {} + for inst in self.instdir.by_node(FLAGS.node_name): + retval[inst['instance_id']] = (Instance.fromName(self._conn, self._pool, inst['instance_id'])) + return retval + + @defer.inlineCallbacks + def report_state(self): + logging.debug("Reporting State") + return + + @exception.wrap_exception + def run_instance(self, instance_id, **_kwargs): + """ launch a new instance with specified options """ + logging.debug("Starting instance %s..." 
% (instance_id)) + inst = self.instdir.get(instance_id) + inst['node_name'] = FLAGS.node_name + inst.save() + # TODO(vish) check to make sure the availability zone matches + new_inst = Instance(self._conn, name=instance_id, + pool=self._pool, data=inst) + if new_inst.is_running(): + raise exception.Error("Instance is already running") + d = new_inst.spawn() + return d + + @exception.wrap_exception + def terminate_instance(self, instance_id): + """ terminate an instance on this machine """ + logging.debug("Got told to terminate instance %s" % instance_id) + instance = self.get_instance(instance_id) + # inst = self.instdir.get(instance_id) + if not instance: + raise exception.Error( + 'trying to terminate unknown instance: %s' % instance_id) + d = instance.destroy() + # d.addCallback(lambda x: inst.destroy()) + return d + + @exception.wrap_exception + def reboot_instance(self, instance_id): + """ reboot an instance on this server + KVM doesn't support reboot, so we terminate and restart """ + instance = self.get_instance(instance_id) + if not instance: + raise exception.Error( + 'trying to reboot unknown instance: %s' % instance_id) + return instance.reboot() + + @defer.inlineCallbacks + @exception.wrap_exception + def get_console_output(self, instance_id): + """ send the console output for an instance """ + logging.debug("Getting console output for %s" % (instance_id)) + inst = self.instdir.get(instance_id) + instance = self.get_instance(instance_id) + if not instance: + raise exception.Error( + 'trying to get console log for unknown: %s' % instance_id) + rv = yield instance.console_output() + # TODO(termie): this stuff belongs in the API layer, no need to + # munge the data we send to ourselves + output = {"InstanceId" : instance_id, + "Timestamp" : "2", + "output" : base64.b64encode(rv)} + defer.returnValue(output) + + @defer.inlineCallbacks + @exception.wrap_exception + def attach_volume(self, instance_id = None, + aoe_device = None, mountpoint = None): + utils.runthis("Attached Volume: %s", + "sudo virsh attach-disk %s /dev/etherd/%s %s" + % (instance_id, aoe_device, mountpoint.split("/")[-1])) + return defer.succeed(True) + + def _init_aoe(self): + utils.runthis("Doin an AoE discover, returns %s", "sudo aoe-discover") + utils.runthis("Doin an AoE stat, returns %s", "sudo aoe-stat") + + @exception.wrap_exception + def detach_volume(self, instance_id, mountpoint): + """ detach a volume from an instance """ + # despite the documentation, virsh detach-disk just wants the device + # name without the leading /dev/ + target = mountpoint.rpartition('/dev/')[2] + utils.runthis("Detached Volume: %s", "sudo virsh detach-disk %s %s " + % (instance_id, target)) + return defer.succeed(True) + + +class Group(object): + def __init__(self, group_id): + self.group_id = group_id + + +class ProductCode(object): + def __init__(self, product_code): + self.product_code = product_code + + +def _create_image(data, libvirt_xml): + """ create libvirt.xml and copy files into instance path """ + def basepath(path=''): + return os.path.abspath(os.path.join(data['basepath'], path)) + + def imagepath(path=''): + return os.path.join(FLAGS.images_path, path) + + def image_url(path): + return "%s:%s/_images/%s" % (FLAGS.s3_host, FLAGS.s3_port, path) + + logging.info(basepath('disk')) + try: + os.makedirs(data['basepath']) + os.chmod(data['basepath'], 0777) + except OSError: + # TODO: there is already an instance with this name, do something + pass + try: + logging.info('Creating image for: %s', data['instance_id']) + f 
= open(basepath('libvirt.xml'), 'w') + f.write(libvirt_xml) + f.close() + if not FLAGS.fake_libvirt: + if FLAGS.use_s3: + if not os.path.exists(basepath('disk')): + utils.fetchfile(image_url("%s/image" % data['image_id']), + basepath('disk-raw')) + if not os.path.exists(basepath('kernel')): + utils.fetchfile(image_url("%s/image" % data['kernel_id']), + basepath('kernel')) + if not os.path.exists(basepath('ramdisk')): + utils.fetchfile(image_url("%s/image" % data['ramdisk_id']), + basepath('ramdisk')) + else: + if not os.path.exists(basepath('disk')): + shutil.copyfile(imagepath("%s/image" % data['image_id']), + basepath('disk-raw')) + if not os.path.exists(basepath('kernel')): + shutil.copyfile(imagepath("%s/image" % data['kernel_id']), + basepath('kernel')) + if not os.path.exists(basepath('ramdisk')): + shutil.copyfile(imagepath("%s/image" % + data['ramdisk_id']), + basepath('ramdisk')) + if data['key_data']: + logging.info('Injecting key data into image %s' % + data['image_id']) + disk.inject_key(data['key_data'], basepath('disk-raw')) + if os.path.exists(basepath('disk')): + os.remove(basepath('disk')) + bytes = INSTANCE_TYPES[data['instance_type']]['local_gb'] * 1024 * 1024 * 1024 + disk.partition(basepath('disk-raw'), basepath('disk'), bytes) + logging.info('Done create image for: %s', data['instance_id']) + except Exception as ex: + return {'exception': ex} + + +class Instance(object): + + NOSTATE = 0x00 + RUNNING = 0x01 + BLOCKED = 0x02 + PAUSED = 0x03 + SHUTDOWN = 0x04 + SHUTOFF = 0x05 + CRASHED = 0x06 + + def is_pending(self): + return (self.state == Instance.NOSTATE or self.state == 'pending') + + def is_destroyed(self): + return self.state == Instance.SHUTOFF + + def is_running(self): + logging.debug("Instance state is: %s" % self.state) + return (self.state == Instance.RUNNING or self.state == 'running') + + def __init__(self, conn, pool, name, data): + # TODO(termie): pool should probably be a singleton instead of being passed + # here and in the classmethods + """ spawn an instance with a given name """ + # TODO(termie): pool should probably be a singleton instead of being passed + # here and in the classmethods + self._pool = pool + self._conn = conn + self.datamodel = data + print data + + # NOTE(termie): to be passed to multiprocess self._s must be + # pickle-able by cPickle + self._s = {} + + # TODO(termie): is instance_type that actual name for this? 
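+ # The requested type must be one of the flavors defined in INSTANCE_TYPES
+ # at module level; its memory_mb/vcpus/local_gb values are merged into
+ # self._s before the rest of the instance's fields are filled in below.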
+ size = data.get('instance_type', FLAGS.default_instance_type) + if size not in INSTANCE_TYPES: + raise exception.Error('invalid instance type: %s' % size) + + self._s.update(INSTANCE_TYPES[size]) + + self._s['name'] = name + self._s['instance_id'] = name + self._s['instance_type'] = size + self._s['mac_address'] = data.get( + 'mac_address', 'df:df:df:df:df:df') + self._s['basepath'] = data.get( + 'basepath', os.path.abspath( + os.path.join(FLAGS.instances_path, self.name))) + self._s['memory_kb'] = int(self._s['memory_mb']) * 1024 + # TODO(joshua) - Get this from network directory controller later + self._s['bridge_name'] = data.get('bridge_name', 'br0') + self._s['image_id'] = data.get('image_id', FLAGS.default_image) + self._s['kernel_id'] = data.get('kernel_id', FLAGS.default_kernel) + self._s['ramdisk_id'] = data.get('ramdisk_id', FLAGS.default_ramdisk) + self._s['owner_id'] = data.get('owner_id', '') + self._s['node_name'] = data.get('node_name', '') + self._s['user_data'] = data.get('user_data', '') + self._s['ami_launch_index'] = data.get('ami_launch_index', None) + self._s['launch_time'] = data.get('launch_time', None) + self._s['reservation_id'] = data.get('reservation_id', None) + # self._s['state'] = Instance.NOSTATE + self._s['state'] = data.get('state', Instance.NOSTATE) + + self._s['key_data'] = data.get('key_data', None) + + # TODO: we may not need to save the next few + self._s['groups'] = data.get('security_group', ['default']) + self._s['product_codes'] = data.get('product_code', []) + self._s['key_name'] = data.get('key_name', None) + self._s['addressing_type'] = data.get('addressing_type', None) + self._s['availability_zone'] = data.get('availability_zone', 'fixme') + + #TODO: put real dns items here + self._s['private_dns_name'] = data.get('private_dns_name', 'fixme') + self._s['dns_name'] = data.get('dns_name', + self._s['private_dns_name']) + logging.debug("Finished init of Instance with id of %s" % name) + + def toXml(self): + # TODO(termie): cache? 
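+ # The template is filled by plain "%(key)s" string interpolation over a
+ # copy of self._s; e.g. a template line such as
+ #     <memory>%(memory_kb)s</memory>
+ # (illustrative, not quoted from the shipped template) would pick up the
+ # memory_kb value computed in __init__.  The whole datamodel is also
+ # embedded as JSON under the 'nova' key.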
+ logging.debug("Starting the toXML method") + libvirt_xml = open(FLAGS.libvirt_xml_template).read() + xml_info = self._s.copy() + #xml_info.update(self._s) + + # TODO(termie): lazy lazy hack because xml is annoying + xml_info['nova'] = json.dumps(self._s) + libvirt_xml = libvirt_xml % xml_info + logging.debug("Finished the toXML method") + + return libvirt_xml + + @classmethod + def fromName(cls, conn, pool, name): + """ use the saved data for reloading the instance """ + # if FLAGS.fake_libvirt: + # raise Exception('this is a bit useless, eh?') + + instdir = model.InstanceDirectory() + instance = instdir.get(name) + return cls(conn=conn, pool=pool, name=name, data=instance) + + @property + def state(self): + return self._s['state'] + + @property + def name(self): + return self._s['name'] + + def describe(self): + return self._s + + def info(self): + logging.debug("Getting info for dom %s" % self.name) + virt_dom = self._conn.lookupByName(self.name) + (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() + return {'state': state, + 'max_mem': max_mem, + 'mem': mem, + 'num_cpu': num_cpu, + 'cpu_time': cpu_time} + + def update_state(self): + info = self.info() + self._s['state'] = info['state'] + self.datamodel['state'] = info['state'] + self.datamodel['node_name'] = FLAGS.node_name + self.datamodel.save() + + @exception.wrap_exception + def destroy(self): + if self.is_destroyed(): + self.datamodel.destroy() + raise exception.Error('trying to destroy already destroyed' + ' instance: %s' % self.name) + + self._s['state'] = Instance.SHUTDOWN + self.datamodel['state'] = 'shutting_down' + self.datamodel.save() + try: + virt_dom = self._conn.lookupByName(self.name) + virt_dom.destroy() + except Exception, _err: + pass + # If the instance is already terminated, we're still happy + d = defer.Deferred() + d.addCallback(lambda x: self.datamodel.destroy()) + # TODO(termie): short-circuit me for tests + # WE'LL save this for when we do shutdown, + # instead of destroy - but destroy returns immediately + timer = task.LoopingCall(f=None) + def _wait_for_shutdown(): + try: + info = self.info() + if info['state'] == Instance.SHUTDOWN: + self._s['state'] = Instance.SHUTDOWN + #self.datamodel['state'] = 'shutdown' + #self.datamodel.save() + timer.stop() + d.callback(None) + except Exception: + self._s['state'] = Instance.SHUTDOWN + timer.stop() + d.callback(None) + timer.f = _wait_for_shutdown + timer.start(interval=0.5, now=True) + return d + + @defer.inlineCallbacks + @exception.wrap_exception + def reboot(self): + # if not self.is_running(): + # raise exception.Error( + # 'trying to reboot a non-running' + # 'instance: %s (state: %s)' % (self.name, self.state)) + + yield self._conn.lookupByName(self.name).destroy() + self.datamodel['state'] = 'rebooting' + self.datamodel.save() + self._s['state'] = Instance.NOSTATE + self._conn.createXML(self.toXml(), 0) + # TODO(termie): this should actually register a callback to check + # for successful boot + self.datamodel['state'] = 'running' + self.datamodel.save() + self._s['state'] = Instance.RUNNING + logging.debug('rebooted instance %s' % self.name) + defer.returnValue(None) + + @exception.wrap_exception + def spawn(self): + self.datamodel['state'] = "spawning" + self.datamodel.save() + logging.debug("Starting spawn in Instance") + xml = self.toXml() + def _launch(retvals): + self.datamodel['state'] = 'launching' + self.datamodel.save() + try: + logging.debug("Arrived in _launch") + if retvals and 'exception' in retvals: + raise retvals['exception'] + 
self._conn.createXML(self.toXml(), 0) + # TODO(termie): this should actually register + # a callback to check for successful boot + self._s['state'] = Instance.RUNNING + self.datamodel['state'] = 'running' + self.datamodel.save() + logging.debug("Instance is running") + except Exception as ex: + logging.debug(ex) + self.datamodel['state'] = 'shutdown' + self.datamodel.save() + #return self + + d = self._pool.apply(_create_image, self._s, xml) + d.addCallback(_launch) + return d + + @exception.wrap_exception + def console_output(self): + if not FLAGS.fake_libvirt: + fname = os.path.abspath( + os.path.join(self._s['basepath'], 'console.log')) + with open(fname, 'r') as f: + console = f.read() + else: + console = 'FAKE CONSOLE OUTPUT' + return defer.succeed(console) + + def generate_mac(self): + mac = [0x00, 0x16, 0x3e, random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), random.randint(0x00, 0xff) + ] + return ':'.join(map(lambda x: "%02x" % x, mac)) + + + +class NetworkNode(Node): + def __init__(self, **kwargs): + super(NetworkNode, self).__init__(**kwargs) + self.virtNets = {} + + def add_network(self, net_dict): + net = network.VirtNetwork(**net_dict) + self.virtNets[net.name] = net + self.virtNets[net.name].express() + return defer.succeed({'retval': 'network added'}) + + @exception.wrap_exception + def run_instance(self, instance_id, **kwargs): + inst = self.instdir.get(instance_id) + net_dict = json.loads(inst.get('network_str', "{}")) + self.add_network(net_dict) + return super(NetworkNode, self).run_instance(instance_id, **kwargs) + diff --git a/nova/crypto.py b/nova/crypto.py new file mode 100644 index 000000000000..6add55ee5008 --- /dev/null +++ b/nova/crypto.py @@ -0,0 +1,224 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Wrappers around standard crypto, including root and intermediate CAs, +SSH keypairs and x509 certificates. 
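+
+Key pairs are generated by shelling out to ssh-keygen, and certificate
+requests are created and signed with the openssl command line tool using
+the CA material under FLAGS.ca_path.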
+""" + +import hashlib +import logging +import os +import shutil +import tempfile +import time +import utils + +from nova import vendor +import M2Crypto + +from nova import exception +from nova import flags + + +FLAGS = flags.FLAGS +flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA') +flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys') +flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA') +flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?') + + +def ca_path(username): + if username: + return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, username) + return "%s/cacert.pem" % (FLAGS.ca_path) + +def fetch_ca(username=None, chain=True): + if not FLAGS.use_intermediate_ca: + username = None + buffer = "" + if username: + with open(ca_path(username),"r") as cafile: + buffer += cafile.read() + if username and not chain: + return buffer + with open(ca_path(None),"r") as cafile: + buffer += cafile.read() + return buffer + +def generate_key_pair(bits=1024): + # what is the magic 65537? + + tmpdir = tempfile.mkdtemp() + keyfile = os.path.join(tmpdir, 'temp') + utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile)) + (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile)) + fingerprint = out.split(' ')[1] + private_key = open(keyfile).read() + public_key = open(keyfile + '.pub').read() + + shutil.rmtree(tmpdir) + # code below returns public key in pem format + # key = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None) + # private_key = key.as_pem(cipher=None) + # bio = M2Crypto.BIO.MemoryBuffer() + # key.save_pub_key_bio(bio) + # public_key = bio.read() + # public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key) + + return (private_key, public_key, fingerprint) + + +def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'): + """requires lsh-utils""" + convert="sed -e'1d' -e'$d' | pkcs1-conv --public-key-info --base-64 |" \ + + " sexp-conv | sed -e'1s/(rsa-pkcs1/(rsa-pkcs1-sha1/' | sexp-conv -s" \ + + " transport | lsh-export-key --openssh" + (out, err) = utils.execute(convert, ssl_public_key) + if err: + raise exception.Error("Failed to generate key: %s", err) + return '%s %s@%s\n' %(out.strip(), name, suffix) + + +def generate_x509_cert(subject="/C=US/ST=California/L=The Mission/O=CloudFed/OU=NOVA/CN=foo", bits=1024): + tmpdir = tempfile.mkdtemp() + keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) + csrfile = os.path.join(tmpdir, 'temp.csr') + logging.debug("openssl genrsa -out %s %s" % (keyfile, bits)) + utils.runthis("Generating private key: %s", "openssl genrsa -out %s %s" % (keyfile, bits)) + utils.runthis("Generating CSR: %s", "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject)) + private_key = open(keyfile).read() + csr = open(csrfile).read() + shutil.rmtree(tmpdir) + return (private_key, csr) + + +def sign_csr(csr_text, intermediate=None): + if not FLAGS.use_intermediate_ca: + intermediate = None + if not intermediate: + return _sign_csr(csr_text, FLAGS.ca_path) + user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate) + if not os.path.exists(user_ca): + start = os.getcwd() + os.chdir(FLAGS.ca_path) + utils.runthis("Generating intermediate CA: %s", "sh geninter.sh %s" % (intermediate)) + os.chdir(start) + return _sign_csr(csr_text, user_ca) + + +def _sign_csr(csr_text, ca_folder): + tmpfolder = tempfile.mkdtemp() + csrfile = open("%s/inbound.csr" % (tmpfolder), "w") + 
csrfile.write(csr_text) + csrfile.close() + logging.debug("Flags path: %s" % ca_folder) + start = os.getcwd() + # Change working dir to CA + os.chdir(ca_folder) + utils.runthis("Signing cert: %s", "openssl ca -batch -out %s/outbound.crt -config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder)) + os.chdir(start) + with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile: + return crtfile.read() + + +def mkreq(bits, subject="foo", ca=0): + pk = M2Crypto.EVP.PKey() + req = M2Crypto.X509.Request() + rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None) + pk.assign_rsa(rsa) + rsa = None # should not be freed here + req.set_pubkey(pk) + req.set_subject(subject) + req.sign(pk,'sha512') + assert req.verify(pk) + pk2 = req.get_pubkey() + assert req.verify(pk2) + return req, pk + + +def mkcacert(subject='nova', years=1): + req, pk = mkreq(2048, subject, ca=1) + pkey = req.get_pubkey() + sub = req.get_subject() + cert = M2Crypto.X509.X509() + cert.set_serial_number(1) + cert.set_version(2) + cert.set_subject(sub) # FIXME subject is not set in mkreq yet + t = long(time.time()) + time.timezone + now = M2Crypto.ASN1.ASN1_UTCTIME() + now.set_time(t) + nowPlusYear = M2Crypto.ASN1.ASN1_UTCTIME() + nowPlusYear.set_time(t + (years * 60 * 60 * 24 * 365)) + cert.set_not_before(now) + cert.set_not_after(nowPlusYear) + issuer = M2Crypto.X509.X509_Name() + issuer.C = "US" + issuer.CN = subject + cert.set_issuer(issuer) + cert.set_pubkey(pkey) + ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:TRUE') + cert.add_ext(ext) + cert.sign(pk, 'sha512') + + # print 'cert', dir(cert) + print cert.as_pem() + print pk.get_rsa().as_pem() + + return cert, pk, pkey + + + +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# http://code.google.com/p/boto + +def compute_md5(fp): + """ + @type fp: file + @param fp: File pointer to the file to MD5 hash. The file pointer will be + reset to the beginning of the file before the method returns. 
+ + @rtype: tuple + @return: the hex digest version of the MD5 hash + """ + m = hashlib.md5() + fp.seek(0) + s = fp.read(8192) + while s: + m.update(s) + s = fp.read(8192) + hex_md5 = m.hexdigest() + # size = fp.tell() + fp.seek(0) + return hex_md5 diff --git a/nova/datastore.py b/nova/datastore.py new file mode 100644 index 000000000000..57940d98b746 --- /dev/null +++ b/nova/datastore.py @@ -0,0 +1,367 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Datastore: + +Providers the Keeper class, a simple pseudo-dictionary that +persists on disk. + +MAKE Sure that ReDIS is running, and your flags are set properly, +before trying to run this. +""" + +import json +import logging +import os +import sqlite3 + +from nova import vendor +import redis + +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS +flags.DEFINE_string('datastore_path', utils.abspath('../keeper'), + 'where keys are stored on disk') +flags.DEFINE_string('redis_host', '127.0.0.1', + 'Host that redis is running on.') +flags.DEFINE_integer('redis_port', 6379, + 'Port that redis is running on.') +flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away') +flags.DEFINE_string('keeper_backend', 'redis', + 'which backend to use for keeper') + + +class Redis(object): + def __init__(self): + if hasattr(self.__class__, '_instance'): + raise Exception('Attempted to instantiate singleton') + + @classmethod + def instance(cls): + if not hasattr(cls, '_instance'): + inst = redis.Redis(host=FLAGS.redis_host, port=FLAGS.redis_port, db=FLAGS.redis_db) + cls._instance = inst + return cls._instance + + +class RedisModel(object): + """ Wrapper around redis-backed properties """ + object_type = 'generic' + def __init__(self, object_id): + """ loads an object from the datastore if exists """ + self.object_id = object_id + self.initial_state = {} + self.state = Redis.instance().hgetall(self.__redis_key) + if self.state: + self.initial_state = self.state + else: + self.set_default_state() + + def set_default_state(self): + self.state = {'state' : 'pending'} + self.state[self.object_type+"_id"] = self.object_id + + @property + def __redis_key(self): + """ Magic string for instance keys """ + return '%s:%s' % (self.object_type, self.object_id) + + def __repr__(self): + return "<%s:%s>" % (self.object_type, self.object_id) + + def __str__(self): + return str(self.state) + + def keys(self): + return self.state.keys() + + def copy(self): + copyDict = {} + for item in self.keys(): + copyDict[item] = self[item] + return copyDict + + def get(self, item, default): + return self.state.get(item, default) + + def __getitem__(self, item): + return self.state[item] + + def __setitem__(self, item, val): + self.state[item] = val + return self.state[item] + + def __delitem__(self, item): + """ We don't support this """ + raise Exception("Silly monkey, we NEED all our properties.") + + def save(self): + """ update the directory with the state from this 
instance """ + # TODO(ja): implement hmset in redis-py and use it + # instead of multiple calls to hset + for key, val in self.state.iteritems(): + # if (not self.initial_state.has_key(key) + # or self.initial_state[key] != val): + Redis.instance().hset(self.__redis_key, key, val) + if self.initial_state == {}: + self.first_save() + self.initial_state = self.state + return True + + def first_save(self): + pass + + def destroy(self): + """ deletes all related records from datastore. + does NOT do anything to running state. + """ + Redis.instance().delete(self.__redis_key) + return True + + +def slugify(key, prefix=None): + """ + Key has to be a valid filename. Slugify solves that. + """ + return "%s%s" % (prefix, key) + + +class SqliteKeeper(object): + """ Keeper implementation in SQLite, mostly for in-memory testing """ + _conn = {} # class variable + + def __init__(self, prefix): + self.prefix = prefix + + @property + def conn(self): + if self.prefix not in self.__class__._conn: + logging.debug('no sqlite connection (%s), making new', self.prefix) + if FLAGS.datastore_path != ':memory:': + try: + os.mkdir(FLAGS.datastore_path) + except Exception: + pass + conn = sqlite3.connect(os.path.join( + FLAGS.datastore_path, '%s.sqlite' % self.prefix)) + else: + conn = sqlite3.connect(':memory:') + + c = conn.cursor() + try: + c.execute('''CREATE TABLE data (item text, value text)''') + conn.commit() + except Exception: + logging.exception('create table failed') + finally: + c.close() + + self.__class__._conn[self.prefix] = conn + + return self.__class__._conn[self.prefix] + + def __delitem__(self, item): + #logging.debug('sqlite deleting %s', item) + c = self.conn.cursor() + try: + c.execute('DELETE FROM data WHERE item = ?', (item, )) + self.conn.commit() + except Exception: + logging.exception('delete failed: %s', item) + finally: + c.close() + + def __getitem__(self, item): + #logging.debug('sqlite getting %s', item) + result = None + c = self.conn.cursor() + try: + c.execute('SELECT value FROM data WHERE item = ?', (item, )) + row = c.fetchone() + if row: + result = json.loads(row[0]) + else: + result = None + except Exception: + logging.exception('select failed: %s', item) + finally: + c.close() + #logging.debug('sqlite got %s: %s', item, result) + return result + + def __setitem__(self, item, value): + serialized_value = json.dumps(value) + insert = True + if self[item] is not None: + insert = False + #logging.debug('sqlite insert %s: %s', item, value) + c = self.conn.cursor() + try: + if insert: + c.execute('INSERT INTO data VALUES (?, ?)', + (item, serialized_value)) + else: + c.execute('UPDATE data SET item=?, value=? 
WHERE item = ?', + (item, serialized_value, item)) + + self.conn.commit() + except Exception: + logging.exception('select failed: %s', item) + finally: + c.close() + + def clear(self): + if self.prefix not in self.__class__._conn: + return + self.conn.close() + if FLAGS.datastore_path != ':memory:': + os.unlink(os.path.join(FLAGS.datastore_path, '%s.sqlite' % self.prefix)) + del self.__class__._conn[self.prefix] + + def clear_all(self): + for k, conn in self.__class__._conn.iteritems(): + conn.close() + if FLAGS.datastore_path != ':memory:': + os.unlink(os.path.join(FLAGS.datastore_path, + '%s.sqlite' % self.prefix)) + self.__class__._conn = {} + + + def set_add(self, item, value): + group = self[item] + if not group: + group = [] + group.append(value) + self[item] = group + + def set_is_member(self, item, value): + group = self[item] + if not group: + return False + return value in group + + def set_remove(self, item, value): + group = self[item] + if not group: + group = [] + group.remove(value) + self[item] = group + + def set_fetch(self, item): + # TODO(termie): I don't really know what set_fetch is supposed to do + group = self[item] + if not group: + group = [] + return iter(group) + +class JsonKeeper(object): + """ + Simple dictionary class that persists using + JSON in files saved to disk. + """ + def __init__(self, prefix): + self.prefix = prefix + + def __delitem__(self, item): + """ + Removing a key means deleting a file from disk. + """ + item = slugify(item, self.prefix) + path = "%s/%s" % (FLAGS.datastore_path, item) + if os.path.isfile(path): + os.remove(path) + + def __getitem__(self, item): + """ + Fetch file contents and dejsonify them. + """ + item = slugify(item, self.prefix) + path = "%s/%s" % (FLAGS.datastore_path, item) + if os.path.isfile(path): + return json.load(open(path, 'r')) + return None + + def __setitem__(self, item, value): + """ + JSON encode value and save to file. + """ + item = slugify(item, self.prefix) + path = "%s/%s" % (FLAGS.datastore_path, item) + with open(path, "w") as blobfile: + blobfile.write(json.dumps(value)) + return value + + +class RedisKeeper(object): + """ + Simple dictionary class that persists using + ReDIS. + """ + def __init__(self, prefix="redis-"): + self.prefix = prefix + Redis.instance().ping() + + def __setitem__(self, item, value): + """ + JSON encode value and save to file. 
+ """ + item = slugify(item, self.prefix) + Redis.instance().set(item, json.dumps(value)) + return value + + def __getitem__(self, item): + item = slugify(item, self.prefix) + value = Redis.instance().get(item) + if value: + return json.loads(value) + + def __delitem__(self, item): + item = slugify(item, self.prefix) + return Redis.instance().delete(item) + + def clear(self): + raise NotImplementedError() + + def clear_all(self): + raise NotImplementedError() + + def set_add(self, item, value): + item = slugify(item, self.prefix) + return Redis.instance().sadd(item, json.dumps(value)) + + def set_is_member(self, item, value): + item = slugify(item, self.prefix) + return Redis.instance().sismember(item, json.dumps(value)) + + def set_remove(self, item, value): + item = slugify(item, self.prefix) + return Redis.instance().srem(item, json.dumps(value)) + + def set_fetch(self, item): + item = slugify(item, self.prefix) + for obj in Redis.instance().sinter([item]): + yield json.loads(obj) + + +def Keeper(prefix=''): + KEEPERS = {'redis': RedisKeeper, + 'sqlite': SqliteKeeper} + return KEEPERS[FLAGS.keeper_backend](prefix) + diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py new file mode 100644 index 000000000000..dbf15d2592ce --- /dev/null +++ b/nova/endpoint/__init__.py @@ -0,0 +1,28 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:mod:`nova.endpoint` -- Main NOVA Api endpoints +===================================================== + +.. automodule:: nova.endpoint + :platform: Unix + :synopsis: REST APIs for all nova functions +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" \ No newline at end of file diff --git a/nova/endpoint/admin.py b/nova/endpoint/admin.py new file mode 100644 index 000000000000..e9880acc5e18 --- /dev/null +++ b/nova/endpoint/admin.py @@ -0,0 +1,131 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Admin API controller, exposed through http via the api worker. 
+""" + +import base64 + +def user_dict(user, base64_file=None): + """Convert the user object to a result dict""" + if user: + return { + 'username': user.id, + 'accesskey': user.access, + 'secretkey': user.secret, + 'file': base64_file, + } + else: + return {} + +def node_dict(node): + """Convert a node object to a result dict""" + if node: + return { + 'node_id': node.id, + 'workers': ", ".join(node.workers), + 'disks': ", ".join(node.disks), + 'ram': node.memory, + 'load_average' : node.load_average, + } + else: + return {} + +def admin_only(target): + """Decorator for admin-only API calls""" + def wrapper(*args, **kwargs): + """Internal wrapper method for admin-only API calls""" + context = args[1] + if context.user.is_admin(): + return target(*args, **kwargs) + else: + return {} + + return wrapper + +class AdminController(object): + """ + API Controller for users, node status, and worker mgmt. + Trivial admin_only wrapper will be replaced with RBAC, + allowing project managers to administer project users. + + """ + def __init__(self, user_manager, node_manager=None): + self.user_manager = user_manager + self.node_manager = node_manager + + def __str__(self): + return 'AdminController' + + @admin_only + def describe_user(self, _context, name, **_kwargs): + """Returns user data, including access and secret keys. + """ + return user_dict(self.user_manager.get_user(name)) + + @admin_only + def describe_users(self, _context, **_kwargs): + """Returns all users - should be changed to deal with a list. + """ + return {'userSet': + [user_dict(u) for u in self.user_manager.get_users()] } + + @admin_only + def register_user(self, _context, name, **_kwargs): + """ Creates a new user, and returns generated credentials. + """ + self.user_manager.create_user(name) + + return user_dict(self.user_manager.get_user(name)) + + @admin_only + def deregister_user(self, _context, name, **_kwargs): + """Deletes a single user (NOT undoable.) + Should throw an exception if the user has instances, + volumes, or buckets remaining. + """ + self.user_manager.delete_user(name) + + return True + + @admin_only + def generate_x509_for_user(self, _context, name, **_kwargs): + """Generates and returns an x509 certificate for a single user. + Is usually called from a client that will wrap this with + access and secret key info, and return a zip file. + """ + user = self.user_manager.get_user(name) + return user_dict(user, base64.b64encode(user.get_credentials())) + + @admin_only + def describe_nodes(self, _context, **_kwargs): + """Returns status info for all nodes. Includes: + * Disk Space + * Instance List + * RAM used + * CPU used + * DHCP servers running + * Iptables / bridges + """ + return {'nodeSet': + [node_dict(n) for n in self.node_manager.get_nodes()] } + + @admin_only + def describe_node(self, _context, name, **_kwargs): + """Returns status info for single node. + """ + return node_dict(self.node_manager.get_node(name)) + diff --git a/nova/endpoint/api.py b/nova/endpoint/api.py new file mode 100755 index 000000000000..5bbda3f56265 --- /dev/null +++ b/nova/endpoint/api.py @@ -0,0 +1,337 @@ +#!/usr/bin/python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Tornado REST API Request Handlers for Nova functions +Most calls are proxied into the responsible controller. +""" + +import logging +import multiprocessing +import random +import re +import urllib +# TODO(termie): replace minidom with etree +from xml.dom import minidom + +from nova import vendor +import tornado.web +from twisted.internet import defer + +from nova import crypto +from nova import exception +from nova import flags +from nova import utils +from nova.endpoint import cloud +from nova.auth import users + +FLAGS = flags.FLAGS +flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') + + +_log = logging.getLogger("api") +_log.setLevel(logging.DEBUG) + + +_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') + + +def _camelcase_to_underscore(str): + return _c2u.sub(r'_\1', str).lower().strip('_') + + +def _underscore_to_camelcase(str): + return ''.join([x[:1].upper() + x[1:] for x in str.split('_')]) + + +def _underscore_to_xmlcase(str): + res = _underscore_to_camelcase(str) + return res[:1].lower() + res[1:] + + +class APIRequestContext(object): + def __init__(self, handler, user): + self.handler = handler + self.user = user + self.request_id = ''.join( + [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-') + for x in xrange(20)] + ) + + +class APIRequest(object): + def __init__(self, handler, controller, action): + self.handler = handler + self.controller = controller + self.action = action + + def send(self, user, **kwargs): + context = APIRequestContext(self.handler, user) + + try: + method = getattr(self.controller, + _camelcase_to_underscore(self.action)) + except AttributeError: + _error = ('Unsupported API request: controller = %s,' + 'action = %s') % (self.controller, self.action) + _log.warning(_error) + # TODO: Raise custom exception, trap in apiserver, + # and reraise as 400 error. 
+ raise Exception(_error) + + args = {} + for key, value in kwargs.items(): + parts = key.split(".") + key = _camelcase_to_underscore(parts[0]) + if len(parts) > 1: + d = args.get(key, {}) + d[parts[1]] = value[0] + value = d + else: + value = value[0] + args[key] = value + + for key in args.keys(): + if isinstance(args[key], dict): + if args[key] != {} and args[key].keys()[0].isdigit(): + s = args[key].items() + s.sort() + args[key] = [v for k, v in s] + + d = defer.maybeDeferred(method, context, **args) + d.addCallback(self._render_response, context.request_id) + return d + + def _render_response(self, response_data, request_id): + xml = minidom.Document() + + response_el = xml.createElement(self.action + 'Response') + response_el.setAttribute('xmlns', + 'http://ec2.amazonaws.com/doc/2009-11-30/') + request_id_el = xml.createElement('requestId') + request_id_el.appendChild(xml.createTextNode(request_id)) + response_el.appendChild(request_id_el) + if(response_data == True): + self._render_dict(xml, response_el, {'return': 'true'}) + else: + self._render_dict(xml, response_el, response_data) + + xml.appendChild(response_el) + + response = xml.toxml() + xml.unlink() + _log.debug(response) + return response + + def _render_dict(self, xml, el, data): + try: + for key in data.keys(): + val = data[key] + el.appendChild(self._render_data(xml, key, val)) + except: + _log.debug(data) + raise + + def _render_data(self, xml, el_name, data): + el_name = _underscore_to_xmlcase(el_name) + data_el = xml.createElement(el_name) + + if isinstance(data, list): + for item in data: + data_el.appendChild(self._render_data(xml, 'item', item)) + elif isinstance(data, dict): + self._render_dict(xml, data_el, data) + elif hasattr(data, '__dict__'): + self._render_dict(xml, data_el, data.__dict__) + elif isinstance(data, bool): + data_el.appendChild(xml.createTextNode(str(data).lower())) + elif data != None: + data_el.appendChild(xml.createTextNode(str(data))) + + return data_el + + +class RootRequestHandler(tornado.web.RequestHandler): + def get(self): + # available api versions + versions = [ + '1.0', + '2007-01-19', + '2007-03-01', + '2007-08-29', + '2007-10-10', + '2007-12-15', + '2008-02-01', + '2008-09-01', + '2009-04-04', + ] + for version in versions: + self.write('%s\n' % version) + self.finish() + + +class MetadataRequestHandler(tornado.web.RequestHandler): + def print_data(self, data): + if isinstance(data, dict): + output = '' + for key in data: + if key == '_name': + continue + output += key + if isinstance(data[key], dict): + if '_name' in data[key]: + output += '=' + str(data[key]['_name']) + else: + output += '/' + output += '\n' + self.write(output[:-1]) # cut off last \n + elif isinstance(data, list): + self.write('\n'.join(data)) + else: + self.write(str(data)) + + def lookup(self, path, data): + items = path.split('/') + for item in items: + if item: + if not isinstance(data, dict): + return data + if not item in data: + return None + data = data[item] + return data + + def get(self, path): + cc = self.application.controllers['Cloud'] + meta_data = cc.get_metadata(self.request.remote_ip) + if meta_data is None: + _log.error('Failed to get metadata for ip: %s' % + self.request.remote_ip) + raise tornado.web.HTTPError(404) + data = self.lookup(path, meta_data) + if data is None: + raise tornado.web.HTTPError(404) + self.print_data(data) + self.finish() + + +class APIRequestHandler(tornado.web.RequestHandler): + def get(self, controller_name): + self.execute(controller_name) + + 
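+ # Both GET and POST are funneled into execute(), which verifies the
+ # request signature via the user manager, dispatches the named Action to
+ # the matching controller method, and renders the result (or any error)
+ # as EC2-style XML.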
@tornado.web.asynchronous + def execute(self, controller_name): + # Obtain the appropriate controller for this request. + try: + controller = self.application.controllers[controller_name] + except KeyError: + self._error('unhandled', 'no controller named %s' % controller_name) + return + + args = self.request.arguments + + # Read request signature. + try: + signature = args.pop('Signature')[0] + except: + raise tornado.web.HTTPError(400) + + # Make a copy of args for authentication and signature verification. + auth_params = {} + for key, value in args.items(): + auth_params[key] = value[0] + + # Get requested action and remove authentication args for final request. + try: + action = args.pop('Action')[0] + args.pop('AWSAccessKeyId') + args.pop('SignatureMethod') + args.pop('SignatureVersion') + args.pop('Version') + args.pop('Timestamp') + except: + raise tornado.web.HTTPError(400) + + # Authenticate the request. + user = self.application.user_manager.authenticate( + auth_params, + signature, + self.request.method, + self.request.host, + self.request.path + ) + + if not user: + raise tornado.web.HTTPError(403) + + _log.debug('action: %s' % action) + + for key, value in args.items(): + _log.debug('arg: %s\t\tval: %s' % (key, value)) + + request = APIRequest(self, controller, action) + d = request.send(user, **args) + # d.addCallback(utils.debug) + + # TODO: Wrap response in AWS XML format + d.addCallbacks(self._write_callback, self._error_callback) + + def _write_callback(self, data): + self.set_header('Content-Type', 'text/xml') + self.write(data) + self.finish() + + def _error_callback(self, failure): + try: + failure.raiseException() + except exception.ApiError as ex: + self._error(type(ex).__name__ + "." + ex.code, ex.message) + # TODO(vish): do something more useful with unknown exceptions + except Exception as ex: + self._error(type(ex).__name__, str(ex)) + raise + + def post(self, controller_name): + self.execute(controller_name) + + def _error(self, code, message): + self._status_code = 400 + self.set_header('Content-Type', 'text/xml') + self.write('\n') + self.write('%s' + '%s' + '?' % (code, message)) + self.finish() + + +class APIServerApplication(tornado.web.Application): + def __init__(self, user_manager, controllers): + tornado.web.Application.__init__(self, [ + (r'/', RootRequestHandler), + (r'/services/([A-Za-z0-9]+)/', APIRequestHandler), + (r'/latest/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/2009-04-04/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/2008-09-01/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/2008-02-01/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/2007-12-15/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/2007-10-10/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/2007-08-29/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/2007-03-01/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler), + (r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler), + ], pool=multiprocessing.Pool(4)) + self.user_manager = user_manager + self.controllers = controllers diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py new file mode 100644 index 000000000000..27dd81aa2990 --- /dev/null +++ b/nova/endpoint/cloud.py @@ -0,0 +1,572 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Cloud Controller: Implementation of EC2 REST API calls, which are +dispatched to other nodes via AMQP RPC. State is via distributed +datastore. +""" + +import json +import logging +import os +import time + +from nova import vendor +from twisted.internet import defer + +from nova import datastore +from nova import flags +from nova import rpc +from nova import utils +from nova import exception +from nova.auth import users +from nova.compute import model +from nova.compute import network +from nova.endpoint import images +from nova.volume import storage + +FLAGS = flags.FLAGS + +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + +def _gen_key(user_id, key_name): + """ Tuck this into UserManager """ + try: + manager = users.UserManager.instance() + private_key, fingerprint = manager.generate_key_pair(user_id, key_name) + except Exception as ex: + return {'exception': ex} + return {'private_key': private_key, 'fingerprint': fingerprint} + + +class CloudController(object): + """ CloudController provides the critical dispatch between + inbound API calls through the endpoint and messages + sent to the other nodes. +""" + def __init__(self): + self._instances = datastore.Keeper(FLAGS.instances_prefix) + self.instdir = model.InstanceDirectory() + self.network = network.NetworkController() + self.setup() + + @property + def instances(self): + """ All instances in the system, as dicts """ + for instance in self.instdir.all: + yield {instance['instance_id']: instance} + + @property + def volumes(self): + """ returns a list of all volumes """ + for volume_id in datastore.Redis.instance().smembers("volumes"): + volume = storage.Volume(volume_id=volume_id) + yield volume + + def __str__(self): + return 'CloudController' + + def setup(self): + """ Ensure the keychains and folders exist. """ + # Create keys folder, if it doesn't exist + if not os.path.exists(FLAGS.keys_path): + os.makedirs(os.path.abspath(FLAGS.keys_path)) + # Gen root CA, if we don't have one + root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) + if not os.path.exists(root_ca_path): + start = os.getcwd() + os.chdir(FLAGS.ca_path) + utils.runthis("Generating root CA: %s", "sh genrootca.sh") + os.chdir(start) + # TODO: Do this with M2Crypto instead + + def get_instance_by_ip(self, ip): + return self.instdir.by_ip(ip) + + def get_metadata(self, ip): + i = self.instdir.by_ip(ip) + if i is None: + return None + if i['key_name']: + keys = { + '0': { + '_name': i['key_name'], + 'openssh-key': i['key_data'] + } + } + else: + keys = '' + data = { + 'user-data': base64.b64decode(i['user_data']), + 'meta-data': { + 'ami-id': i['image_id'], + 'ami-launch-index': i['ami_launch_index'], + 'ami-manifest-path': 'FIXME', # image property + 'block-device-mapping': { # TODO: replace with real data + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': '/dev/sda1', + 'swap': 'sda3' + }, + 'hostname': i['private_dns_name'], # is this public sometimes? 
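+ # The keys below mirror the layout of EC2's instance metadata service;
+ # MetadataRequestHandler in endpoint/api.py walks this dict with lookup(),
+ # so e.g. GET /latest/meta-data/instance-id resolves to i['instance_id'].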
+ 'instance-action': 'none', + 'instance-id': i['instance_id'], + 'instance-type': i.get('instance_type', ''), + 'local-hostname': i['private_dns_name'], + 'local-ipv4': i['private_dns_name'], # TODO: switch to IP + 'kernel-id': i.get('kernel_id', ''), + 'placement': { + 'availaibility-zone': i.get('availability_zone', 'nova'), + }, + 'public-hostname': i.get('dns_name', ''), + 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP + 'public-keys' : keys, + 'ramdisk-id': i.get('ramdisk_id', ''), + 'reservation-id': i['reservation_id'], + 'security-groups': i.get('groups', '') + } + } + if False: # TODO: store ancestor ids + data['ancestor-ami-ids'] = [] + if i.get('product_codes', None): + data['product-codes'] = i['product_codes'] + return data + + + def describe_availability_zones(self, context, **kwargs): + return {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + def describe_key_pairs(self, context, key_name=None, **kwargs): + key_pairs = [] + key_names = key_name and key_name or [] + if len(key_names) > 0: + for key_name in key_names: + key_pair = context.user.get_key_pair(key_name) + if key_pair != None: + key_pairs.append({ + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint, + }) + else: + for key_pair in context.user.get_key_pairs(): + key_pairs.append({ + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint, + }) + + return { 'keypairsSet': key_pairs } + + def create_key_pair(self, context, key_name, **kwargs): + try: + d = defer.Deferred() + p = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + d.errback(kwargs['exception']) + return + d.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + p.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return d + + except users.UserError, e: + raise + + def delete_key_pair(self, context, key_name, **kwargs): + context.user.delete_key_pair(key_name) + # aws returns true even if the key doens't exist + return True + + def describe_security_groups(self, context, group_names, **kwargs): + groups = { 'securityGroupSet': [] } + + # Stubbed for now to unblock other things. 
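+ # Security groups are not implemented yet: describe returns an empty set
+ # and create/delete below simply return True, so EC2 clients that call
+ # them unconditionally keep working.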
+ return groups + + def create_security_group(self, context, group_name, **kwargs): + return True + + def delete_security_group(self, context, group_name, **kwargs): + return True + + def get_console_output(self, context, instance_id, **kwargs): + # instance_id is passed in as a list of instances + instance = self.instdir.get(instance_id[0]) + if instance['state'] == 'pending': + raise exception.ApiError('Cannot get output for pending instance') + if not context.user.is_authorized(instance.get('owner_id', None)): + raise exception.ApiError('Not authorized to view output') + return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "get_console_output", + "args" : {"instance_id": instance_id[0]}}) + + def _get_user_id(self, context): + if context and context.user: + return context.user.id + else: + return None + + def describe_volumes(self, context, **kwargs): + volumes = [] + for volume in self.volumes: + if context.user.is_authorized(volume.get('user_id', None)): + v = self.format_volume(context, volume) + volumes.append(v) + return defer.succeed({'volumeSet': volumes}) + + def format_volume(self, context, volume): + v = {} + v['volumeId'] = volume['volume_id'] + v['status'] = volume['status'] + v['size'] = volume['size'] + v['availabilityZone'] = volume['availability_zone'] + v['createTime'] = volume['create_time'] + if context.user.is_admin(): + v['status'] = '%s (%s, %s, %s, %s)' % ( + volume.get('status', None), + volume.get('user_id', None), + volume.get('node_name', None), + volume.get('instance_id', ''), + volume.get('mountpoint', '')) + return v + + def create_volume(self, context, size, **kwargs): + # TODO(vish): refactor this to create the volume object here and tell storage to create it + res = rpc.call(FLAGS.storage_topic, {"method": "create_volume", + "args" : {"size": size, + "user_id": context.user.id}}) + def _format_result(result): + volume = self._get_volume(result['result']) + return {'volumeSet': [self.format_volume(context, volume)]} + res.addCallback(_format_result) + return res + + def _get_by_id(self, nodes, id): + if nodes == {}: + raise exception.NotFound("%s not found" % id) + for node_name, node in nodes.iteritems(): + if node.has_key(id): + return node_name, node[id] + raise exception.NotFound("%s not found" % id) + + def _get_volume(self, volume_id): + for volume in self.volumes: + if volume['volume_id'] == volume_id: + return volume + + def attach_volume(self, context, volume_id, instance_id, device, **kwargs): + volume = self._get_volume(volume_id) + storage_node = volume['node_name'] + # TODO: (joshua) Fix volumes to store creator id + if not context.user.is_authorized(volume.get('user_id', None)): + raise exception.ApiError("%s not authorized for %s" % + (context.user.id, volume_id)) + instance = self.instdir.get(instance_id) + compute_node = instance['node_name'] + if not context.user.is_authorized(instance.get('owner_id', None)): + raise exception.ApiError(message="%s not authorized for %s" % + (context.user.id, instance_id)) + aoe_device = volume['aoe_device'] + # Needs to get right node controller for attaching to + # TODO: Maybe have another exchange that goes to everyone? 
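+ # Two casts go out: the compute node attaches the exported AoE device to
+ # the guest (virsh attach-disk in compute/node.py), and the storage node
+ # is notified with the volume id and mountpoint so the volume's record can
+ # be updated as well.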
+ rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), + {"method": "attach_volume", + "args" : {"aoe_device": aoe_device, + "instance_id" : instance_id, + "mountpoint" : device}}) + rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node), + {"method": "attach_volume", + "args" : {"volume_id": volume_id, + "instance_id" : instance_id, + "mountpoint" : device}}) + return defer.succeed(True) + + def detach_volume(self, context, volume_id, **kwargs): + # TODO(joshua): Make sure the updated state has been received first + volume = self._get_volume(volume_id) + storage_node = volume['node_name'] + if not context.user.is_authorized(volume.get('user_id', None)): + raise exception.ApiError("%s not authorized for %s" % + (context.user.id, volume_id)) + if 'instance_id' in volume.keys(): + instance_id = volume['instance_id'] + try: + instance = self.instdir.get(instance_id) + compute_node = instance['node_name'] + mountpoint = volume['mountpoint'] + if not context.user.is_authorized( + instance.get('owner_id', None)): + raise exception.ApiError( + "%s not authorized for %s" % + (context.user.id, instance_id)) + rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), + {"method": "detach_volume", + "args" : {"instance_id": instance_id, + "mountpoint": mountpoint}}) + except exception.NotFound: + pass + rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node), + {"method": "detach_volume", + "args" : {"volume_id": volume_id}}) + return defer.succeed(True) + + def _convert_to_set(self, lst, str): + if lst == None or lst == []: + return None + return [{str: x} for x in lst] + + def describe_instances(self, context, **kwargs): + return defer.succeed(self.format_instances(context.user)) + + def format_instances(self, user, reservation_id = None): + if self.instances == {}: + return {'reservationSet': []} + reservations = {} + for inst in self.instances: + instance = inst.values()[0] + res_id = instance.get('reservation_id', 'Unknown') + if (user.is_authorized(instance.get('owner_id', None)) + and (reservation_id == None or reservation_id == res_id)): + i = {} + i['instance_id'] = instance.get('instance_id', None) + i['image_id'] = instance.get('image_id', None) + i['instance_state'] = { + 'code': 42, + 'name': instance.get('state', 'pending') + } + i['public_dns_name'] = self.network.get_public_ip_for_instance( + i['instance_id']) + i['private_dns_name'] = instance.get('private_dns_name', None) + if not i['public_dns_name']: + i['public_dns_name'] = i['private_dns_name'] + i['dns_name'] = instance.get('dns_name', None) + i['key_name'] = instance.get('key_name', None) + if user.is_admin(): + i['key_name'] = '%s (%s, %s)' % (i['key_name'], + instance.get('owner_id', None), instance.get('node_name','')) + i['product_codes_set'] = self._convert_to_set( + instance.get('product_codes', None), 'product_code') + i['instance_type'] = instance.get('instance_type', None) + i['launch_time'] = instance.get('launch_time', None) + i['ami_launch_index'] = instance.get('ami_launch_index', + None) + if not reservations.has_key(res_id): + r = {} + r['reservation_id'] = res_id + r['owner_id'] = instance.get('owner_id', None) + r['group_set'] = self._convert_to_set( + instance.get('groups', None), 'group_id') + r['instances_set'] = [] + reservations[res_id] = r + reservations[res_id]['instances_set'].append(i) + + instance_response = {'reservationSet' : list(reservations.values()) } + return instance_response + + def describe_addresses(self, context, **kwargs): + return self.format_addresses(context.user) + + def 
format_addresses(self, user): + addresses = [] + # TODO(vish): move authorization checking into network.py + for address_record in self.network.describe_addresses( + type=network.PublicNetwork): + #logging.debug(address_record) + if user.is_authorized(address_record[u'user_id']): + address = { + 'public_ip': address_record[u'address'], + 'instance_id' : address_record.get(u'instance_id', 'free') + } + # FIXME: add another field for user id + if user.is_admin(): + address['instance_id'] = "%s (%s)" % ( + address['instance_id'], + address_record[u'user_id'], + ) + addresses.append(address) + # logging.debug(addresses) + return {'addressesSet': addresses} + + def allocate_address(self, context, **kwargs): + # TODO: Verify user is valid? + kwargs['owner_id'] = context.user.id + (address,network_name) = self.network.allocate_address( + context.user.id, type=network.PublicNetwork) + return defer.succeed({'addressSet': [{'publicIp' : address}]}) + + def release_address(self, context, **kwargs): + self.network.deallocate_address(kwargs.get('public_ip', None)) + return defer.succeed({'releaseResponse': ["Address released."]}) + + def associate_address(self, context, instance_id, **kwargs): + instance = self.instdir.get(instance_id) + rv = self.network.associate_address( + kwargs['public_ip'], + instance['private_dns_name'], + instance_id) + return defer.succeed({'associateResponse': ["Address associated."]}) + + def disassociate_address(self, context, **kwargs): + rv = self.network.disassociate_address(kwargs['public_ip']) + # TODO - Strip the IP from the instance + return rv + + def run_instances(self, context, **kwargs): + logging.debug("Going to run instances...") + reservation_id = utils.generate_uid('r') + launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + key_data = None + if kwargs.has_key('key_name'): + key_pair = context.user.get_key_pair(kwargs['key_name']) + if not key_pair: + raise exception.ApiError('Key Pair %s not found' % + kwargs['key_name']) + key_data = key_pair.public_key + + for num in range(int(kwargs['max_count'])): + inst = self.instdir.new() + # TODO(ja): add ari, aki + inst['image_id'] = kwargs['image_id'] + inst['user_data'] = kwargs.get('user_data', '') + inst['instance_type'] = kwargs.get('instance_type', '') + inst['reservation_id'] = reservation_id + inst['launch_time'] = launch_time + inst['key_data'] = key_data or '' + inst['key_name'] = kwargs.get('key_name', '') + inst['owner_id'] = context.user.id + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = num + address, _netname = self.network.allocate_address( + inst['owner_id'], mac=inst['mac_address']) + network = self.network.get_users_network(str(context.user.id)) + inst['network_str'] = json.dumps(network.to_dict()) + inst['bridge_name'] = network.bridge_name + inst['private_dns_name'] = str(address) + # TODO: allocate expresses on the router node + inst.save() + rpc.cast(FLAGS.compute_topic, + {"method": "run_instance", + "args": {"instance_id" : inst.instance_id}}) + logging.debug("Casting to node for %s's instance with IP of %s" % + (context.user.name, inst['private_dns_name'])) + # TODO: Make the NetworkComputeNode figure out the network name from ip. 
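The run_instance cast above, like the volume and terminate casts elsewhere in this controller, is just a topic name plus a {"method": ..., "args": {...}} dictionary; the rpc module later in this patch dispatches it by looking the method up on the receiving service. A minimal sketch of that dispatch, with the proxy class and instance id invented for the example (casts carry no '_msg_id', so no reply is published):

    class FakeComputeProxy(object):
        # stand-in for the real compute service; only used for this sketch
        def run_instance(self, instance_id):
            print 'would boot %s' % instance_id

    # envelope of the kind run_instances casts to the compute topic
    message_data = {"method": "run_instance",
                    "args": {"instance_id": "i-abc123"}}  # hypothetical id

    # roughly what AdapterConsumer.receive() does with the envelope
    method = message_data.get('method')
    args = dict((str(k), v) for k, v in message_data.get('args', {}).iteritems())
    getattr(FakeComputeProxy(), str(method))(**args)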
+ return defer.succeed(self.format_instances( + context.user, reservation_id)) + + def terminate_instances(self, context, instance_id, **kwargs): + logging.debug("Going to start terminating instances") + # TODO: return error if not authorized + for i in instance_id: + logging.debug("Going to try and terminate %s" % i) + instance = self.instdir.get(i) + #if instance['state'] == 'pending': + # raise exception.ApiError('Cannot terminate pending instance') + if context.user.is_authorized(instance.get('owner_id', None)): + try: + self.network.disassociate_address( + instance.get('public_dns_name', 'bork')) + except: + pass + if instance.get('private_dns_name', None): + logging.debug("Deallocating address %s" % instance.get('private_dns_name', None)) + try: + self.network.deallocate_address(instance.get('private_dns_name', None)) + except Exception, _err: + pass + if instance.get('node_name', 'unassigned') != 'unassigned': #It's also internal default + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "terminate_instance", + "args" : {"instance_id": i}}) + else: + instance.destroy() + return defer.succeed(True) + + def reboot_instances(self, context, instance_id, **kwargs): + # TODO: return error if not authorized + for i in instance_id: + instance = self.instdir.get(i) + if instance['state'] == 'pending': + raise exception.ApiError('Cannot reboot pending instance') + if context.user.is_authorized(instance.get('owner_id', None)): + rpc.cast('%s.%s' % (FLAGS.node_topic, instance['node_name']), + {"method": "reboot_instance", + "args" : {"instance_id": i}}) + return defer.succeed(True) + + def delete_volume(self, context, volume_id, **kwargs): + # TODO: return error if not authorized + volume = self._get_volume(volume_id) + storage_node = volume['node_name'] + if context.user.is_authorized(volume.get('user_id', None)): + rpc.cast('%s.%s' % (FLAGS.storage_topic, storage_node), + {"method": "delete_volume", + "args" : {"volume_id": volume_id}}) + return defer.succeed(True) + + def describe_images(self, context, image_id=None, **kwargs): + imageSet = images.list(context.user) + if not image_id is None: + imageSet = [i for i in imageSet if i['imageId'] in image_id] + + return defer.succeed({'imagesSet': imageSet}) + + def deregister_image(self, context, image_id, **kwargs): + images.deregister(context.user, image_id) + + return defer.succeed({'imageId': image_id}) + + def register_image(self, context, image_location=None, **kwargs): + if image_location is None and kwargs.has_key('name'): + image_location = kwargs['name'] + + image_id = images.register(context.user, image_location) + logging.debug("Registered %s as %s" % (image_location, image_id)) + + return defer.succeed({'imageId': image_id}) + + def modify_image_attribute(self, context, image_id, + attribute, operation_type, **kwargs): + if attribute != 'launchPermission': + raise exception.ApiError('only launchPermission is supported') + if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': + raise exception.ApiError('only group "all" is supported') + if not operation_type in ['add', 'delete']: + raise exception.ApiError('operation_type must be add or delete') + result = images.modify(context.user, image_id, operation_type) + return defer.succeed(result) + + def update_state(self, topic, value): + """ accepts status reports from the queue and consolidates them """ + # TODO(jmc): if an instance has disappeared from + # the node, call instance_death + if topic == "instances": + return defer.succeed(True) 
+ aggregate_state = getattr(self, topic) + node_name = value.keys()[0] + items = value[node_name] + + logging.debug("Updating %s state for %s" % (topic, node_name)) + + for item_id in items.keys(): + if (aggregate_state.has_key('pending') and + aggregate_state['pending'].has_key(item_id)): + del aggregate_state['pending'][item_id] + aggregate_state[node_name] = items + + return defer.succeed(True) diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py new file mode 100644 index 000000000000..f494ce8926d2 --- /dev/null +++ b/nova/endpoint/images.py @@ -0,0 +1,92 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Proxy AMI-related calls from the cloud controller, to the running +objectstore daemon. +""" + +import json +import random +import urllib + +from nova import vendor +import boto +import boto.s3 + +from nova import flags +from nova import utils + +FLAGS = flags.FLAGS + + +def modify(user, image_id, operation): + conn(user).make_request( + method='POST', + bucket='_images', + query_args=qs({'image_id': image_id, 'operation': operation})) + + return True + + +def register(user, image_location): + """ rpc call to register a new image based from a manifest """ + + image_id = utils.generate_uid('ami') + conn(user).make_request( + method='PUT', + bucket='_images', + query_args=qs({'image_location': image_location, + 'image_id': image_id})) + + return image_id + + +def list(user, filter_list=[]): + """ return a list of all images that a user can see + + optionally filtered by a list of image_id """ + + # FIXME: send along the list of only_images to check for + response = conn(user).make_request( + method='GET', + bucket='_images') + + return json.loads(response.read()) + + +def deregister(user, image_id): + """ unregister an image """ + conn(user).make_request( + method='DELETE', + bucket='_images', + query_args=qs({'image_id': image_id})) + + +def conn(user): + return boto.s3.connection.S3Connection ( + aws_access_key_id=user.access, + aws_secret_access_key=user.secret, + is_secure=False, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + port=FLAGS.s3_port, + host=FLAGS.s3_host) + + +def qs(params): + pairs = [] + for key in params.keys(): + pairs.append(key + '=' + urllib.quote(params[key])) + return '&'.join(pairs) diff --git a/nova/exception.py b/nova/exception.py new file mode 100644 index 000000000000..dc7b16cdbb6e --- /dev/null +++ b/nova/exception.py @@ -0,0 +1,53 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Nova base exception handling, including decorator for re-raising +Nova-type exceptions. SHOULD include dedicated exception logging. +""" + +import logging +import traceback +import sys + +class Error(Exception): + pass + +class ApiError(Error): + def __init__(self, message='Unknown', code='Unknown'): + self.message = message + self.code = code + +class NotFound(Error): + pass + +class NotAuthorized(Error): + pass + +def wrap_exception(f): + def _wrap(*args, **kw): + try: + return f(*args, **kw) + except Exception, e: + if not isinstance(e, Error): + # exc_type, exc_value, exc_traceback = sys.exc_info() + logging.exception('Uncaught exception') + # logging.debug(traceback.extract_stack(exc_traceback)) + raise Error(str(e)) + raise + _wrap.func_name = f.func_name + return _wrap + + diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py new file mode 100644 index 000000000000..ec2e50791f57 --- /dev/null +++ b/nova/fakerabbit.py @@ -0,0 +1,131 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Based a bit on the carrot.backeds.queue backend... 
but a lot better """ + +import logging +import Queue as queue + +from carrot.backends import base + + +class Message(base.BaseMessage): + pass + + +class Exchange(object): + def __init__(self, name, exchange_type): + self.name = name + self.exchange_type = exchange_type + self._queue = queue.Queue() + self._routes = {} + + def publish(self, message, routing_key=None): + logging.debug('(%s) publish (key: %s) %s', + self.name, routing_key, message) + if routing_key in self._routes: + for f in self._routes[routing_key]: + logging.debug('Publishing to route %s', f) + f(message, routing_key=routing_key) + + def bind(self, callback, routing_key): + self._routes.setdefault(routing_key, []) + self._routes[routing_key].append(callback) + + +class Queue(object): + def __init__(self, name): + self.name = name + self._queue = queue.Queue() + + def __repr__(self): + return '' % self.name + + def push(self, message, routing_key=None): + self._queue.put(message) + + def size(self): + return self._queue.qsize() + + def pop(self): + return self._queue.get() + + +class Backend(object): + """ Singleton backend for testing """ + class __impl(base.BaseBackend): + def __init__(self, *args, **kwargs): + #super(__impl, self).__init__(*args, **kwargs) + self._exchanges = {} + self._queues = {} + + def _reset_all(self): + self._exchanges = {} + self._queues = {} + + def queue_declare(self, queue, **kwargs): + if queue not in self._queues: + logging.debug('Declaring queue %s', queue) + self._queues[queue] = Queue(queue) + + def exchange_declare(self, exchange, type, *args, **kwargs): + if exchange not in self._exchanges: + logging.debug('Declaring exchange %s', exchange) + self._exchanges[exchange] = Exchange(exchange, type) + + def queue_bind(self, queue, exchange, routing_key, **kwargs): + logging.debug('Binding %s to %s with key %s', + queue, exchange, routing_key) + self._exchanges[exchange].bind(self._queues[queue].push, + routing_key) + + def get(self, queue, no_ack=False): + if not self._queues[queue].size(): + return None + (message_data, content_type, content_encoding) = \ + self._queues[queue].pop() + message = Message(backend=self, body=message_data, + content_type=content_type, + content_encoding=content_encoding) + logging.debug('Getting from %s: %s', queue, message) + return message + + def prepare_message(self, message_data, delivery_mode, + content_type, content_encoding, **kwargs): + """Prepare message for sending.""" + return (message_data, content_type, content_encoding) + + def publish(self, message, exchange, routing_key, **kwargs): + if exchange in self._exchanges: + self._exchanges[exchange].publish( + message, routing_key=routing_key) + + + __instance = None + + def __init__(self, *args, **kwargs): + if Backend.__instance is None: + Backend.__instance = Backend.__impl(*args, **kwargs) + self.__dict__['_Backend__instance'] = Backend.__instance + + def __getattr__(self, attr): + return getattr(self.__instance, attr) + + def __setattr__(self, attr, value): + return setattr(self.__instance, attr, value) + + +def reset_all(): + Backend()._reset_all() diff --git a/nova/fakevirt.py b/nova/fakevirt.py new file mode 100644 index 000000000000..2b918d388a8d --- /dev/null +++ b/nova/fakevirt.py @@ -0,0 +1,109 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +A fake (in-memory) hypervisor+api. Allows nova testing w/o KVM and libvirt. +""" + +import StringIO +from xml.etree import ElementTree + + +class FakeVirtConnection(object): + # FIXME: networkCreateXML, listNetworks don't do anything since + # they aren't exercised in tests yet + + def __init__(self): + self.next_index = 0 + self.instances = {} + + @classmethod + def instance(cls): + if not hasattr(cls, '_instance'): + cls._instance = cls() + return cls._instance + + def lookupByID(self, i): + return self.instances[str(i)] + + def listDomainsID(self): + return self.instances.keys() + + def listNetworks(self): + return [] + + def lookupByName(self, instance_id): + for x in self.instances.values(): + if x.name() == instance_id: + return x + raise Exception('no instance found for instance_id: %s' % instance_id) + + def networkCreateXML(self, xml): + pass + + def createXML(self, xml, flags): + # parse the xml :( + xml_stringio = StringIO.StringIO(xml) + + my_xml = ElementTree.parse(xml_stringio) + name = my_xml.find('name').text + + fake_instance = FakeVirtInstance(conn=self, + index=str(self.next_index), + name=name, + xml=my_xml) + self.instances[str(self.next_index)] = fake_instance + self.next_index += 1 + + def _removeInstance(self, i): + self.instances.pop(str(i)) + + +class FakeVirtInstance(object): + NOSTATE = 0x00 + RUNNING = 0x01 + BLOCKED = 0x02 + PAUSED = 0x03 + SHUTDOWN = 0x04 + SHUTOFF = 0x05 + CRASHED = 0x06 + + def __init__(self, conn, index, name, xml): + self._conn = conn + self._destroyed = False + self._name = name + self._index = index + self._state = self.RUNNING + + def name(self): + return self._name + + def destroy(self): + if self._state == self.SHUTOFF: + raise Exception('instance already destroyed: %s' % self.name()) + self._state = self.SHUTDOWN + self._conn._removeInstance(self._index) + + def info(self): + return [self._state, 0, 2, 0, 0] + + def XMLDesc(self, flags): + return open('fakevirtinstance.xml', 'r').read() + + def blockStats(self, disk): + return [0L, 0L, 0L, 0L, null] + + def interfaceStats(self, iface): + return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] diff --git a/nova/flags.py b/nova/flags.py new file mode 100644 index 000000000000..7818e1b14c19 --- /dev/null +++ b/nova/flags.py @@ -0,0 +1,78 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Package-level global flags are defined here, the rest are defined +where they're used. 
+""" + +import socket + +from nova import vendor +from gflags import * + +# This keeps pylint from barfing on the imports +FLAGS = FLAGS +DEFINE_string = DEFINE_string +DEFINE_integer = DEFINE_integer +DEFINE_bool = DEFINE_bool + +# __GLOBAL FLAGS ONLY__ +# Define any app-specific flags in their own files, docs at: +# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39 + +DEFINE_integer('s3_port', 3333, 's3 port') +DEFINE_integer('s3_internal_port', 3334, 's3 port') +DEFINE_string('s3_host', '127.0.0.1', 's3 host') +#DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') +DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') +DEFINE_string('storage_topic', 'storage', 'the topic storage nodes listen on') +DEFINE_bool('fake_libvirt', False, + 'whether to use a fake libvirt or not') +DEFINE_bool('verbose', False, 'show debug output') +DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') +DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') +DEFINE_bool('fake_users', False, 'use fake users') +DEFINE_string('rabbit_host', 'localhost', 'rabbit host') +DEFINE_integer('rabbit_port', 5672, 'rabbit port') +DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') +DEFINE_string('rabbit_password', 'guest', 'rabbit password') +DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') +DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') +DEFINE_string('ec2_url', + 'http://127.0.0.1:8773/services/Cloud', + 'Url to ec2 api server') + +DEFINE_string('default_image', + 'ami-11111', + 'default image to use, testing only') +DEFINE_string('default_kernel', + 'aki-11111', + 'default kernel to use, testing only') +DEFINE_string('default_ramdisk', + 'ari-11111', + 'default ramdisk to use, testing only') +DEFINE_string('default_instance_type', + 'm1.small', + 'default instance type to use, testing only') + +# UNUSED +DEFINE_string('node_availability_zone', + 'nova', + 'availability zone of this node') +DEFINE_string('node_name', + socket.gethostname(), + 'name of this node') + diff --git a/nova/objectstore/__init__.py b/nova/objectstore/__init__.py new file mode 100644 index 000000000000..c6c09e53e783 --- /dev/null +++ b/nova/objectstore/__init__.py @@ -0,0 +1,28 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:mod:`nova.objectstore` -- S3-type object store +===================================================== + +.. automodule:: nova.objectstore + :platform: Unix + :synopsis: Currently a trivial file-based system, getting extended w/ mongo. +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. 
moduleauthor:: Andy Smith +""" \ No newline at end of file diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py new file mode 100644 index 000000000000..0777c2f11c77 --- /dev/null +++ b/nova/objectstore/bucket.py @@ -0,0 +1,174 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Simple object store using Blobs and JSON files on disk. +""" + +import datetime +import glob +import json +import os +import bisect + +from nova import exception +from nova import flags +from nova import utils +from nova.objectstore import stored + + +FLAGS = flags.FLAGS +flags.DEFINE_string('buckets_path', utils.abspath('../buckets'), + 'path to s3 buckets') + + +class Bucket(object): + def __init__(self, name): + self.name = name + self.path = os.path.abspath(os.path.join(FLAGS.buckets_path, name)) + if not self.path.startswith(os.path.abspath(FLAGS.buckets_path)) or \ + not os.path.isdir(self.path): + raise exception.NotFound() + + self.ctime = os.path.getctime(self.path) + + def __repr__(self): + return "" % self.name + + @staticmethod + def all(): + """ list of all buckets """ + buckets = [] + for fn in glob.glob("%s/*.json" % FLAGS.buckets_path): + try: + json.load(open(fn)) + name = os.path.split(fn)[-1][:-5] + buckets.append(Bucket(name)) + except: + pass + + return buckets + + @staticmethod + def create(bucket_name, user): + """Create a new bucket owned by a user. + + @bucket_name: a string representing the name of the bucket to create + @user: a nova.auth.user who should own the bucket. 
+ + Raises: + NotAuthorized: if the bucket is already exists or has invalid name + """ + path = os.path.abspath(os.path.join( + FLAGS.buckets_path, bucket_name)) + if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \ + os.path.exists(path): + raise exception.NotAuthorized() + + os.makedirs(path) + + with open(path+'.json', 'w') as f: + json.dump({'ownerId': user.id}, f) + + @property + def metadata(self): + """ dictionary of metadata around bucket, + keys are 'Name' and 'CreationDate' + """ + + return { + "Name": self.name, + "CreationDate": datetime.datetime.utcfromtimestamp(self.ctime), + } + + @property + def owner_id(self): + try: + with open(self.path+'.json') as f: + return json.load(f)['ownerId'] + except: + return None + + def is_authorized(self, user): + try: + return user.is_admin() or self.owner_id == user.id + except Exception, e: + pass + + def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False): + object_names = [] + for root, dirs, files in os.walk(self.path): + for file_name in files: + object_names.append(os.path.join(root, file_name)[len(self.path)+1:]) + object_names.sort() + contents = [] + + start_pos = 0 + if marker: + start_pos = bisect.bisect_right(object_names, marker, start_pos) + if prefix: + start_pos = bisect.bisect_left(object_names, prefix, start_pos) + + truncated = False + for object_name in object_names[start_pos:]: + if not object_name.startswith(prefix): + break + if len(contents) >= max_keys: + truncated = True + break + object_path = self._object_path(object_name) + c = {"Key": object_name} + if not terse: + info = os.stat(object_path) + c.update({ + "LastModified": datetime.datetime.utcfromtimestamp( + info.st_mtime), + "Size": info.st_size, + }) + contents.append(c) + marker = object_name + + return { + "Name": self.name, + "Prefix": prefix, + "Marker": marker, + "MaxKeys": max_keys, + "IsTruncated": truncated, + "Contents": contents, + } + + def _object_path(self, object_name): + fn = os.path.join(self.path, object_name) + + if not fn.startswith(self.path): + raise exception.NotAuthorized() + + return fn + + def delete(self): + if len(os.listdir(self.path)) > 0: + raise exception.NotAuthorized() + os.rmdir(self.path) + os.remove(self.path+'.json') + + def __getitem__(self, key): + return stored.Object(self, key) + + def __setitem__(self, key, value): + with open(self._object_path(key), 'wb') as f: + f.write(value) + + def __delitem__(self, key): + stored.Object(self, key).delete() diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py new file mode 100644 index 000000000000..c3e036a40307 --- /dev/null +++ b/nova/objectstore/handler.py @@ -0,0 +1,285 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Implementation of an S3-like storage server based on local files. + +Useful to test features that will eventually run on S3, or if you want to +run something locally that was once running on S3. 
+ +We don't support all the features of S3, but it does work with the +standard S3 client for the most basic semantics. To use the standard +S3 client with this module:: + + c = S3.AWSAuthConnection("", "", server="localhost", port=8888, + is_secure=False) + c.create_bucket("mybucket") + c.put("mybucket", "mykey", "a value") + print c.get("mybucket", "mykey").body + +""" +import datetime +import os +import urllib +import json +import logging +import multiprocessing + + +from nova import vendor +from tornado import escape, web + +from nova import exception +from nova import flags +from nova.objectstore import bucket +from nova.objectstore import image + + +FLAGS = flags.FLAGS + + +def catch_nova_exceptions(target): + # FIXME: find a way to wrap all handlers in the web.Application.__init__ ? + def wrapper(*args, **kwargs): + try: + return target(*args, **kwargs) + except exception.NotFound: + raise web.HTTPError(404) + except exception.NotAuthorized: + raise web.HTTPError(403) + + return wrapper + + +class Application(web.Application): + """Implementation of an S3-like storage server based on local files.""" + def __init__(self, user_manager): + web.Application.__init__(self, [ + (r"/", RootHandler), + (r"/_images/", ImageHandler), + (r"/([^/]+)/(.+)", ObjectHandler), + (r"/([^/]+)/", BucketHandler), + ]) + self.buckets_path = os.path.abspath(FLAGS.buckets_path) + self.images_path = os.path.abspath(FLAGS.images_path) + + if not os.path.exists(self.buckets_path): + raise Exception("buckets_path does not exist") + if not os.path.exists(self.images_path): + raise Exception("images_path does not exist") + self.user_manager = user_manager + + +class BaseRequestHandler(web.RequestHandler): + SUPPORTED_METHODS = ("PUT", "GET", "DELETE", "HEAD") + + @property + def user(self): + if not hasattr(self, '_user'): + try: + access = self.request.headers['Authorization'].split(' ')[1].split(':')[0] + user = self.application.user_manager.get_user_from_access_key(access) + user.secret # FIXME: check signature here! 
+ self._user = user + except: + raise web.HTTPError(403) + return self._user + + def render_xml(self, value): + assert isinstance(value, dict) and len(value) == 1 + self.set_header("Content-Type", "application/xml; charset=UTF-8") + name = value.keys()[0] + parts = [] + parts.append('<' + escape.utf8(name) + + ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">') + self._render_parts(value.values()[0], parts) + parts.append('') + self.finish('\n' + + ''.join(parts)) + + def _render_parts(self, value, parts=[]): + if isinstance(value, basestring): + parts.append(escape.xhtml_escape(value)) + elif isinstance(value, int) or isinstance(value, long): + parts.append(str(value)) + elif isinstance(value, datetime.datetime): + parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) + elif isinstance(value, dict): + for name, subvalue in value.iteritems(): + if not isinstance(subvalue, list): + subvalue = [subvalue] + for subsubvalue in subvalue: + parts.append('<' + escape.utf8(name) + '>') + self._render_parts(subsubvalue, parts) + parts.append('') + else: + raise Exception("Unknown S3 value type %r", value) + + def head(self, *args, **kwargs): + return self.get(*args, **kwargs) + + +class RootHandler(BaseRequestHandler): + def get(self): + buckets = [b for b in bucket.Bucket.all() if b.is_authorized(self.user)] + + self.render_xml({"ListAllMyBucketsResult": { + "Buckets": {"Bucket": [b.metadata for b in buckets]}, + }}) + + +class BucketHandler(BaseRequestHandler): + @catch_nova_exceptions + def get(self, bucket_name): + logging.debug("List keys for bucket %s" % (bucket_name)) + + bucket_object = bucket.Bucket(bucket_name) + + if not bucket_object.is_authorized(self.user): + raise web.HTTPError(403) + + prefix = self.get_argument("prefix", u"") + marker = self.get_argument("marker", u"") + max_keys = int(self.get_argument("max-keys", 1000)) + terse = int(self.get_argument("terse", 0)) + + results = bucket_object.list_keys(prefix=prefix, marker=marker, max_keys=max_keys, terse=terse) + self.render_xml({"ListBucketResult": results}) + + @catch_nova_exceptions + def put(self, bucket_name): + logging.debug("Creating bucket %s" % (bucket_name)) + bucket.Bucket.create(bucket_name, self.user) + self.finish() + + @catch_nova_exceptions + def delete(self, bucket_name): + logging.debug("Deleting bucket %s" % (bucket_name)) + bucket_object = bucket.Bucket(bucket_name) + + if not bucket_object.is_authorized(self.user): + raise web.HTTPError(403) + + bucket_object.delete() + self.set_status(204) + self.finish() + + +class ObjectHandler(BaseRequestHandler): + @catch_nova_exceptions + def get(self, bucket_name, object_name): + logging.debug("Getting object: %s / %s" % (bucket_name, object_name)) + + bucket_object = bucket.Bucket(bucket_name) + + if not bucket_object.is_authorized(self.user): + raise web.HTTPError(403) + + obj = bucket_object[urllib.unquote(object_name)] + self.set_header("Content-Type", "application/unknown") + self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(obj.mtime)) + self.set_header("Etag", '"' + obj.md5 + '"') + self.finish(obj.read()) + + @catch_nova_exceptions + def put(self, bucket_name, object_name): + logging.debug("Putting object: %s / %s" % (bucket_name, object_name)) + bucket_object = bucket.Bucket(bucket_name) + + if not bucket_object.is_authorized(self.user): + raise web.HTTPError(403) + + key = urllib.unquote(object_name) + bucket_object[key] = self.request.body + self.set_header("Etag", '"' + bucket_object[key].md5 + '"') + self.finish() + + 
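The bucket handlers above are thin wrappers around bucket.Bucket; a rough sketch of exercising the listing path that BucketHandler.get drives, assuming a bucket directory named 'mybucket' already exists under FLAGS.buckets_path (the bucket name and key prefix are made up):

    from nova.objectstore import bucket

    b = bucket.Bucket('mybucket')        # raises NotFound if the directory is missing
    listing = b.list_keys(prefix='kernels/', max_keys=10)
    # 'listing' is the dict rendered as <ListBucketResult> by BucketHandler.get, e.g.
    # {'Name': 'mybucket', 'Prefix': 'kernels/', 'Marker': ..., 'MaxKeys': 10,
    #  'IsTruncated': False,
    #  'Contents': [{'Key': 'kernels/vmlinuz', 'LastModified': ..., 'Size': ...}, ...]}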
@catch_nova_exceptions + def delete(self, bucket_name, object_name): + logging.debug("Deleting object: %s / %s" % (bucket_name, object_name)) + bucket_object = bucket.Bucket(bucket_name) + + if not bucket_object.is_authorized(self.user): + raise web.HTTPError(403) + + del bucket_object[urllib.unquote(object_name)] + self.set_status(204) + self.finish() + + +class ImageHandler(BaseRequestHandler): + SUPPORTED_METHODS = ("POST", "PUT", "GET", "DELETE") + + @catch_nova_exceptions + def get(self): + """ returns a json listing of all images + that a user has permissions to see """ + + images = [i for i in image.Image.all() if i.is_authorized(self.user)] + + self.finish(json.dumps([i.metadata for i in images])) + + @catch_nova_exceptions + def put(self): + """ create a new registered image """ + + image_id = self.get_argument('image_id', u'') + image_location = self.get_argument('image_location', u'') + + image_path = os.path.join(FLAGS.images_path, image_id) + if not image_path.startswith(FLAGS.images_path) or \ + os.path.exists(image_path): + raise web.HTTPError(403) + + bucket_object = bucket.Bucket(image_location.split("/")[0]) + manifest = image_location[len(image_location.split('/')[0])+1:] + + if not bucket_object.is_authorized(self.user): + raise web.HTTPError(403) + + p = multiprocessing.Process(target=image.Image.create,args= + (image_id, image_location, self.user)) + p.start() + self.finish() + + @catch_nova_exceptions + def post(self): + """ update image attributes: public/private """ + + image_id = self.get_argument('image_id', u'') + operation = self.get_argument('operation', u'') + + image_object = image.Image(image_id) + + if image_object.owner_id != self.user.id: + raise web.HTTPError(403) + + image_object.set_public(operation=='add') + + self.finish() + + @catch_nova_exceptions + def delete(self): + """ delete a registered image """ + image_id = self.get_argument("image_id", u"") + image_object = image.Image(image_id) + + if image_object.owner_id != self.user.id: + raise web.HTTPError(403) + + image_object.delete() + + self.set_status(204) diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py new file mode 100644 index 000000000000..1878487f7c07 --- /dev/null +++ b/nova/objectstore/image.py @@ -0,0 +1,177 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Take uploaded bucket contents and register them as disk images (AMIs). +Requires decryption using keys in the manifest. 
+""" + +# TODO(jesse): Got these from Euca2ools, will need to revisit them + +import binascii +import glob +import json +import os +import shutil +import tarfile +import tempfile +from xml.etree import ElementTree + +from nova import exception +from nova import flags +from nova import utils +from nova.objectstore import bucket + + +FLAGS = flags.FLAGS +flags.DEFINE_string('images_path', utils.abspath('../images'), + 'path to decrypted images') + +class Image(object): + def __init__(self, image_id): + self.image_id = image_id + self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id)) + if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \ + not os.path.isdir(self.path): + raise exception.NotFound + + def delete(self): + for fn in ['info.json', 'image']: + try: + os.unlink(os.path.join(self.path, fn)) + except: + pass + try: + os.rmdir(self.path) + except: + pass + + def is_authorized(self, user): + try: + return self.metadata['isPublic'] or self.metadata['imageOwnerId'] == user.id + except: + return False + + def set_public(self, state): + md = self.metadata + md['isPublic'] = state + with open(os.path.join(self.path, 'info.json'), 'w') as f: + json.dump(md, f) + + @staticmethod + def all(): + images = [] + for fn in glob.glob("%s/*/info.json" % FLAGS.images_path): + try: + image_id = fn.split('/')[-2] + images.append(Image(image_id)) + except: + pass + return images + + @property + def owner_id(self): + return self.metadata['imageOwnerId'] + + @property + def metadata(self): + with open(os.path.join(self.path, 'info.json')) as f: + return json.load(f) + + @staticmethod + def create(image_id, image_location, user): + image_path = os.path.join(FLAGS.images_path, image_id) + os.makedirs(image_path) + + bucket_name = image_location.split("/")[0] + manifest_path = image_location[len(bucket_name)+1:] + bucket_object = bucket.Bucket(bucket_name) + + manifest = ElementTree.fromstring(bucket_object[manifest_path].read()) + image_type = 'machine' + + try: + kernel_id = manifest.find("machine_configuration/kernel_id").text + if kernel_id == 'true': + image_type = 'kernel' + except: + pass + + try: + ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text + if ramdisk_id == 'true': + image_type = 'ramdisk' + except: + pass + + info = { + 'imageId': image_id, + 'imageLocation': image_location, + 'imageOwnerId': user.id, + 'isPublic': False, # FIXME: grab public from manifest + 'architecture': 'x86_64', # FIXME: grab architecture from manifest + 'type' : image_type + } + + def write_state(state): + info['imageState'] = state + with open(os.path.join(image_path, 'info.json'), "w") as f: + json.dump(info, f) + + write_state('pending') + + encrypted_filename = os.path.join(image_path, 'image.encrypted') + with open(encrypted_filename, 'w') as f: + for filename in manifest.find("image").getiterator("filename"): + shutil.copyfileobj(bucket_object[filename.text].file, f) + + write_state('decrypting') + + # FIXME: grab kernelId and ramdiskId from bundle manifest + encrypted_key = binascii.a2b_hex(manifest.find("image/ec2_encrypted_key").text) + encrypted_iv = binascii.a2b_hex(manifest.find("image/ec2_encrypted_iv").text) + cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem") + + decrypted_filename = os.path.join(image_path, 'image.tar.gz') + Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename) + + write_state('untarring') + + image_file = Image.untarzip_image(image_path, decrypted_filename) + 
shutil.move(os.path.join(image_path, image_file), os.path.join(image_path, 'image')) + + write_state('available') + os.unlink(decrypted_filename) + os.unlink(encrypted_filename) + + @staticmethod + def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, cloud_private_key, decrypted_filename): + key, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_key) + if err: + raise exception.Error("Failed to decrypt private key: %s" % err) + iv, err = utils.execute('openssl rsautl -decrypt -inkey %s' % cloud_private_key, encrypted_iv) + if err: + raise exception.Error("Failed to decrypt initialization vector: %s" % err) + out, err = utils.execute('openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s' % (encrypted_filename, key, iv, decrypted_filename)) + if err: + raise exception.Error("Failed to decrypt image file %s : %s" % (encrypted_filename, err)) + + @staticmethod + def untarzip_image(path, filename): + tar_file = tarfile.open(filename, "r|gz") + tar_file.extractall(path) + image_file = tar_file.getnames()[0] + tar_file.close() + return image_file diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py new file mode 100644 index 000000000000..05a7a1102c21 --- /dev/null +++ b/nova/objectstore/stored.py @@ -0,0 +1,58 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Properties of an object stored within a bucket. +""" + +from nova.exception import NotFound, NotAuthorized + +import os +import nova.crypto + +class Object(object): + def __init__(self, bucket, key): + """ wrapper class of an existing key """ + self.bucket = bucket + self.key = key + self.path = bucket._object_path(key) + if not os.path.isfile(self.path): + raise NotFound + + def __repr__(self): + return "" % (self.bucket, self.key) + + @property + def md5(self): + """ computes the MD5 of the contents of file """ + with open(self.path, "r") as f: + return nova.crypto.compute_md5(f) + + @property + def mtime(self): + """ mtime of file """ + return os.path.getmtime(self.path) + + def read(self): + """ read all contents of key into memory and return """ + return self.file.read() + + @property + def file(self): + """ return a file object for the key """ + return open(self.path, 'rb') + + def delete(self): + """ deletes the file """ + os.unlink(self.path) diff --git a/nova/process.py b/nova/process.py new file mode 100644 index 000000000000..754728fdf5c2 --- /dev/null +++ b/nova/process.py @@ -0,0 +1,131 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Process pool, still buggy right now. +""" + +import logging +import multiprocessing + +from nova import vendor +from twisted.internet import defer +from twisted.internet import reactor +from twisted.internet import protocol +from twisted.internet import threads + +# NOTE(termie): this is copied from twisted.internet.utils but since +# they don't export it I've copied. +class _BackRelay(protocol.ProcessProtocol): + """ + Trivial protocol for communicating with a process and turning its output + into the result of a L{Deferred}. + + @ivar deferred: A L{Deferred} which will be called back with all of stdout + and, if C{errortoo} is true, all of stderr as well (mixed together in + one string). If C{errortoo} is false and any bytes are received over + stderr, this will fire with an L{_UnexpectedErrorOutput} instance and + the attribute will be set to C{None}. + + @ivar onProcessEnded: If C{errortoo} is false and bytes are received over + stderr, this attribute will refer to a L{Deferred} which will be called + back when the process ends. This C{Deferred} is also associated with + the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in + this case so that users can determine when the process has actually + ended, in addition to knowing when bytes have been received via stderr. + """ + + def __init__(self, deferred, errortoo=0): + self.deferred = deferred + self.s = StringIO.StringIO() + if errortoo: + self.errReceived = self.errReceivedIsGood + else: + self.errReceived = self.errReceivedIsBad + + def errReceivedIsBad(self, text): + if self.deferred is not None: + self.onProcessEnded = defer.Deferred() + err = _UnexpectedErrorOutput(text, self.onProcessEnded) + self.deferred.errback(failure.Failure(err)) + self.deferred = None + self.transport.loseConnection() + + def errReceivedIsGood(self, text): + self.s.write(text) + + def outReceived(self, text): + self.s.write(text) + + def processEnded(self, reason): + if self.deferred is not None: + self.deferred.callback(self.s.getvalue()) + elif self.onProcessEnded is not None: + self.onProcessEnded.errback(reason) + + +class BackRelayWithInput(_BackRelay): + def __init__(self, deferred, errortoo=0, input=None): + super(BackRelayWithInput, self).__init__(deferred, errortoo) + self.input = input + + def connectionMade(self): + if self.input: + self.transport.write(self.input) + self.transport.closeStdin() + + +def getProcessOutput(executable, args=None, env=None, path=None, reactor=None, + errortoo=0, input=None): + if reactor is None: + from twisted.internet import reactor + args = args and args or () + env = env and env and {} + d = defer.Deferred() + p = BackRelayWithInput(d, errortoo=errortoo, input=input) + reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path) + return d + + +class Pool(object): + """ A simple process pool implementation around mutliprocessing. + + Allows up to `size` processes at a time and queues the rest. 
+ + Using workarounds for multiprocessing behavior described in: + http://pypi.python.org/pypi/twisted.internet.processes/1.0b1 + """ + + def __init__(self, size=None): + self._size = size + self._pool = multiprocessing.Pool(size) + self._registerShutdown() + + def _registerShutdown(self): + reactor.addSystemEventTrigger( + 'during', 'shutdown', self.shutdown, reactor) + + def shutdown(self, reactor=None): + if not self._pool: + return + self._pool.close() + # wait for workers to finish + self._pool.terminate() + self._pool = None + + def apply(self, f, *args, **kw): + """ Add a task to the pool and return a deferred. """ + result = self._pool.apply_async(f, args, kw) + return threads.deferToThread(result.get) diff --git a/nova/rpc.py b/nova/rpc.py new file mode 100644 index 000000000000..62c6afff33d0 --- /dev/null +++ b/nova/rpc.py @@ -0,0 +1,222 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +AMQP-based RPC. Queues have consumers and publishers. +No fan-out support yet. +""" + +import logging +import sys +import uuid + +from nova import vendor +import anyjson +from carrot import connection +from carrot import messaging +from twisted.internet import defer +from twisted.internet import reactor +from twisted.internet import task + +from nova import fakerabbit +from nova import flags + + +FLAGS = flags.FLAGS + + +_log = logging.getLogger('amqplib') +_log.setLevel(logging.WARN) + + +class Connection(connection.BrokerConnection): + @classmethod + def instance(cls): + if not hasattr(cls, '_instance'): + params = dict(hostname=FLAGS.rabbit_host, + port=FLAGS.rabbit_port, + userid=FLAGS.rabbit_userid, + password=FLAGS.rabbit_password, + virtual_host=FLAGS.rabbit_virtual_host) + + if FLAGS.fake_rabbit: + params['backend_cls'] = fakerabbit.Backend + + cls._instance = cls(**params) + return cls._instance + + +class Consumer(messaging.Consumer): + # TODO(termie): it would be nice to give these some way of automatically + # cleaning up after themselves + def attach_to_tornado(self, io_inst=None): + from tornado import ioloop + if io_inst is None: + io_inst = ioloop.IOLoop.instance() + + injected = ioloop.PeriodicCallback( + lambda: self.fetch(enable_callbacks=True), 1, io_loop=io_inst) + injected.start() + return injected + + attachToTornado = attach_to_tornado + + def attach_to_twisted(self): + loop = task.LoopingCall(self.fetch, enable_callbacks=True) + loop.start(interval=0.001) + +class Publisher(messaging.Publisher): + pass + + +class TopicConsumer(Consumer): + exchange_type = "topic" + def __init__(self, connection=None, topic="broadcast"): + self.queue = topic + self.routing_key = topic + self.exchange = FLAGS.control_exchange + super(TopicConsumer, self).__init__(connection=connection) + + +class AdapterConsumer(TopicConsumer): + def __init__(self, connection=None, topic="broadcast", proxy=None): + _log.debug('Initing the Adapter Consumer for %s' % (topic)) + self.proxy = proxy + super(AdapterConsumer, 
self).__init__(connection=connection, topic=topic) + + def receive(self, message_data, message): + _log.debug('received %s' % (message_data)) + msg_id = message_data.pop('_msg_id', None) + + method = message_data.get('method') + args = message_data.get('args', {}) + if not method: + return + + node_func = getattr(self.proxy, str(method)) + node_args = dict((str(k), v) for k, v in args.iteritems()) + d = defer.maybeDeferred(node_func, **node_args) + if msg_id: + d.addCallback(lambda rval: msg_reply(msg_id, rval)) + d.addErrback(lambda e: msg_reply(msg_id, str(e))) + message.ack() + return + + +class TopicPublisher(Publisher): + exchange_type = "topic" + def __init__(self, connection=None, topic="broadcast"): + self.routing_key = topic + self.exchange = FLAGS.control_exchange + super(TopicPublisher, self).__init__(connection=connection) + + +class DirectConsumer(Consumer): + exchange_type = "direct" + def __init__(self, connection=None, msg_id=None): + self.queue = msg_id + self.routing_key = msg_id + self.exchange = msg_id + self.auto_delete = True + super(DirectConsumer, self).__init__(connection=connection) + + +class DirectPublisher(Publisher): + exchange_type = "direct" + def __init__(self, connection=None, msg_id=None): + self.routing_key = msg_id + self.exchange = msg_id + self.auto_delete = True + super(DirectPublisher, self).__init__(connection=connection) + + +def msg_reply(msg_id, reply): + conn = Connection.instance() + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + + try: + publisher.send({'result': reply}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()) + }) + publisher.close() + + +def call(topic, msg): + _log.debug("Making asynchronous call...") + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + _log.debug("MSG_ID is %s" % (msg_id)) + + conn = Connection.instance() + d = defer.Deferred() + consumer = DirectConsumer(connection=conn, msg_id=msg_id) + consumer.register_callback(lambda data, message: d.callback(data)) + injected = consumer.attach_to_tornado() + + # clean up after the injected listened and return x + d.addCallback(lambda x: injected.stop() and x or x) + + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() + return d + + +def cast(topic, msg): + _log.debug("Making asynchronous cast...") + conn = Connection.instance() + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() + + +def generic_response(message_data, message): + _log.debug('response %s', message_data) + message.ack() + sys.exit(0) + + +def send_message(topic, message, wait=True): + msg_id = uuid.uuid4().hex + message.update({'_msg_id': msg_id}) + _log.debug('topic is %s', topic) + _log.debug('message %s', message) + + if wait: + consumer = messaging.Consumer(connection=rpc.Connection.instance(), + queue=msg_id, + exchange=msg_id, + auto_delete=True, + exchange_type="direct", + routing_key=msg_id) + consumer.register_callback(generic_response) + + publisher = messaging.Publisher(connection=rpc.Connection.instance(), + exchange="nova", + exchange_type="topic", + routing_key=topic) + publisher.send(message) + publisher.close() + + if wait: + consumer.wait() + + +# TODO: Replace with a docstring test +if __name__ == "__main__": + send_message(sys.argv[1], anyjson.deserialize(sys.argv[2])) diff --git a/nova/server.py b/nova/server.py new file mode 100644 index 000000000000..227f7fddc317 --- /dev/null +++ b/nova/server.py @@ -0,0 
+1,139 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Base functionality for nova daemons - gradually being replaced with twistd.py. +""" + +import logging +import logging.handlers +import os +import signal +import sys +import time + +from nova import vendor +import daemon +from daemon import pidlockfile + +from nova import flags + + +FLAGS = flags.FLAGS +flags.DEFINE_bool('daemonize', False, 'daemonize this process') +# NOTE(termie): right now I am defaulting to using syslog when we daemonize +# it may be better to do something else -shrug- +# NOTE(Devin): I think we should let each process have its own log file +# and put it in /var/logs/nova/(appname).log +# This makes debugging much easier and cuts down on sys log +# clutter. +flags.DEFINE_bool('use_syslog', True, 'output to syslog when daemonizing') +flags.DEFINE_string('logfile', None, 'log file to output to') +flags.DEFINE_string('pidfile', None, 'pid file to output to') +flags.DEFINE_string('working_directory', './', 'working directory...') + + +def stop(pidfile): + """ + Stop the daemon + """ + # Get the pid from the pidfile + try: + pf = file(pidfile,'r') + pid = int(pf.read().strip()) + pf.close() + except IOError: + pid = None + + if not pid: + message = "pidfile %s does not exist. 
Daemon not running?\n" + sys.stderr.write(message % pidfile) + return # not an error in a restart + + # Try killing the daemon process + try: + while 1: + os.kill(pid, signal.SIGTERM) + time.sleep(0.1) + except OSError, err: + err = str(err) + if err.find("No such process") > 0: + if os.path.exists(pidfile): + os.remove(pidfile) + else: + print str(err) + sys.exit(1) + + +def serve(name, main): + argv = FLAGS(sys.argv) + + if not FLAGS.pidfile: + FLAGS.pidfile = '%s.pid' % name + + logging.debug("Full set of FLAGS: \n\n\n" ) + for flag in FLAGS: + logging.debug("%s : %s" % (flag, FLAGS.get(flag, None) )) + + action = 'start' + if len(argv) > 1: + action = argv.pop() + + if action == 'stop': + stop(FLAGS.pidfile) + sys.exit() + elif action == 'restart': + stop(FLAGS.pidfile) + elif action == 'start': + pass + else: + print 'usage: %s [options] [start|stop|restart]' % argv[0] + sys.exit(1) + + logging.getLogger('amqplib').setLevel(logging.WARN) + if FLAGS.daemonize: + logger = logging.getLogger() + formatter = logging.Formatter( + name + '(%(name)s): %(levelname)s %(message)s') + if FLAGS.use_syslog and not FLAGS.logfile: + syslog = logging.handlers.SysLogHandler(address='/dev/log') + syslog.setFormatter(formatter) + logger.addHandler(syslog) + else: + if not FLAGS.logfile: + FLAGS.logfile = '%s.log' % name + logfile = logging.handlers.FileHandler(FLAGS.logfile) + logfile.setFormatter(formatter) + logger.addHandler(logfile) + stdin, stdout, stderr = None, None, None + else: + stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr + + if FLAGS.verbose: + logging.getLogger().setLevel(logging.DEBUG) + else: + logging.getLogger().setLevel(logging.WARNING) + + with daemon.DaemonContext( + detach_process=FLAGS.daemonize, + working_directory=FLAGS.working_directory, + pidfile=pidlockfile.TimeoutPIDLockFile(FLAGS.pidfile, + acquire_timeout=1, + threaded=False), + stdin=stdin, + stdout=stdout, + stderr=stderr + ): + main(argv) diff --git a/nova/test.py b/nova/test.py new file mode 100644 index 000000000000..610ad89aa9a7 --- /dev/null +++ b/nova/test.py @@ -0,0 +1,246 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Base classes for our unit tests. +Allows overriding of flags for use of fakes, +and some black magic for inline callbacks. 
+""" + +import logging +import time +import unittest + +from nova import vendor +import mox +from tornado import ioloop +from twisted.internet import defer +from twisted.python import failure +from twisted.trial import unittest as trial_unittest +import stubout + +from nova import datastore +from nova import fakerabbit +from nova import flags + + +FLAGS = flags.FLAGS +flags.DEFINE_bool('fake_tests', True, + 'should we use everything for testing') + + +def skip_if_fake(f): + def _skipper(*args, **kw): + if FLAGS.fake_tests: + raise trial_unittest.SkipTest('Test cannot be run in fake mode') + else: + return f(*args, **kw) + + _skipper.func_name = f.func_name + return _skipper + + +class TrialTestCase(trial_unittest.TestCase): + def setUp(self): + super(TrialTestCase, self).setUp() + + # emulate some of the mox stuff, we can't use the metaclass + # because it screws with our generators + self.mox = mox.Mox() + self.stubs = stubout.StubOutForTesting() + self.flag_overrides = {} + + def tearDown(self): + super(TrialTestCase, self).tearDown() + self.reset_flags() + self.mox.UnsetStubs() + self.stubs.UnsetAll() + self.stubs.SmartUnsetAll() + self.mox.VerifyAll() + + if FLAGS.fake_rabbit: + fakerabbit.reset_all() + + # attempt to wipe all keepers + #keeper = datastore.Keeper() + #keeper.clear_all() + + def flags(self, **kw): + for k, v in kw.iteritems(): + if k in self.flag_overrides: + self.reset_flags() + raise Exception( + 'trying to override already overriden flag: %s' % k) + self.flag_overrides[k] = getattr(FLAGS, k) + setattr(FLAGS, k, v) + + def reset_flags(self): + for k, v in self.flag_overrides.iteritems(): + setattr(FLAGS, k, v) + + + +class BaseTestCase(TrialTestCase): + def setUp(self): + super(BaseTestCase, self).setUp() + # TODO(termie): we could possibly keep a more global registry of + # the injected listeners... this is fine for now though + self.injected = [] + self.ioloop = ioloop.IOLoop.instance() + + self._waiting = None + self._doneWaiting = False + self._timedOut = False + self.set_up() + + def set_up(self): + pass + + def tear_down(self): + pass + + def tearDown(self): + super(BaseTestCase, self).tearDown() + for x in self.injected: + x.stop() + if FLAGS.fake_rabbit: + fakerabbit.reset_all() + self.tear_down() + + def _waitForTest(self, timeout=60): + """ Push the ioloop along to wait for our test to complete. """ + self._waiting = self.ioloop.add_timeout(time.time() + timeout, + self._timeout) + def _wait(): + if self._timedOut: + self.fail('test timed out') + self._done() + if self._doneWaiting: + self.ioloop.stop() + return + # we can use add_callback here but this uses less cpu when testing + self.ioloop.add_timeout(time.time() + 0.01, _wait) + + self.ioloop.add_callback(_wait) + self.ioloop.start() + + def _done(self): + if self._waiting: + try: + self.ioloop.remove_timeout(self._waiting) + except Exception: + pass + self._waiting = None + self._doneWaiting = True + + def _maybeInlineCallbacks(self, f): + """ If we're doing async calls in our tests, wait on them. + + This is probably the most complicated hunk of code we have so far. + + First up, if the function is normal (not async) we just act normal + and return. + + Async tests will use the "Inline Callbacks" pattern, which means + you yield Deferreds at every "waiting" step of your code instead + of making epic callback chains. 
+ + Example (callback chain, ugly): + + d = self.node.terminate_instance(instance_id) # a Deferred instance + def _describe(_): + d_desc = self.node.describe_instances() # another Deferred instance + return d_desc + def _checkDescribe(rv): + self.assertEqual(rv, []) + d.addCallback(_describe) + d.addCallback(_checkDescribe) + d.addCallback(lambda x: self._done()) + self._waitForTest() + + Example (inline callbacks! yay!): + + yield self.node.terminate_instance(instance_id) + rv = yield self.node.describe_instances() + self.assertEqual(rv, []) + + If the test fits the Inline Callbacks pattern we will automatically + handle calling wait and done. + """ + # TODO(termie): this can be a wrapper function instead and + # and we can make a metaclass so that we don't + # have to copy all that "run" code below. + g = f() + if not hasattr(g, 'send'): + self._done() + return defer.succeed(g) + + inlined = defer.inlineCallbacks(f) + d = inlined() + return d + + def _catchExceptions(self, result, failure): + exc = (failure.type, failure.value, failure.getTracebackObject()) + if isinstance(failure.value, self.failureException): + result.addFailure(self, exc) + elif isinstance(failure.value, KeyboardInterrupt): + raise + else: + result.addError(self, exc) + + self._done() + + def _timeout(self): + self._waiting = False + self._timedOut = True + + def run(self, result=None): + if result is None: result = self.defaultTestResult() + + result.startTest(self) + testMethod = getattr(self, self._testMethodName) + try: + try: + self.setUp() + except KeyboardInterrupt: + raise + except: + result.addError(self, self._exc_info()) + return + + ok = False + try: + d = self._maybeInlineCallbacks(testMethod) + d.addErrback(lambda x: self._catchExceptions(result, x)) + d.addBoth(lambda x: self._done() and x) + self._waitForTest() + ok = True + except self.failureException: + result.addFailure(self, self._exc_info()) + except KeyboardInterrupt: + raise + except: + result.addError(self, self._exc_info()) + + try: + self.tearDown() + except KeyboardInterrupt: + raise + except: + result.addError(self, self._exc_info()) + ok = False + if ok: result.addSuccess(self) + finally: + result.stopTest(self) diff --git a/nova/tests/CA/cacert.pem b/nova/tests/CA/cacert.pem new file mode 100644 index 000000000000..9ffb5bb807a7 --- /dev/null +++ b/nova/tests/CA/cacert.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICyzCCAjSgAwIBAgIJANiqHZUcbScCMA0GCSqGSIb3DQEBBAUAME4xEjAQBgNV +BAoTCU5PVkEgUk9PVDEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UECBMK +Q2FsaWZvcm5pYTELMAkGA1UEBhMCVVMwHhcNMTAwNTI4MDExOTI1WhcNMTEwNTI4 +MDExOTI1WjBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWlu +IFZpZXcxEzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTMIGfMA0GCSqG +SIb3DQEBAQUAA4GNADCBiQKBgQDobUnq8rpXA/HQZ2Uu9Me3SlqCayz3ws2wtvFQ +koWPUzpriIYPkpprz2EaVu07Zb9uJHvjcoY07nYntl4jR8S7PH4XZhlVFn8AQWzs +iThU4KJF71UfVM00dDrarSgVpyOIcFXO3iUvLoJj7+RUPjrWdLuJoMqnhicgLeHZ +LAZ8ewIDAQABo4GwMIGtMAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFMh1RMlTVtt8 +EdESYpsTU08r0FnpMH4GA1UdIwR3MHWAFMh1RMlTVtt8EdESYpsTU08r0FnpoVKk +UDBOMRIwEAYDVQQKEwlOT1ZBIFJPT1QxFjAUBgNVBAcTDU1vdW50YWluIFZpZXcx +EzARBgNVBAgTCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTggkA2KodlRxtJwIwDQYJ +KoZIhvcNAQEEBQADgYEAq+YCgflK36HCdodNu2ya3O6UDRUE2dW8n96tAOmvHqmR +v38k8GIW0pjWDo+lZYnFmeJYd+QGcJl9fLzXxffV5k+rNCfr/gEYtznWLNUX7AZB +b/VC7L+yK9qz08C8n51TslXaf3fUGkfkQxsvEP7+hi0qavdd/8eTbdheWahYwWg= +-----END CERTIFICATE----- diff --git a/nova/tests/CA/private/cakey.pem b/nova/tests/CA/private/cakey.pem new file mode 100644 index 
000000000000..eee54cc3879e --- /dev/null +++ b/nova/tests/CA/private/cakey.pem @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDobUnq8rpXA/HQZ2Uu9Me3SlqCayz3ws2wtvFQkoWPUzpriIYP +kpprz2EaVu07Zb9uJHvjcoY07nYntl4jR8S7PH4XZhlVFn8AQWzsiThU4KJF71Uf +VM00dDrarSgVpyOIcFXO3iUvLoJj7+RUPjrWdLuJoMqnhicgLeHZLAZ8ewIDAQAB +AoGBANQonmZ2Nh2jniFrn/LiwULP/ho6Fov6J6N8+n1focaYZCUwM58XZRmv7KUM +X/PuBnVVnDibm2HJodTSJM/zfODnGO15kdmJ9X23FkkdTyuvphO5tYF0ONARXdfX +9LbPcLYA14VSCZCKCye6mbv/xi0C/s7q6ZBoMl7XaeD9hgUxAkEA9lxQY/ZxcLV0 +Ae5I2spBbtuXEGns11YnKnppc59RrAono1gaDeYY2WZRwztIcD6VtUv7qkzH6ubo +shAG4fvnPQJBAPGFaDODs2ckPvxnILEbjpnZXGQqDCpQ3sVJ6nfu+qdAWS92ESNo +Y6DC8zFjFaQFbKy6Jxr1VsvYDXhF8cmy7hcCQHkLElSLGWGPRdhNA268QTn+mlJu +OPf0VHoCex1cAfzNYHxZJTP/AeaO501NK2I63cOd+aDK6M75dQtH5JnT8uECQQCg +jVydkhk6oV+1jiCvW3BKWbIPa9w2bRgJ8n8JRzYc5Kvk3wm5jfVcsvvTgtip9mkt +0XmZdCpEy9T4dRasTGP1AkBMhShiVP7+P+SIQlZtSn8ckTt9G6cefEjxsv0kVFZe +SjkUO0ZifahF8r3Q1eEUSzdXEvicEwONvcpc7MLwfSD7 +-----END RSA PRIVATE KEY----- diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py new file mode 100644 index 000000000000..a4ccbbaebcc3 --- /dev/null +++ b/nova/tests/__init__.py @@ -0,0 +1,27 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:mod:`nova.tests` -- Nova Unittests +===================================================== + +.. automodule:: nova.tests + :platform: Unix +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" \ No newline at end of file diff --git a/nova/tests/access_unittest.py b/nova/tests/access_unittest.py new file mode 100644 index 000000000000..ab0759c2d949 --- /dev/null +++ b/nova/tests/access_unittest.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
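+# Unit tests for the user/project access checks exposed through nova.auth.users
+# (user creation, role assignment, and per-resource authorization).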
+ +import logging +import os +import unittest + +from nova import flags +from nova import test +from nova.auth import users +from nova.endpoint import cloud + +FLAGS = flags.FLAGS + +class AccessTestCase(test.BaseTestCase): + def setUp(self): + FLAGS.fake_libvirt = True + FLAGS.fake_storage = True + self.users = users.UserManager.instance() + super(AccessTestCase, self).setUp() + # Make a test project + # Make a test user + self.users.create_user('test1', 'access', 'secret') + + # Make the test user a member of the project + + def tearDown(self): + # Delete the test user + # Delete the test project + self.users.delete_user('test1') + pass + + def test_001_basic_user_access(self): + user = self.users.get_user('test1') + # instance-foo, should be using object and not owner_id + instance_id = "i-12345678" + self.assertTrue(user.is_authorized(instance_id, action="describe_instances")) + + def test_002_sysadmin_access(self): + user = self.users.get_user('test1') + bucket = "foo/bar/image" + self.assertFalse(user.is_authorized(bucket, action="register")) + self.users.add_role(user, "sysadmin") + + +if __name__ == "__main__": + # TODO: Implement use_fake as an option + unittest.main() diff --git a/nova/tests/api_integration.py b/nova/tests/api_integration.py new file mode 100644 index 000000000000..d2e1026b8ed8 --- /dev/null +++ b/nova/tests/api_integration.py @@ -0,0 +1,50 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import boto +from boto.ec2.regioninfo import RegionInfo + +ACCESS_KEY = 'fake' +SECRET_KEY = 'fake' +CLC_IP = '127.0.0.1' +CLC_PORT = 8773 +REGION = 'test' + +def get_connection(): + return boto.connect_ec2 ( + aws_access_key_id=ACCESS_KEY, + aws_secret_access_key=SECRET_KEY, + is_secure=False, + region=RegionInfo(None, REGION, CLC_IP), + port=CLC_PORT, + path='/services/Cloud', + debug=99 + ) + +class APIIntegrationTests(unittest.TestCase): + def test_001_get_all_images(self): + conn = get_connection() + res = conn.get_all_images() + print res + + +if __name__ == '__main__': + unittest.main() + +#print conn.get_all_key_pairs() +#print conn.create_key_pair +#print conn.create_security_group('name', 'description') + diff --git a/nova/tests/api_unittest.py b/nova/tests/api_unittest.py new file mode 100644 index 000000000000..fdbf088f96c3 --- /dev/null +++ b/nova/tests/api_unittest.py @@ -0,0 +1,189 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
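+# Unit tests for the EC2-compatible API layer: boto requests are routed straight
+# into the tornado handlers through the fake connection classes below, so no
+# network sockets or real services are needed.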
+ +import httplib +import random +import StringIO + +from nova import vendor +import boto +from boto.ec2 import regioninfo +from tornado import httpserver +from twisted.internet import defer + +from nova import flags +from nova import test +from nova.auth import users +from nova.endpoint import api +from nova.endpoint import cloud + + +FLAGS = flags.FLAGS + + +# NOTE(termie): These are a bunch of helper methods and classes to short +# circuit boto calls and feed them into our tornado handlers, +# it's pretty damn circuitous so apologies if you have to fix +# a bug in it +def boto_to_tornado(method, path, headers, data, host, connection=None): + """ translate boto requests into tornado requests + + connection should be a FakeTornadoHttpConnection instance + """ + headers = httpserver.HTTPHeaders() + for k, v in headers.iteritems(): + headers[k] = v + + req = httpserver.HTTPRequest(method=method, + uri=path, + headers=headers, + body=data, + host=host, + remote_ip='127.0.0.1', + connection=connection) + return req + + +def raw_to_httpresponse(s): + """ translate a raw tornado http response into an httplib.HTTPResponse """ + sock = FakeHttplibSocket(s) + resp = httplib.HTTPResponse(sock) + resp.begin() + return resp + + +class FakeHttplibSocket(object): + """ a fake socket implementation for httplib.HTTPResponse, trivial """ + def __init__(self, s): + self.fp = StringIO.StringIO(s) + + def makefile(self, mode, other): + return self.fp + + +class FakeTornadoStream(object): + """ a fake stream to satisfy tornado's assumptions, trivial """ + def set_close_callback(self, f): + pass + + +class FakeTornadoConnection(object): + """ a fake connection object for tornado to pass to its handlers + + web requests are expected to write to this as they get data and call + finish when they are done with the request, we buffer the writes and + kick off a callback when it is done so that we can feed the result back + into boto. + """ + def __init__(self, d): + self.d = d + self._buffer = StringIO.StringIO() + + def write(self, chunk): + self._buffer.write(chunk) + + def finish(self): + s = self._buffer.getvalue() + self.d.callback(s) + + xheaders = None + + @property + def stream(self): + return FakeTornadoStream() + + +class FakeHttplibConnection(object): + """ a fake httplib.HTTPConnection for boto to use + + requests made via this connection actually get translated and routed into + our tornado app, we then wait for the response and turn it back into + the httplib.HTTPResponse that boto expects. 
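+    Only the small subset of the httplib interface used in these tests
+    (request, getresponse, close) is implemented.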
+ """ + def __init__(self, app, host, is_secure=False): + self.app = app + self.host = host + self.deferred = defer.Deferred() + + def request(self, method, path, data, headers): + req = boto_to_tornado + conn = FakeTornadoConnection(self.deferred) + request = boto_to_tornado(connection=conn, + method=method, + path=path, + headers=headers, + data=data, + host=self.host) + handler = self.app(request) + self.deferred.addCallback(raw_to_httpresponse) + + def getresponse(self): + @defer.inlineCallbacks + def _waiter(): + result = yield self.deferred + defer.returnValue(result) + d = _waiter() + # NOTE(termie): defer.returnValue above should ensure that + # this deferred has already been called by the time + # we get here, we are going to cheat and return + # the result of the callback + return d.result + + def close(self): + pass + + +class ApiEc2TestCase(test.BaseTestCase): + def setUp(self): + super(ApiEc2TestCase, self).setUp() + + self.users = users.UserManager.instance() + self.cloud = cloud.CloudController() + + self.host = '127.0.0.1' + + self.app = api.APIServerApplication(self.users, {'Cloud': self.cloud}) + self.ec2 = boto.connect_ec2( + aws_access_key_id='fake', + aws_secret_access_key='fake', + is_secure=False, + region=regioninfo.RegionInfo(None, 'test', self.host), + port=FLAGS.cc_port, + path='/services/Cloud') + + self.mox.StubOutWithMock(self.ec2, 'new_http_connection') + + def expect_http(self, host=None, is_secure=False): + http = FakeHttplibConnection( + self.app, '%s:%d' % (self.host, FLAGS.cc_port), False) + self.ec2.new_http_connection(host, is_secure).AndReturn(http) + return http + + def test_describe_instances(self): + self.expect_http() + self.mox.ReplayAll() + + self.assertEqual(self.ec2.get_all_instances(), []) + + + def test_get_all_key_pairs(self): + self.expect_http() + self.mox.ReplayAll() + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8))) + self.users.generate_key_pair('fake', keyname) + + rv = self.ec2.get_all_key_pairs() + self.assertTrue(filter(lambda k: k.name == keyname, rv)) + diff --git a/nova/tests/bundle/1mb.manifest.xml b/nova/tests/bundle/1mb.manifest.xml new file mode 100644 index 000000000000..dc33159571a7 --- /dev/null +++ b/nova/tests/bundle/1mb.manifest.xml @@ -0,0 +1 @@ 
+2007-10-10euca-tools1.231337x86_641mb42machineda39a3ee5e6b4b0d3255bfef95601890afd807091048576113633a2ea00dc64083dd9a10eb5e233635b42a7beb1670ab75452087d9de74c60aba1cd27c136fda56f62beb581de128fb1f10d072b9e556fd25e903107a57827c21f6ee8a93a4ff55b11311fcef217e3eefb07e81f71e88216f43b4b54029c1f2549f2925a839a73947d2d5aeecec4a62ece4af9156d557ae907978298296d99154c11147fd8caf92447e90ce339928933d7579244c2f8ffb07cc0ea35f8738da8b90eff6c7a49671a84500e993e9462e4c36d5c19c0b3a2b397d035b4c0cce742b58e12552175d81d129b0425e9f71ebacb9aeb539fa9dd2ac36749fb82876f6902e5fb24b6ec19f35ec4c20acd50437fd30966e99c4d9a0647577970a8fa302314bd082c9715f071160c69bbfb070f51d2ba1076775f1d988ccde150e515088156b248e4b5a64e46c4fe064feeeedfe14511f7fde478a51acb89f9b2f6c84b60593e5c3f792ba6b01fed9bf2158fdac03086374883b39d13a3ca74497eeaaf579fc3f26effc73bfd9446a2a8c4061f0874bfaca058905180e22d3d8881551cb38f7606f19f00e4e19535dd234b66b31b77e9c7bad3885d9c9efa75c863631fd4f82a009e17d789066d9cc6032a436f05384832f6d9a3283d3e63eab04fa0da5c8c87db9b17e854e842c3fb416507d067a266b44538125ce732e486098e8ebd1ca91fa3079f007fce7d14957a9b7e57282407ead3c6eb68fe975df3d83190021b1mb.part.0c4413423cf7a57e71187e19bfd5cd4b514a642831mb.part.19d4262e6589393d09a11a0332af169887bc2e57d4e00b5ba28114dda4a9df7eeae94be847ec46117a09a1cbe41e578660642f0660dda1776b39fb3bf826b6cfec019e2a5e9c566728d186b7400ebc989a30670eb1db26ce01e68bd9d3f31290370077a85b81c66b63c1e0d5499bac115c06c17a21a81b6d3a67ebbce6c17019095af7ab07f3796c708cc843e58efc12ddc788c5e \ No newline at end of file diff --git a/nova/tests/bundle/1mb.part.0 b/nova/tests/bundle/1mb.part.0 new file mode 100644 index 0000000000000000000000000000000000000000..15a1657c571b7cb8d063496d0416ac68c593fdea GIT binary patch literal 1024 zcmV+b1poW(o-5FdA$^^VrS-L zQsvngz!d~t4Bvc0o;TG23bOShW-8d=ht!*~*p)Sy#zDK1OKM=0iM@P>UDd*A4?SR}Z7y=7|7GIgWw3?(%jW@5kVQB2Y&uT&hp>OR>G~KPW2sL+U zU#h)ZAa4$^&VA8cpq$Fl^&5~+<%~9s6mhuia=RuC+k#E$wV2X!I`W^a9pz@WrB&Rw zb1A%~qDJ!Ib5SM%;|b`98U}yb*s;VG^@_s1g_2W34=fXkF-iB<0~(-E+DDd1NLRMv z7NI7s^{*(*g{WJ(?1RjjNuLg7c7keglhjZ>tZX&a+wAf<-)-%IwJu{D_}-!Ah33dy zT**R!kZxlN@<(Z#d?rPsV{nGPDW-^?5X>+}HN(1hNd3kDvnM@*@Y%U{1-^k;o_}zk z1hlUQG$Auuu2RKT@A_$Ry9iz+Eih9`DVr-C8*cz0Et8o!C#b=c2nMy@rJ_GU_0UMC ztXmiI(|kU0agb%3aG-o&gr%WXG<-V@?@hjbwh>l*Aau}`dJJ>mt-!ZSe}`(sjM$i) z6@7P9x@@c|s*;`)7jDyYyeM)~E1#Dd7JVfw9Dp;oM2Y`lR#W4OrFJE7FY<%M8q_~( zF&yic0Pm+3odThDKy!;h1OtHlxs|=h6yR{}%AHFje?!0T+j)6ZuCvB?AU#}cyH}$< zy;1k0x~Cg8jpvvYJh%u8-68=i9tUN!EDAdn_B+&xxp<_$=7RZpKg4tlP98YU#jZu& zw{sELW;YSOv&ON)=1jmpc9m+N;AyYyJ1<9qLz-K9PZYh z0ry&AN4o-i&s?RwlLvBZvAqMfjuY>f+F%(Xv9g)g!X9j_0vN=%(KA0cy5dWr0*Gu{ zgRq8_gW3)e159E1F$1S-LsTCLs%$p3rcTNz0;&wo>RAm3soq&njTX$e$~1bN;YfV$ zI&65X%u}I_4lLGyTQ;z~!{wIth$@)sHlT0;@LPnUgL3?FN4pvIeL1#r=^VyvZ&Zj{ zU<|yc5nb_DbML`^Q8z00uG#s9+8kOBS_kVnm*4bJg+AKXDEXHd1dL}B!d589Y9iMe u8I@F7lOy63fYVjTYgaTiSu{$ManPz`+(hH#t^1dUMcr5Eb^oDo1y&xKZ~EN; literal 0 HcmV?d00001 diff --git a/nova/tests/bundle/1mb.part.1 b/nova/tests/bundle/1mb.part.1 new file mode 100644 index 000000000000..2f0406e2d1a7 --- /dev/null +++ b/nova/tests/bundle/1mb.part.1 @@ -0,0 +1 @@ +­´ˆà«€ç‰°Ƴ ¡ÀiDHW̽×JÈ8ïrV¼³h§X’·@Yj“~Ø ·Gû5û 3Nt«˜•H6Ñ$§Ëgö™é Lá¢+³æ¤X†pm¬@,øŽ>7ÚÊ×užp¼ aü`¥V2X@£#ᶠ\ No newline at end of file diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py new file mode 100644 index 000000000000..568a8dcd38b3 --- /dev/null +++ b/nova/tests/cloud_unittest.py @@ -0,0 +1,161 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import StringIO +import time +import unittest +from xml.etree import ElementTree + +from nova import vendor +import mox +from tornado import ioloop +from twisted.internet import defer + +from nova import flags +from nova import rpc +from nova import test +from nova.auth import users +from nova.compute import node +from nova.endpoint import api +from nova.endpoint import cloud + + +FLAGS = flags.FLAGS + + +class CloudTestCase(test.BaseTestCase): + def setUp(self): + super(CloudTestCase, self).setUp() + self.flags(fake_libvirt=True, + fake_storage=True, + fake_users=True, + redis_db=8) + + self.conn = rpc.Connection.instance() + logging.getLogger().setLevel(logging.DEBUG) + + # set up our cloud + self.cloud = cloud.CloudController() + self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn, + topic=FLAGS.cloud_topic, + proxy=self.cloud) + self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) + + # set up a node + self.node = node.Node() + self.node_consumer = rpc.AdapterConsumer(connection=self.conn, + topic=FLAGS.compute_topic, + proxy=self.node) + self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop)) + + user_mocker = mox.Mox() + self.admin = user_mocker.CreateMock(users.User) + self.admin.is_authorized(mox.IgnoreArg()).AndReturn(True) + self.context = api.APIRequestContext(handler=None,user=self.admin) + + def test_console_output(self): + if FLAGS.fake_libvirt: + logging.debug("Can't test instances without a real virtual env.") + return + instance_id = 'foo' + inst = yield self.node.run_instance(instance_id) + output = yield self.cloud.get_console_output(self.context, [instance_id]) + logging.debug(output) + self.assert_(output) + rv = yield self.node.terminate_instance(instance_id) + + def test_run_instances(self): + if FLAGS.fake_libvirt: + logging.debug("Can't test instances without a real virtual env.") + return + image_id = FLAGS.default_image + instance_type = FLAGS.default_instance_type + max_count = 1 + kwargs = {'image_id': image_id, + 'instance_type': instance_type, + 'max_count': max_count} + rv = yield self.cloud.run_instances(self.context, **kwargs) + # TODO: check for proper response + instance = rv['reservationSet'][0][rv['reservationSet'][0].keys()[0]][0] + logging.debug("Need to watch instance %s until it's running..." 
% instance['instance_id']) + while True: + rv = yield defer.succeed(time.sleep(1)) + info = self.cloud._get_instance(instance['instance_id']) + logging.debug(info['state']) + if info['state'] == node.Instance.RUNNING: + break + self.assert_(rv) + + if not FLAGS.fake_libvirt: + time.sleep(45) # Should use boto for polling here + for reservations in rv['reservationSet']: + # for res_id in reservations.keys(): + # logging.debug(reservations[res_id]) + # for instance in reservations[res_id]: + for instance in reservations[reservations.keys()[0]]: + logging.debug("Terminating instance %s" % instance['instance_id']) + rv = yield self.node.terminate_instance(instance['instance_id']) + + def test_instance_update_state(self): + def instance(num): + return { + 'reservation_id': 'r-1', + 'instance_id': 'i-%s' % num, + 'image_id': 'ami-%s' % num, + 'private_dns_name': '10.0.0.%s' % num, + 'dns_name': '10.0.0%s' % num, + 'ami_launch_index': str(num), + 'instance_type': 'fake', + 'availability_zone': 'fake', + 'key_name': None, + 'kernel_id': 'fake', + 'ramdisk_id': 'fake', + 'groups': ['default'], + 'product_codes': None, + 'state': 0x01, + 'user_data': '' + } + + rv = self.cloud.format_instances(self.admin) + print rv + self.assert_(len(rv['reservationSet']) == 0) + + # simulate launch of 5 instances + # self.cloud.instances['pending'] = {} + #for i in xrange(5): + # inst = instance(i) + # self.cloud.instances['pending'][inst['instance_id']] = inst + + #rv = self.cloud.format_instances(self.admin) + #self.assert_(len(rv['reservationSet']) == 1) + #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) + + # report 4 nodes each having 1 of the instances + #for i in xrange(4): + # self.cloud.update_state('instances', {('node-%s' % i): {('i-%s' % i): instance(i)}}) + + # one instance should be pending still + #self.assert_(len(self.cloud.instances['pending'].keys()) == 1) + + # check that the reservations collapse + #rv = self.cloud.format_instances(self.admin) + #self.assert_(len(rv['reservationSet']) == 1) + #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5) + + # check that we can get metadata for each instance + #for i in xrange(4): + # data = self.cloud.get_metadata(instance(i)['private_dns_name']) + # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i) diff --git a/nova/tests/datastore_unittest.py b/nova/tests/datastore_unittest.py new file mode 100644 index 000000000000..4e4d8586a38c --- /dev/null +++ b/nova/tests/datastore_unittest.py @@ -0,0 +1,60 @@ +from nova import test +from nova import datastore +import random + +class KeeperTestCase(test.BaseTestCase): + """ + Basic persistence tests for Keeper datastore. + Generalize, then use these to support + migration to redis / cassandra / multiple stores. + """ + + def __init__(self, *args, **kwargs): + """ + Create a new keeper instance for test keys. + """ + super(KeeperTestCase, self).__init__(*args, **kwargs) + self.keeper = datastore.Keeper('test-') + + def tear_down(self): + """ + Scrub out test keeper data. + """ + pass + + def test_store_strings(self): + """ + Confirm that simple strings go in and come out safely. + Should also test unicode strings. + """ + randomstring = ''.join( + [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-') + for _x in xrange(20)] + ) + self.keeper['test_string'] = randomstring + self.assertEqual(randomstring, self.keeper['test_string']) + + def test_store_dicts(self): + """ + Arbitrary dictionaries should be storable. 
+ """ + test_dict = {'key_one': 'value_one'} + self.keeper['test_dict'] = test_dict + self.assertEqual(test_dict['key_one'], + self.keeper['test_dict']['key_one']) + + def test_sets(self): + """ + A keeper dict should be self-serializing. + """ + self.keeper.set_add('test_set', 'foo') + test_dict = {'arbitrary': 'dict of stuff'} + self.keeper.set_add('test_set', test_dict) + self.assertTrue(self.keeper.set_is_member('test_set', 'foo')) + self.assertFalse(self.keeper.set_is_member('test_set', 'bar')) + self.keeper.set_remove('test_set', 'foo') + self.assertFalse(self.keeper.set_is_member('test_set', 'foo')) + rv = self.keeper.set_fetch('test_set') + self.assertEqual(test_dict, rv.next()) + self.keeper.set_remove('test_set', test_dict) + diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py new file mode 100644 index 000000000000..3c7b0be52d33 --- /dev/null +++ b/nova/tests/fake_flags.py @@ -0,0 +1,26 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nova import flags + +FLAGS = flags.FLAGS + +FLAGS.fake_libvirt = True +FLAGS.fake_storage = True +FLAGS.fake_rabbit = True +FLAGS.fake_network = True +FLAGS.fake_users = True +FLAGS.keeper_backend = 'sqlite' +FLAGS.datastore_path = ':memory:' +FLAGS.verbose = True diff --git a/nova/tests/future_unittest.py b/nova/tests/future_unittest.py new file mode 100644 index 000000000000..81d69dffff0e --- /dev/null +++ b/nova/tests/future_unittest.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
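+# Placeholder tests for cloud admin operations; test_flush_terminated only
+# sketches the intended flow in comments and does not assert anything yet.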
+ +import logging +import StringIO +import time +import unittest +from xml.etree import ElementTree + +from nova import vendor +import mox +from tornado import ioloop +from twisted.internet import defer + +from nova import cloud +from nova import exception +from nova import flags +from nova import node +from nova import rpc +from nova import test + + +FLAGS = flags.FLAGS + + +class AdminTestCase(test.BaseTestCase): + def setUp(self): + super(AdminTestCase, self).setUp() + self.flags(fake_libvirt=True, + fake_rabbit=True) + + self.conn = rpc.Connection.instance() + + logging.getLogger().setLevel(logging.INFO) + + # set up our cloud + self.cloud = cloud.CloudController() + self.cloud_consumer = rpc.AdapterConsumer(connection=self.conn, + topic=FLAGS.cloud_topic, + proxy=self.cloud) + self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop)) + + # set up a node + self.node = node.Node() + self.node_consumer = rpc.AdapterConsumer(connection=self.conn, + topic=FLAGS.compute_topic, + proxy=self.node) + self.injected.append(self.node_consumer.attach_to_tornado(self.ioloop)) + + def test_flush_terminated(self): + # Launch an instance + + # Wait until it's running + + # Terminate it + + # Wait until it's terminated + + # Flush terminated nodes + + # ASSERT that it's gone + pass diff --git a/nova/tests/keeper_unittest.py b/nova/tests/keeper_unittest.py new file mode 100644 index 000000000000..3896c9e57820 --- /dev/null +++ b/nova/tests/keeper_unittest.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +import random + +from nova import datastore +from nova import test + +class KeeperTestCase(test.TrialTestCase): + """ + Basic persistence tests for Keeper datastore. + Generalize, then use these to support + migration to redis / cassandra / multiple stores. + """ + + def setUp(self): + super(KeeperTestCase, self).setUp() + self.keeper = datastore.Keeper('test') + + def tearDown(self): + super(KeeperTestCase, self).tearDown() + self.keeper.clear() + + def test_store_strings(self): + """ + Confirm that simple strings go in and come out safely. + Should also test unicode strings. + """ + randomstring = ''.join( + [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-') + for _x in xrange(20)] + ) + self.keeper['test_string'] = randomstring + self.assertEqual(randomstring, self.keeper['test_string']) + + def test_store_dicts(self): + """ + Arbitrary dictionaries should be storable. + """ + test_dict = {'key_one': 'value_one'} + self.keeper['test_dict'] = test_dict + self.assertEqual(test_dict['key_one'], + self.keeper['test_dict']['key_one']) + + def test_sets(self): + """ + A keeper dict should be self-serializing. 
+ """ + self.keeper.set_add('test_set', 'foo') + test_dict = {'arbitrary': 'dict of stuff'} + self.keeper.set_add('test_set', test_dict) + self.assertTrue(self.keeper.set_is_member('test_set', 'foo')) + self.assertFalse(self.keeper.set_is_member('test_set', 'bar')) + self.keeper.set_remove('test_set', 'foo') + self.assertFalse(self.keeper.set_is_member('test_set', 'foo')) + rv = self.keeper.set_fetch('test_set') + self.assertEqual(test_dict, rv.next()) + self.keeper.set_remove('test_set', test_dict) + diff --git a/nova/tests/network_unittest.py b/nova/tests/network_unittest.py new file mode 100644 index 000000000000..43c7831a7a42 --- /dev/null +++ b/nova/tests/network_unittest.py @@ -0,0 +1,113 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import unittest + +from nova import vendor +import IPy + +from nova import flags +from nova import test +from nova.compute import network +from nova.auth import users + + +class NetworkTestCase(test.TrialTestCase): + def setUp(self): + super(NetworkTestCase, self).setUp() + logging.getLogger().setLevel(logging.DEBUG) + self.manager = users.UserManager.instance() + for i in range(0, 6): + name = 'user%s' % i + if not self.manager.get_user(name): + self.manager.create_user(name, name, name) + self.network = network.NetworkController(netsize=16) + + def tearDown(self): + super(NetworkTestCase, self).tearDown() + for i in range(0, 6): + name = 'user%s' % i + self.manager.delete_user(name) + + def test_network_serialization(self): + net1 = network.Network(vlan=100, network="192.168.100.0/24", conn=None) + address = net1.allocate_ip("user0", "01:24:55:36:f2:a0") + net_json = str(net1) + net2 = network.Network.from_json(net_json) + self.assertEqual(net_json, str(net2)) + self.assertTrue(IPy.IP(address) in net2.network) + + def test_allocate_deallocate_address(self): + for flag in flags.FLAGS: + print "%s=%s" % (flag, flags.FLAGS.get(flag, None)) + (address, net_name) = self.network.allocate_address( + "user0", "01:24:55:36:f2:a0") + logging.debug("Was allocated %s" % (address)) + self.assertEqual(True, address in self._get_user_addresses("user0")) + rv = self.network.deallocate_address(address) + self.assertEqual(False, address in self._get_user_addresses("user0")) + + def test_range_allocation(self): + (address, net_name) = self.network.allocate_address( + "user0", "01:24:55:36:f2:a0") + (secondaddress, net_name) = self.network.allocate_address( + "user1", "01:24:55:36:f2:a0") + self.assertEqual(True, address in self._get_user_addresses("user0")) + self.assertEqual(True, + secondaddress in self._get_user_addresses("user1")) + self.assertEqual(False, address in self._get_user_addresses("user1")) + rv = self.network.deallocate_address(address) + self.assertEqual(False, address in self._get_user_addresses("user0")) + rv = self.network.deallocate_address(secondaddress) + self.assertEqual(False, + secondaddress in self._get_user_addresses("user1")) + + def 
test_subnet_edge(self): + (secondaddress, net_name) = self.network.allocate_address("user0") + for user in range(1,5): + user_id = "user%s" % (user) + (address, net_name) = self.network.allocate_address( + user_id, "01:24:55:36:f2:a0") + (address2, net_name) = self.network.allocate_address( + user_id, "01:24:55:36:f2:a0") + (address3, net_name) = self.network.allocate_address( + user_id, "01:24:55:36:f2:a0") + self.assertEqual(False, + address in self._get_user_addresses("user0")) + self.assertEqual(False, + address2 in self._get_user_addresses("user0")) + self.assertEqual(False, + address3 in self._get_user_addresses("user0")) + rv = self.network.deallocate_address(address) + rv = self.network.deallocate_address(address2) + rv = self.network.deallocate_address(address3) + rv = self.network.deallocate_address(secondaddress) + + def test_too_many_users(self): + for i in range(0, 30): + name = 'toomany-user%s' % i + self.manager.create_user(name, name, name) + (address, net_name) = self.network.allocate_address( + name, "01:24:55:36:f2:a0") + self.manager.delete_user(name) + + def _get_user_addresses(self, user_id): + rv = self.network.describe_addresses() + user_addresses = [] + for item in rv: + if item['user_id'] == user_id: + user_addresses.append(item['address']) + return user_addresses diff --git a/nova/tests/node_unittest.py b/nova/tests/node_unittest.py new file mode 100644 index 000000000000..7a6115fcc853 --- /dev/null +++ b/nova/tests/node_unittest.py @@ -0,0 +1,128 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
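+# Unit tests for the compute node service: run, describe, reboot and terminate
+# instances against the fake virt layer.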
+ +import logging +import StringIO +import time +import unittest +from xml.etree import ElementTree + +from nova import vendor +import mox +from tornado import ioloop +from twisted.internet import defer + +from nova import exception +from nova import flags +from nova import test +from nova import utils +from nova.compute import model +from nova.compute import node + +FLAGS = flags.FLAGS + + +class InstanceXmlTestCase(test.TrialTestCase): + # @defer.inlineCallbacks + def test_serialization(self): + # TODO: Reimplement this, it doesn't make sense in redis-land + return + + # instance_id = 'foo' + # first_node = node.Node() + # inst = yield first_node.run_instance(instance_id) + # + # # force the state so that we can verify that it changes + # inst._s['state'] = node.Instance.NOSTATE + # xml = inst.toXml() + # self.assert_(ElementTree.parse(StringIO.StringIO(xml))) + # + # second_node = node.Node() + # new_inst = node.Instance.fromXml(second_node._conn, pool=second_node._pool, xml=xml) + # self.assertEqual(new_inst.state, node.Instance.RUNNING) + # rv = yield first_node.terminate_instance(instance_id) + + +class NodeConnectionTestCase(test.TrialTestCase): + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(NodeConnectionTestCase, self).setUp() + self.flags(fake_libvirt=True, + fake_storage=True, + fake_users=True, + redis_db=8) + self.node = node.Node() + + def create_instance(self): + instdir = model.InstanceDirectory() + inst = instdir.new() + # TODO(ja): add ami, ari, aki, user_data + inst['reservation_id'] = 'r-fakeres' + inst['launch_time'] = '10' + inst['owner_id'] = 'fake' + inst['node_name'] = FLAGS.node_name + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + inst.save() + return inst['instance_id'] + + @defer.inlineCallbacks + def test_run_describe_terminate(self): + instance_id = self.create_instance() + + rv = yield self.node.run_instance(instance_id) + + rv = yield self.node.describe_instances() + self.assertEqual(rv[instance_id].name, instance_id) + + rv = yield self.node.terminate_instance(instance_id) + + rv = yield self.node.describe_instances() + self.assertEqual(rv, {}) + + @defer.inlineCallbacks + def test_reboot(self): + instance_id = self.create_instance() + rv = yield self.node.run_instance(instance_id) + + rv = yield self.node.describe_instances() + logging.debug("describe_instances returns %s" % (rv)) + self.assertEqual(rv[instance_id].name, instance_id) + + yield self.node.reboot_instance(instance_id) + + rv = yield self.node.describe_instances() + self.assertEqual(rv[instance_id].name, instance_id) + rv = yield self.node.terminate_instance(instance_id) + + @defer.inlineCallbacks + def test_console_output(self): + instance_id = self.create_instance() + rv = yield self.node.run_instance(instance_id) + + console = yield self.node.get_console_output(instance_id) + self.assert_(console) + rv = yield self.node.terminate_instance(instance_id) + + @defer.inlineCallbacks + def test_run_instance_existing(self): + instance_id = self.create_instance() + rv = yield self.node.run_instance(instance_id) + + rv = yield self.node.describe_instances() + self.assertEqual(rv[instance_id].name, instance_id) + + self.assertRaises(exception.Error, self.node.run_instance, instance_id) + rv = yield self.node.terminate_instance(instance_id) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py new file mode 100644 index 000000000000..5f41d47a0c9b --- /dev/null +++ b/nova/tests/objectstore_unittest.py @@ -0,0 +1,190 
@@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import hashlib +import logging +import os +import shutil +import tempfile + +from nova import vendor + +from nova import flags +from nova import rpc +from nova import objectstore +from nova import test +from nova.auth import users + +FLAGS = flags.FLAGS + + +oss_tempdir = tempfile.mkdtemp(prefix='test_oss-') + + +# delete tempdirs from previous runs (we don't delete after test to allow +# checking the contents after running tests) +for path in glob.glob(os.path.abspath(os.path.join(oss_tempdir, '../test_oss-*'))): + if path != oss_tempdir: + shutil.rmtree(path) + + +# create bucket/images path +os.makedirs(os.path.join(oss_tempdir, 'images')) +os.makedirs(os.path.join(oss_tempdir, 'buckets')) + +class ObjectStoreTestCase(test.BaseTestCase): + def setUp(self): + super(ObjectStoreTestCase, self).setUp() + self.flags(fake_users=True, + buckets_path=os.path.join(oss_tempdir, 'buckets'), + images_path=os.path.join(oss_tempdir, 'images'), + ca_path=os.path.join(os.path.dirname(__file__), 'CA')) + self.conn = rpc.Connection.instance() + logging.getLogger().setLevel(logging.DEBUG) + + self.um = users.UserManager.instance() + + def test_buckets(self): + try: + self.um.create_user('user1') + except: pass + try: + self.um.create_user('user2') + except: pass + try: + self.um.create_user('admin_user', admin=True) + except: pass + + objectstore.bucket.Bucket.create('new_bucket', self.um.get_user('user1')) + bucket = objectstore.bucket.Bucket('new_bucket') + + # creator is authorized to use bucket + self.assert_(bucket.is_authorized(self.um.get_user('user1'))) + + # another user is not authorized + self.assert_(bucket.is_authorized(self.um.get_user('user2')) == False) + + # admin is authorized to use bucket + self.assert_(bucket.is_authorized(self.um.get_user('admin_user'))) + + # new buckets are empty + self.assert_(bucket.list_keys()['Contents'] == []) + + # storing keys works + bucket['foo'] = "bar" + + self.assert_(len(bucket.list_keys()['Contents']) == 1) + + self.assert_(bucket['foo'].read() == 'bar') + + # md5 of key works + self.assert_(bucket['foo'].md5 == hashlib.md5('bar').hexdigest()) + + # deleting non-empty bucket throws exception + exception = False + try: + bucket.delete() + except: + exception = True + + self.assert_(exception) + + # deleting key + del bucket['foo'] + + # deleting empty button + bucket.delete() + + # accessing deleted bucket throws exception + exception = False + try: + objectstore.bucket.Bucket('new_bucket') + except: + exception = True + + self.assert_(exception) + self.um.delete_user('user1') + self.um.delete_user('user2') + self.um.delete_user('admin_user') + + def test_images(self): + try: + self.um.create_user('image_creator') + except: pass + image_user = self.um.get_user('image_creator') + + # create a bucket for our bundle + objectstore.bucket.Bucket.create('image_bucket', image_user) + bucket = 
objectstore.bucket.Bucket('image_bucket') + + # upload an image manifest/parts + bundle_path = os.path.join(os.path.dirname(__file__), 'bundle') + for path in glob.glob(bundle_path + '/*'): + bucket[os.path.basename(path)] = open(path, 'rb').read() + + # register an image + objectstore.image.Image.create('i-testing', 'image_bucket/1mb.manifest.xml', image_user) + + # verify image + my_img = objectstore.image.Image('i-testing') + result_image_file = os.path.join(my_img.path, 'image') + self.assertEqual(os.stat(result_image_file).st_size, 1048576) + + sha = hashlib.sha1(open(result_image_file).read()).hexdigest() + self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3') + + # verify image permissions + try: + self.um.create_user('new_user') + except: pass + new_user = self.um.get_user('new_user') + self.assert_(my_img.is_authorized(new_user) == False) + + self.um.delete_user('new_user') + self.um.delete_user('image_creator') + +# class ApiObjectStoreTestCase(test.BaseTestCase): +# def setUp(self): +# super(ApiObjectStoreTestCase, self).setUp() +# FLAGS.fake_users = True +# FLAGS.buckets_path = os.path.join(tempdir, 'buckets') +# FLAGS.images_path = os.path.join(tempdir, 'images') +# FLAGS.ca_path = os.path.join(os.path.dirname(__file__), 'CA') +# +# self.users = users.UserManager.instance() +# self.app = handler.Application(self.users) +# +# self.host = '127.0.0.1' +# +# self.conn = boto.s3.connection.S3Connection( +# aws_access_key_id=user.access, +# aws_secret_access_key=user.secret, +# is_secure=False, +# calling_format=boto.s3.connection.OrdinaryCallingFormat(), +# port=FLAGS.s3_port, +# host=FLAGS.s3_host) +# +# self.mox.StubOutWithMock(self.ec2, 'new_http_connection') +# +# def tearDown(self): +# FLAGS.Reset() +# super(ApiObjectStoreTestCase, self).tearDown() +# +# def test_describe_instances(self): +# self.expect_http() +# self.mox.ReplayAll() +# +# self.assertEqual(self.ec2.get_all_instances(), []) diff --git a/nova/tests/real_flags.py b/nova/tests/real_flags.py new file mode 100644 index 000000000000..68fe8dc5b67d --- /dev/null +++ b/nova/tests/real_flags.py @@ -0,0 +1,24 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nova import flags + +FLAGS = flags.FLAGS + +FLAGS.fake_libvirt = False +FLAGS.fake_storage = False +FLAGS.fake_rabbit = False +FLAGS.fake_network = False +FLAGS.fake_users = False +FLAGS.verbose = False diff --git a/nova/tests/storage_unittest.py b/nova/tests/storage_unittest.py new file mode 100644 index 000000000000..31966d2d56d1 --- /dev/null +++ b/nova/tests/storage_unittest.py @@ -0,0 +1,86 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import StringIO +import time +import unittest +from xml.etree import ElementTree + +from nova import vendor +import mox +from tornado import ioloop +from twisted.internet import defer + +from nova import exception +from nova import flags +from nova import test +from nova.compute import node +from nova.volume import storage + + +FLAGS = flags.FLAGS + + +class StorageTestCase(test.TrialTestCase): + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(StorageTestCase, self).setUp() + self.mynode = node.Node() + self.mystorage = None + self.flags(fake_libvirt=True, + fake_storage=True, + redis_db=8) + if FLAGS.fake_storage: + self.mystorage = storage.FakeBlockStore() + else: + self.mystorage = storage.BlockStore() + + @test.skip_if_fake + def test_run_create_volume(self): + vol_size = '0' + user_id = 'fake' + volume_id = self.mystorage.create_volume(vol_size, user_id) + # rv = self.mystorage.describe_volumes() + + # Volumes have to be sorted by timestamp in order to work here... + # TODO(termie): get_volume returns differently than create_volume + self.assertEqual(volume_id, + self.mystorage.get_volume(volume_id)['volume_id']) + + rv = self.mystorage.delete_volume(volume_id) + self.assertRaises(exception.Error, + self.mystorage.get_volume, + volume_id) + + @test.skip_if_fake + def test_run_attach_detach_volume(self): + # Create one volume and one node to test with + instance_id = "storage-test" + # TODO(joshua) - Redo this test, can't make fake instances this way any more + # rv = self.mynode.run_instance(instance_id) + vol_size = "5" + user_id = "fake" + volume_id = self.mystorage.create_volume(vol_size, user_id) + rv = self.mystorage.attach_volume(volume_id, + instance_id, + "/dev/sdf") + volume_obj = self.mystorage.get_volume(volume_id) + self.assertEqual(volume_obj['status'], "attached") + # TODO(???): assert that it's attached to the right instance + + rv = self.mystorage.detach_volume(volume_id) + volume_obj = self.mystorage.get_volume(volume_id) + self.assertEqual(volume_obj['status'], "available") diff --git a/nova/tests/users_unittest.py b/nova/tests/users_unittest.py new file mode 100644 index 000000000000..70f508b35158 --- /dev/null +++ b/nova/tests/users_unittest.py @@ -0,0 +1,137 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
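+# Unit tests for user management via UserManager: user creation, key pairs,
+# credentials, and x509 certificate generation.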
+ +import logging +import unittest + +from nova import vendor +from M2Crypto import BIO +from M2Crypto import RSA +from M2Crypto import X509 + +from nova import crypto +from nova import flags +from nova import test +from nova import utils +from nova.auth import users +from nova.endpoint import cloud + + +FLAGS = flags.FLAGS + + +class UserTestCase(test.BaseTestCase): + def setUp(self): + super(UserTestCase, self).setUp() + self.flags(fake_libvirt=True, + fake_storage=True, + redis_db=8) + self.users = users.UserManager.instance() + + def test_001_can_create_user(self): + self.users.create_user('test1', 'access', 'secret') + + def test_002_can_get_user(self): + user = self.users.get_user('test1') + + def test_003_can_retreive_properties(self): + user = self.users.get_user('test1') + self.assertEqual('test1', user.id) + self.assertEqual('access', user.access) + self.assertEqual('secret', user.secret) + + def test_004_signature_is_valid(self): + #self.assertTrue(self.users.authenticate( **boto.generate_url ... ? ? ? )) + pass + #raise NotImplementedError + + def test_005_can_get_credentials(self): + return + credentials = self.users.get_user('test1').get_credentials() + self.assertEqual(credentials, + 'export EC2_ACCESS_KEY="access"\n' + + 'export EC2_SECRET_KEY="secret"\n' + + 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' + + 'export S3_URL="http://127.0.0.1:3333/"\n' + + 'export EC2_USER_ID="test1"\n') + + def test_006_test_key_storage(self): + user = self.users.get_user('test1') + user.create_key_pair('public', 'key', 'fingerprint') + key = user.get_key_pair('public') + self.assertEqual('key', key.public_key) + self.assertEqual('fingerprint', key.fingerprint) + + def test_007_test_key_generation(self): + user = self.users.get_user('test1') + private_key, fingerprint = user.generate_key_pair('public2') + key = RSA.load_key_string(private_key, callback=lambda: None) + bio = BIO.MemoryBuffer() + public_key = user.get_key_pair('public2').public_key + key.save_pub_key_bio(bio) + converted = crypto.ssl_pub_to_ssh_pub(bio.read()) + # assert key fields are equal + print converted + self.assertEqual(public_key.split(" ")[1].strip(), + converted.split(" ")[1].strip()) + + def test_008_can_list_key_pairs(self): + keys = self.users.get_user('test1').get_key_pairs() + self.assertTrue(filter(lambda k: k.name == 'public', keys)) + self.assertTrue(filter(lambda k: k.name == 'public2', keys)) + + def test_009_can_delete_key_pair(self): + self.users.get_user('test1').delete_key_pair('public') + keys = self.users.get_user('test1').get_key_pairs() + self.assertFalse(filter(lambda k: k.name == 'public', keys)) + + def test_010_can_list_users(self): + users = self.users.get_users() + self.assertTrue(filter(lambda u: u.id == 'test1', users)) + + def test_011_can_generate_x509(self): + # MUST HAVE RUN CLOUD SETUP BY NOW + self.cloud = cloud.CloudController() + self.cloud.setup() + private_key, signed_cert_string = self.users.get_user('test1').generate_x509_cert() + logging.debug(signed_cert_string) + + # Need to verify that it's signed by the right intermediate CA + full_chain = crypto.fetch_ca(username='test1', chain=True) + int_cert = crypto.fetch_ca(username='test1', chain=False) + cloud_cert = crypto.fetch_ca() + logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) + signed_cert = X509.load_cert_string(signed_cert_string) + chain_cert = X509.load_cert_string(full_chain) + int_cert = X509.load_cert_string(int_cert) + cloud_cert = X509.load_cert_string(cloud_cert) + 
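+        # The signed user cert must verify against the full chain and the
+        # intermediate CA; it verifies against the cloud (root) CA only when
+        # no intermediate CA is in use.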
self.assertTrue(signed_cert.verify(chain_cert.get_pubkey())) + self.assertTrue(signed_cert.verify(int_cert.get_pubkey())) + + if not FLAGS.use_intermediate_ca: + self.assertTrue(signed_cert.verify(cloud_cert.get_pubkey())) + else: + self.assertFalse(signed_cert.verify(cloud_cert.get_pubkey())) + + def test_012_can_delete_user(self): + self.users.delete_user('test1') + users = self.users.get_users() + if users != None: + self.assertFalse(filter(lambda u: u.id == 'test1', users)) + + +if __name__ == "__main__": + # TODO: Implement use_fake as an option + unittest.main() diff --git a/nova/twistd.py b/nova/twistd.py new file mode 100644 index 000000000000..ea3c9c1689ac --- /dev/null +++ b/nova/twistd.py @@ -0,0 +1,249 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Twisted daemon helpers, specifically to parse out gFlags from twisted flags, +manage pid files and support syslogging. +""" + +import logging +import os +import signal +import sys +import time +import UserDict +import logging.handlers + +from nova import vendor +from twisted.scripts import twistd +from twisted.python import log +from twisted.python import reflect +from twisted.python import runtime +from twisted.python import usage + +from nova import flags + +if runtime.platformType == "win32": + from twisted.scripts._twistw import ServerOptions +else: + from twisted.scripts._twistd_unix import ServerOptions + + +FLAGS = flags.FLAGS + + +class TwistdServerOptions(ServerOptions): + def parseArgs(self, *args): + return + + +def WrapTwistedOptions(wrapped): + class TwistedOptionsToFlags(wrapped): + subCommands = None + def __init__(self): + # NOTE(termie): _data exists because Twisted stuff expects + # to be able to set arbitrary things that are + # not actual flags + self._data = {} + self._flagHandlers = {} + self._paramHandlers = {} + + # Absorb the twistd flags into our FLAGS + self._absorbFlags() + self._absorbParameters() + self._absorbHandlers() + + super(TwistedOptionsToFlags, self).__init__() + + def _absorbFlags(self): + twistd_flags = [] + reflect.accumulateClassList(self.__class__, 'optFlags', twistd_flags) + for flag in twistd_flags: + key = flag[0].replace('-', '_') + flags.DEFINE_boolean(key, None, str(flag[-1])) + + def _absorbParameters(self): + twistd_params = [] + reflect.accumulateClassList(self.__class__, 'optParameters', twistd_params) + for param in twistd_params: + key = param[0].replace('-', '_') + flags.DEFINE_string(key, param[2], str(param[-1])) + + def _absorbHandlers(self): + twistd_handlers = {} + reflect.addMethodNamesToDict(self.__class__, twistd_handlers, "opt_") + + # NOTE(termie): Much of the following is derived/copied from + # twisted.python.usage with the express purpose of + # providing compatibility + for name in twistd_handlers.keys(): + method = getattr(self, 'opt_'+name) + + takesArg = not usage.flagFunction(method, name) + doc = getattr(method, '__doc__', None) + if not doc: + doc = 'undocumented' + + 
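+                # opt_ methods that take no argument are exposed as boolean
+                # flags; the rest become string parameters.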
if not takesArg: + if name not in FLAGS: + flags.DEFINE_boolean(name, None, doc) + self._flagHandlers[name] = method + else: + if name not in FLAGS: + flags.DEFINE_string(name, None, doc) + self._paramHandlers[name] = method + + + def _doHandlers(self): + for flag, handler in self._flagHandlers.iteritems(): + if self[flag]: + handler() + for param, handler in self._paramHandlers.iteritems(): + if self[param] is not None: + handler(self[param]) + + def __str__(self): + return str(FLAGS) + + def parseOptions(self, options=None): + if options is None: + options = sys.argv + else: + options.insert(0, '') + + args = FLAGS(options) + argv = args[1:] + # ignore subcommands + + try: + self.parseArgs(*argv) + except TypeError: + raise usage.UsageError("Wrong number of arguments.") + + self.postOptions() + return args + + def parseArgs(self, *args): + # TODO(termie): figure out a decent way of dealing with args + #return + super(TwistedOptionsToFlags, self).parseArgs(*args) + + def postOptions(self): + self._doHandlers() + + super(TwistedOptionsToFlags, self).postOptions() + + def __getitem__(self, key): + key = key.replace('-', '_') + try: + return getattr(FLAGS, key) + except (AttributeError, KeyError): + return self._data[key] + + def __setitem__(self, key, value): + key = key.replace('-', '_') + try: + return setattr(FLAGS, key, value) + except (AttributeError, KeyError): + self._data[key] = value + + return TwistedOptionsToFlags + + +def stop(pidfile): + """ + Stop the daemon + """ + # Get the pid from the pidfile + try: + pf = file(pidfile,'r') + pid = int(pf.read().strip()) + pf.close() + except IOError: + pid = None + + if not pid: + message = "pidfile %s does not exist. Daemon not running?\n" + sys.stderr.write(message % pidfile) + return # not an error in a restart + + # Try killing the daemon process + try: + while 1: + os.kill(pid, signal.SIGKILL) + time.sleep(0.1) + except OSError, err: + err = str(err) + if err.find("No such process") > 0: + if os.path.exists(pidfile): + os.remove(pidfile) + else: + print str(err) + sys.exit(1) + + +def serve(filename): + logging.debug("Serving %s" % filename) + name = os.path.basename(filename) + OptionsClass = WrapTwistedOptions(TwistdServerOptions) + options = OptionsClass() + argv = options.parseOptions() + logging.getLogger('amqplib').setLevel(logging.WARN) + FLAGS.python = filename + FLAGS.no_save = True + if not FLAGS.pidfile: + FLAGS.pidfile = '%s.pid' % name + elif FLAGS.pidfile.endswith('twistd.pid'): + FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) + + if not FLAGS.logfile: + FLAGS.logfile = '%s.log' % name + + action = 'start' + if len(argv) > 1: + action = argv.pop() + + if action == 'stop': + stop(FLAGS.pidfile) + sys.exit() + elif action == 'restart': + stop(FLAGS.pidfile) + elif action == 'start': + pass + else: + print 'usage: %s [options] [start|stop|restart]' % argv[0] + sys.exit(1) + + formatter = logging.Formatter( + name + '(%(name)s): %(levelname)s %(message)s') + handler = logging.StreamHandler(log.StdioOnnaStick()) + handler.setFormatter(formatter) + logging.getLogger().addHandler(handler) + + if FLAGS.verbose: + logging.getLogger().setLevel(logging.DEBUG) + else: + logging.getLogger().setLevel(logging.WARNING) + + if FLAGS.syslog: + syslog = logging.handlers.SysLogHandler(address='/dev/log') + syslog.setFormatter(formatter) + logging.getLogger().addHandler(syslog) + + logging.debug("Full set of FLAGS:") + for flag in FLAGS: + logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + + 
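    # NOTE(editor): hypothetical usage, for illustration only -- a bin script
    # would hand control to this helper roughly like:
    #
    #     from nova import twistd
    #     twistd.serve(__file__)
    #
    # at which point runApp() below runs the application defined in that file
    # (FLAGS.python was pointed at it earlier in this function).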
twistd.runApp(options) diff --git a/nova/utils.py b/nova/utils.py new file mode 100644 index 000000000000..0cfa2cf6c71d --- /dev/null +++ b/nova/utils.py @@ -0,0 +1,96 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +System-level utilities and helper functions. +""" + +import logging +import socket +import sys +import os.path +import inspect +import subprocess +import random + +def fetchfile(url, target): + logging.debug("Fetching %s" % url) +# c = pycurl.Curl() +# fp = open(target, "wb") +# c.setopt(c.URL, url) +# c.setopt(c.WRITEDATA, fp) +# c.perform() +# c.close() +# fp.close() + execute("curl %s -o %s" % (url, target)) + +def execute(cmd, input=None): + #logging.debug("Running %s" % (cmd)) + obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + result = None + if input != None: + result = obj.communicate(input) + else: + result = obj.communicate() + obj.stdin.close() + if obj.returncode: + logging.debug("Result was %s" % (obj.returncode)) + return result + +def abspath(s): + return os.path.join(os.path.dirname(__file__), s) + +def default_flagfile(filename='nova.conf'): + for arg in sys.argv: + if arg.find('flagfile') != -1: + break + else: + if not os.path.isabs(filename): + # turn relative filename into an absolute path + script_dir = os.path.dirname(inspect.stack()[-1][1]) + filename = os.path.abspath(os.path.join(script_dir, filename)) + if os.path.exists(filename): + sys.argv = sys.argv[:1] + ['--flagfile=%s' % filename] + sys.argv[1:] + +def debug(arg): + logging.debug('debug in callback: %s', arg) + return arg + +def runthis(prompt, cmd): + logging.debug("Running %s" % (cmd)) + logging.debug(prompt % (subprocess.call(cmd.split(" ")))) + + +def generate_uid(topic, size=8): + return '%s-%s' % (topic, ''.join([random.choice('01234567890abcdefghijklmnopqrstuvwxyz') for x in xrange(size)])) + +def generate_mac(): + mac = [0x00, 0x16, 0x3e, random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), random.randint(0x00, 0xff) + ] + return ':'.join(map(lambda x: "%02x" % x, mac)) + +def last_octet(address): + return int(address.split(".")[-1]) + +def get_my_ip(): + ''' returns the actual ip of the local machine. + ''' + csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + csock.connect(('www.google.com', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr diff --git a/nova/vendor.py b/nova/vendor.py new file mode 100644 index 000000000000..758adeb3cc3e --- /dev/null +++ b/nova/vendor.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Get our vendor folders into the system path. +""" + +import os +import sys + +# abspath/__file__/../vendor +VENDOR_PATH = os.path.abspath( + os.path.join(os.path.dirname(os.path.dirname(__file__)), 'vendor')) + +if not os.path.exists(VENDOR_PATH): + print 'warning: no vendor libraries included' +else: + paths = [VENDOR_PATH, + os.path.join(VENDOR_PATH, 'pymox'), + os.path.join(VENDOR_PATH, 'tornado'), + os.path.join(VENDOR_PATH, 'python-gflags'), + os.path.join(VENDOR_PATH, 'python-daemon'), + os.path.join(VENDOR_PATH, 'lockfile'), + os.path.join(VENDOR_PATH, 'boto'), + os.path.join(VENDOR_PATH, 'Twisted-10.0.0'), + os.path.join(VENDOR_PATH, 'redis-py'), + ] + + for p in paths: + if p not in sys.path: + sys.path.insert(0, p) diff --git a/nova/volume/__init__.py b/nova/volume/__init__.py new file mode 100644 index 000000000000..1c569f383060 --- /dev/null +++ b/nova/volume/__init__.py @@ -0,0 +1,27 @@ +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +:mod:`nova.volume` -- Nova Block Storage +===================================================== + +.. automodule:: nova.volume + :platform: Unix +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. moduleauthor:: Andy Smith +""" \ No newline at end of file diff --git a/nova/volume/storage.py b/nova/volume/storage.py new file mode 100644 index 000000000000..823e1390a55e --- /dev/null +++ b/nova/volume/storage.py @@ -0,0 +1,250 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Nova Storage manages creating, attaching, detaching, and +destroying persistent storage volumes, ala EBS. +Currently uses Ata-over-Ethernet. 
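
A rough, illustrative sketch of the call flow implied by the classes below
(sizes are in gigabytes; the user, instance and mountpoint values here are
made up for the example):

    bs = BlockStore()
    volume_id = bs.create_volume(size=1, user_id='admin')
    bs.attach_volume(volume_id, instance_id='i-00000001', mountpoint='/dev/sdc')
    bs.detach_volume(volume_id)
    bs.delete_volume(volume_id)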
+""" + +import logging +import random +import socket +import subprocess +import time + +from nova import vendor +from tornado import ioloop +from twisted.internet import defer + +from nova import datastore +from nova import exception +from nova import flags +from nova import rpc +from nova import utils + + +FLAGS = flags.FLAGS +flags.DEFINE_string('storage_dev', '/dev/sdb', + 'Physical device to use for volumes') +flags.DEFINE_string('volume_group', 'nova-volumes', + 'Name for the VG that will contain exported volumes') +flags.DEFINE_string('aoe_eth_dev', 'eth0', + 'Which device to export the volumes on') +flags.DEFINE_string('storage_name', + socket.gethostname(), + 'name of this node') +flags.DEFINE_integer('shelf_id', + utils.last_octet(utils.get_my_ip()), + 'AoE shelf_id for this node') +flags.DEFINE_string('storage_availability_zone', + 'nova', + 'availability zone of this node') +flags.DEFINE_boolean('fake_storage', False, + 'Should we make real storage volumes to attach?') + +class BlockStore(object): + def __init__(self): + super(BlockStore, self).__init__() + self.volume_class = Volume + if FLAGS.fake_storage: + self.volume_class = FakeVolume + self._init_volume_group() + self.keeper = datastore.Keeper('instances') + + def report_state(self): + #TODO: aggregate the state of the system + pass + + def create_volume(self, size, user_id): + """ + Creates an exported volume (fake or real), + restarts exports to make it available. + Volume at this point has size, owner, and zone. + """ + logging.debug("Creating volume of size: %s" % (size)) + vol = self.volume_class.create(size, user_id) + self.keeper.set_add('volumes', vol['volume_id']) + self._restart_exports() + return vol['volume_id'] + + def get_volume(self, volume_id): + """ Returns a redis-backed volume object """ + if volume_id in self.keeper['volumes']: + return self.volume_class(volume_id=volume_id) + raise exception.Error("Volume does not exist") + + def by_project(self, project): + """ returns a list of volume objects for a project """ + # TODO(termie): I don't understand why this is doing a range + #for volume_id in datastore.Redis.instance().lrange("project:%s:volumes" % + #project, 0, -1): + for volume_id in datastore['project:%s:volumes' % project]: + yield self.volume_class(volume_id=volume_id) + + def by_node(self, node_id): + """ returns a list of volumes for a node """ + for volume in self.all: + if volume['node_name'] == node_id: + yield volume + + @property + def all(self): + """ returns a list of all volumes """ + for volume_id in self.keeper['volumes']: + yield self.volume_class(volume_id=volume_id) + + + def delete_volume(self, volume_id): + logging.debug("Deleting volume with id of: %s" % (volume_id)) + vol = self.get_volume(volume_id) + vol.destroy() + self.keeper.set_remove('volumes', vol['volume_id']) + return True + + def attach_volume(self, volume_id, instance_id, mountpoint): + self.volume_class(volume_id).attach(instance_id, mountpoint) + + def detach_volume(self, volume_id): + self.volume_class(volume_id).detach() + + def loop_volumes(self): + volumes = subprocess.Popen(["sudo", "lvs", "--noheadings"], stdout=subprocess.PIPE).communicate()[0].split("\n") + for lv in volumes: + if len(lv.split(" ")) > 1: + yield lv.split(" ")[2] + + def _restart_exports(self): + if FLAGS.fake_storage: + return + utils.runthis("Setting exports to auto: %s", "sudo vblade-persist auto all") + utils.runthis("Starting all exports: %s", "sudo vblade-persist start all") + utils.runthis("Discovering AOE devices: %s", "sudo 
aoe-discover") + + def _init_volume_group(self): + if FLAGS.fake_storage: + return + utils.runthis("PVCreate returned: %s", "sudo pvcreate %s" % (FLAGS.storage_dev)) + utils.runthis("VGCreate returned: %s", "sudo vgcreate %s %s" % (FLAGS.volume_group, FLAGS.storage_dev)) + + +class FakeBlockStore(BlockStore): + def __init__(self): + super(FakeBlockStore, self).__init__() + + def loop_volumes(self): + return self.volumes + + def _init_volume_group(self): + pass + + def _restart_exports(self): + pass + + +class Volume(datastore.RedisModel): + + object_type = 'volume' + + def __init__(self, volume_id=None): + self.volume_id = volume_id + super(Volume, self).__init__(object_id=volume_id) + + @classmethod + def create(cls, size, user_id): + volume_id = utils.generate_uid('vol') + vol = cls(volume_id=volume_id) + #TODO(vish): do we really need to store the volume id as .object_id .volume_id and ['volume_id']? + vol['volume_id'] = volume_id + vol['node_name'] = FLAGS.storage_name + vol['size'] = size + vol['user_id'] = user_id + vol['availability_zone'] = FLAGS.storage_availability_zone + vol["instance_id"] = 'none' + vol["mountpoint"] = 'none' + vol["create_time"] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + vol["attachment_set"] = '' + vol.create_lv() + vol.setup_export() + vol['status'] = "available" + vol.save() + return vol + + def attach(self, instance_id, mountpoint): + self['instance_id'] = instance_id + self['mountpoint'] = mountpoint + self['status'] = "attached" + self.save() + + def detach(self): + self['instance_id'] = None + self['mountpoint'] = None + self['status'] = "available" + self.save() + + def destroy(self): + try: + self._remove_export() + except: + pass + self._delete_lv() + super(Volume, self).destroy() + + def create_lv(self): + if str(self['size']) == '0': + sizestr = '100M' + else: + sizestr = '%sG' % self['size'] + utils.runthis("Creating LV: %s", "sudo lvcreate -L %s -n %s %s" % (sizestr, self.volume_id, FLAGS.volume_group)) + + def _delete_lv(self): + utils.runthis("Removing LV: %s", "sudo lvremove -f %s/%s" % (FLAGS.volume_group, self.volume_id)) + + def setup_export(self): + (shelf_id, blade_id) = get_next_aoe_numbers() + self['aoe_device'] = "e%s.%s" % (shelf_id, blade_id) + self.save() + utils.runthis("Creating AOE export: %s", + "sudo vblade-persist setup %s %s %s /dev/%s/%s" % + (shelf_id, blade_id, FLAGS.aoe_eth_dev, FLAGS.volume_group, self.volume_id)) + + def _remove_export(self): + utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist stop %s %s" % (self.aoe_device[1], self.aoe_device[3])) + utils.runthis("Destroyed AOE export: %s", "sudo vblade-persist destroy %s %s" % (self.aoe_device[1], self.aoe_device[3])) + + +class FakeVolume(Volume): + def create_lv(self): + pass + + def setup_export(self): + # TODO(???): This may not be good enough? 
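        # NOTE(editor): AoE exports are addressed as e<shelf>.<slot>; the fake
        # driver just pairs this node's shelf_id with a random three-digit
        # slot rather than scanning /var/lib/vblade-persist the way
        # get_next_aoe_numbers() does for real volumes.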
+ self['aoe_device'] = 'e%s.%s' % (FLAGS.shelf_id, + ''.join([random.choice('0123456789') for x in xrange(3)])) + self.save() + + def _remove_export(self): + pass + + def _delete_lv(self): + pass + +def get_next_aoe_numbers(): + aoes = glob.glob("/var/lib/vblade-persist/vblades/e*") + aoes.extend(['e0.0']) + blade_id = int(max([int(a.split('.')[1]) for a in aoes])) + 1 + logging.debug("Next blade_id is %s" % (blade_id)) + shelf_id = FLAGS.shelf_id + return (shelf_id, blade_id) diff --git a/run_tests.py b/run_tests.py new file mode 100644 index 000000000000..535a0464a5e7 --- /dev/null +++ b/run_tests.py @@ -0,0 +1,99 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This is our basic test running framework based on Twisted's Trial. + +Usage Examples: + + # to run all the tests + python run_tests.py + + # to run a specific test suite imported here + python run_tests.py NodeConnectionTestCase + + # to run a specific test imported here + python run_tests.py NodeConnectionTestCase.test_reboot + + # to run some test suites elsewhere + python run_tests.py nova.tests.node_unittest + python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase + +Due to our use of multiprocessing it we frequently get some ignorable +'Interrupted system call' exceptions after test completion. 
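
When --fake_tests is set, nova.tests.fake_flags is imported below instead of
nova.tests.real_flags, which is presumably how the suites end up exercising
the fake backends (fake libvirt, fake storage, and so on) rather than real
services.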
+ +""" +import __main__ +import sys + +from nova import vendor +from twisted.scripts import trial as trial_script + +from nova import flags +from nova import twistd + +from nova.tests.access_unittest import * +from nova.tests.api_unittest import * +from nova.tests.cloud_unittest import * +from nova.tests.keeper_unittest import * +from nova.tests.network_unittest import * +from nova.tests.node_unittest import * +from nova.tests.objectstore_unittest import * +from nova.tests.storage_unittest import * +from nova.tests.users_unittest import * +from nova.tests.datastore_unittest import * + + +FLAGS = flags.FLAGS + + +if __name__ == '__main__': + OptionsClass = twistd.WrapTwistedOptions(trial_script.Options) + config = OptionsClass() + argv = config.parseOptions() + + FLAGS.verbose = True + + # TODO(termie): these should make a call instead of doing work on import + if FLAGS.fake_tests: + from nova.tests.fake_flags import * + else: + from nova.tests.real_flags import * + + if len(argv) == 1 and len(config['tests']) == 0: + # If no tests were specified run the ones imported in this file + # NOTE(termie): "tests" is not a flag, just some Trial related stuff + config['tests'].update(['__main__']) + elif len(config['tests']): + # If we specified tests check first whether they are in __main__ + for arg in config['tests']: + key = arg.split('.')[0] + if hasattr(__main__, key): + config['tests'].remove(arg) + config['tests'].add('__main__.%s' % arg) + + trial_script._initialDebugSetup(config) + trialRunner = trial_script._makeRunner(config) + suite = trial_script._getSuite(config) + if config['until-failure']: + test_result = trialRunner.runUntilFailure(suite) + else: + test_result = trialRunner.run(suite) + if config.tracer: + sys.settrace(None) + results = config.tracer.results() + results.write_results(show_missing=1, summary=False, + coverdir=config.coverdir) + sys.exit(not test_result.wasSuccessful()) diff --git a/setup.py b/setup.py new file mode 100644 index 000000000000..a25ae0c8cded --- /dev/null +++ b/setup.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# Copyright [2010] [Anso Labs, LLC] +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import os +import sys + +from setuptools import setup, find_packages + +srcdir = os.path.join(os.path.dirname(sys.argv[0]), 'src') + +setup(name='nova', + version='0.3.0', + description='None Other, Vaguely Awesome', + author='nova-core', + author_email='nova-core@googlegroups.com', + url='http://novacc.org/', + packages = find_packages(), + + ) diff --git a/vendor/IPy.py b/vendor/IPy.py new file mode 100644 index 000000000000..9fd80ece442c --- /dev/null +++ b/vendor/IPy.py @@ -0,0 +1,1304 @@ +""" +IPy - class and tools for handling of IPv4 and IPv6 addresses and networks. +See README file for learn how to use IPy. 
+ +Further Information might be available at: +http://software.inl.fr/trac/trac.cgi/wiki/IPy +""" + +# $HeadURL: https://svn.inl.fr/inl-svn/src/tools/ipy/tags/IPy-0.70/IPy.py $ +# $Id: IPy.py 19309 2009-10-29 10:21:13Z haypo $ + +__rcsid__ = '$Id: IPy.py 19309 2009-10-29 10:21:13Z haypo $' +__version__ = '0.70' + +import types + +# Definition of the Ranges for IPv4 IPs +# this should include www.iana.org/assignments/ipv4-address-space +# and www.iana.org/assignments/multicast-addresses +IPv4ranges = { + '0': 'PUBLIC', # fall back + '00000000': 'PRIVATE', # 0/8 + '00001010': 'PRIVATE', # 10/8 + '01111111': 'PRIVATE', # 127.0/8 + '1': 'PUBLIC', # fall back + '1010100111111110': 'PRIVATE', # 169.254/16 + '101011000001': 'PRIVATE', # 172.16/12 + '1100000010101000': 'PRIVATE', # 192.168/16 + '11011111': 'RESERVED', # 223/8 + '111': 'RESERVED' # 224/3 + } + +# Definition of the Ranges for IPv6 IPs +# see also www.iana.org/assignments/ipv6-address-space, +# www.iana.org/assignments/ipv6-tla-assignments, +# www.iana.org/assignments/ipv6-multicast-addresses, +# www.iana.org/assignments/ipv6-anycast-addresses +IPv6ranges = { + '00000000' : 'RESERVED', # ::/8 + '00000001' : 'UNASSIGNED', # 100::/8 + '0000001' : 'NSAP', # 200::/7 + '0000010' : 'IPX', # 400::/7 + '0000011' : 'UNASSIGNED', # 600::/7 + '00001' : 'UNASSIGNED', # 800::/5 + '0001' : 'UNASSIGNED', # 1000::/4 + '0010000000000000' : 'RESERVED', # 2000::/16 Reserved + '0010000000000001' : 'ASSIGNABLE', # 2001::/16 Sub-TLA Assignments [RFC2450] + '00100000000000010000000': 'ASSIGNABLE IANA', # 2001:0000::/29 - 2001:01F8::/29 IANA + '00100000000000010000001': 'ASSIGNABLE APNIC', # 2001:0200::/29 - 2001:03F8::/29 APNIC + '00100000000000010000010': 'ASSIGNABLE ARIN', # 2001:0400::/29 - 2001:05F8::/29 ARIN + '00100000000000010000011': 'ASSIGNABLE RIPE', # 2001:0600::/29 - 2001:07F8::/29 RIPE NCC + '0010000000000010' : '6TO4', # 2002::/16 "6to4" [RFC3056] + '0011111111111110' : '6BONE', # 3FFE::/16 6bone Testing [RFC2471] + '0011111111111111' : 'RESERVED', # 3FFF::/16 Reserved + '010' : 'GLOBAL-UNICAST', # 4000::/3 + '011' : 'UNASSIGNED', # 6000::/3 + '100' : 'GEO-UNICAST', # 8000::/3 + '101' : 'UNASSIGNED', # A000::/3 + '110' : 'UNASSIGNED', # C000::/3 + '1110' : 'UNASSIGNED', # E000::/4 + '11110' : 'UNASSIGNED', # F000::/5 + '111110' : 'UNASSIGNED', # F800::/6 + '1111110' : 'UNASSIGNED', # FC00::/7 + '111111100' : 'UNASSIGNED', # FE00::/9 + '1111111010' : 'LINKLOCAL', # FE80::/10 + '1111111011' : 'SITELOCAL', # FEC0::/10 + '11111111' : 'MULTICAST', # FF00::/8 + '0' * 96 : 'IPV4COMP', # ::/96 + '0' * 80 + '1' * 16 : 'IPV4MAP', # ::FFFF:0:0/96 + '0' * 128 : 'UNSPECIFIED', # ::/128 + '0' * 127 + '1' : 'LOOPBACK' # ::1/128 + } + + +class IPint: + """Handling of IP addresses returning integers. + + Use class IP instead because some features are not implemented for + IPint.""" + + def __init__(self, data, ipversion=0, make_net=0): + """Create an instance of an IP object. + + Data can be a network specification or a single IP. IP + addresses can be specified in all forms understood by + parseAddress(). The size of a network can be specified as + + /prefixlen a.b.c.0/24 2001:658:22a:cafe::/64 + -lastIP a.b.c.0-a.b.c.255 2001:658:22a:cafe::-2001:658:22a:cafe:ffff:ffff:ffff:ffff + /decimal netmask a.b.c.d/255.255.255.0 not supported for IPv6 + + If no size specification is given a size of 1 address (/32 for + IPv4 and /128 for IPv6) is assumed. 
+ + If make_net is True, an IP address will be transformed into the network + address by applying the specified netmask. + + >>> print IP('127.0.0.0/8') + 127.0.0.0/8 + >>> print IP('127.0.0.0/255.0.0.0') + 127.0.0.0/8 + >>> print IP('127.0.0.0-127.255.255.255') + 127.0.0.0/8 + >>> print IP('127.0.0.1/255.0.0.0', make_net=True) + 127.0.0.0/8 + + See module documentation for more examples. + """ + + # Print no Prefixlen for /32 and /128 + self.NoPrefixForSingleIp = 1 + + # Do we want prefix printed by default? see _printPrefix() + self.WantPrefixLen = None + + netbits = 0 + prefixlen = -1 + + # handling of non string values in constructor + if type(data) == types.IntType or type(data) == types.LongType: + self.ip = long(data) + if ipversion == 0: + if self.ip < 0x100000000L: + ipversion = 4 + else: + ipversion = 6 + if ipversion == 4: + prefixlen = 32 + elif ipversion == 6: + prefixlen = 128 + else: + raise ValueError, "only IPv4 and IPv6 supported" + self._ipversion = ipversion + self._prefixlen = prefixlen + # handle IP instance as an parameter + elif isinstance(data, IPint): + self._ipversion = data._ipversion + self._prefixlen = data._prefixlen + self.ip = data.ip + else: + # TODO: refactor me! + # splitting of a string into IP and prefixlen et. al. + x = data.split('-') + if len(x) == 2: + # a.b.c.0-a.b.c.255 specification ? + (ip, last) = x + (self.ip, parsedVersion) = parseAddress(ip) + if parsedVersion != 4: + raise ValueError, "first-last notation only allowed for IPv4" + (last, lastversion) = parseAddress(last) + if lastversion != 4: + raise ValueError, "last address should be IPv4, too" + if last < self.ip: + raise ValueError, "last address should be larger than first" + size = last - self.ip + netbits = _count1Bits(size) + # make sure the broadcast is the same as the last ip + # otherwise it will return /16 for something like: + # 192.168.0.0-192.168.191.255 + if IP('%s/%s' % (ip, 32-netbits)).broadcast().int() != last: + raise ValueError, \ + "the range %s is not on a network boundary." % data + elif len(x) == 1: + x = data.split('/') + # if no prefix is given use defaults + if len(x) == 1: + ip = x[0] + prefixlen = -1 + elif len(x) > 2: + raise ValueError, "only one '/' allowed in IP Address" + else: + (ip, prefixlen) = x + if prefixlen.find('.') != -1: + # check if the user might have used a netmask like + # a.b.c.d/255.255.255.0 + (netmask, vers) = parseAddress(prefixlen) + if vers != 4: + raise ValueError, "netmask must be IPv4" + prefixlen = _netmaskToPrefixlen(netmask) + elif len(x) > 2: + raise ValueError, "only one '-' allowed in IP Address" + else: + raise ValueError, "can't parse" + + (self.ip, parsedVersion) = parseAddress(ip) + if ipversion == 0: + ipversion = parsedVersion + if prefixlen == -1: + if ipversion == 4: + prefixlen = 32 - netbits + elif ipversion == 6: + prefixlen = 128 - netbits + else: + raise ValueError, "only IPv4 and IPv6 supported" + self._ipversion = ipversion + self._prefixlen = int(prefixlen) + + if make_net: + self.ip = self.ip & _prefixlenToNetmask(self._prefixlen, self._ipversion) + + if not _checkNetaddrWorksWithPrefixlen(self.ip, + self._prefixlen, self._ipversion): + raise ValueError, "%s has invalid prefix length (%s)" % (repr(self), self._prefixlen) + + def int(self): + """Return the first / base / network addess as an (long) integer. + + The same as IP[0]. + + >>> "%X" % IP('10.0.0.0/8').int() + 'A000000' + """ + return self.ip + + def version(self): + """Return the IP version of this Object. 
+ + >>> IP('10.0.0.0/8').version() + 4 + >>> IP('::1').version() + 6 + """ + return self._ipversion + + def prefixlen(self): + """Returns Network Prefixlen. + + >>> IP('10.0.0.0/8').prefixlen() + 8 + """ + return self._prefixlen + + def net(self): + """ + Return the base (first) address of a network as an (long) integer. + """ + return self.int() + + def broadcast(self): + """ + Return the broadcast (last) address of a network as an (long) integer. + + The same as IP[-1].""" + return self.int() + self.len() - 1 + + def _printPrefix(self, want): + """Prints Prefixlen/Netmask. + + Not really. In fact it is our universal Netmask/Prefixlen printer. + This is considered an internal function. + + want == 0 / None don't return anything 1.2.3.0 + want == 1 /prefix 1.2.3.0/24 + want == 2 /netmask 1.2.3.0/255.255.255.0 + want == 3 -lastip 1.2.3.0-1.2.3.255 + """ + + if (self._ipversion == 4 and self._prefixlen == 32) or \ + (self._ipversion == 6 and self._prefixlen == 128): + if self.NoPrefixForSingleIp: + want = 0 + if want == None: + want = self.WantPrefixLen + if want == None: + want = 1 + if want: + if want == 2: + # this should work with IP and IPint + netmask = self.netmask() + if type(netmask) != types.IntType \ + and type(netmask) != types.LongType: + netmask = netmask.int() + return "/%s" % (intToIp(netmask, self._ipversion)) + elif want == 3: + return "-%s" % (intToIp(self.ip + self.len() - 1, self._ipversion)) + else: + # default + return "/%d" % (self._prefixlen) + else: + return '' + + # We have different flavours to convert to: + # strFullsize 127.0.0.1 2001:0658:022a:cafe:0200:c0ff:fe8d:08fa + # strNormal 127.0.0.1 2001:658:22a:cafe:200:c0ff:fe8d:08fa + # strCompressed 127.0.0.1 2001:658:22a:cafe::1 + # strHex 0x7F000001L 0x20010658022ACAFE0200C0FFFE8D08FA + # strDec 2130706433 42540616829182469433547974687817795834 + + def strBin(self, wantprefixlen = None): + """Return a string representation as a binary value. + + >>> print IP('127.0.0.1').strBin() + 01111111000000000000000000000001 + """ + + + if self._ipversion == 4: + bits = 32 + elif self._ipversion == 6: + bits = 128 + else: + raise ValueError, "only IPv4 and IPv6 supported" + + if self.WantPrefixLen == None and wantprefixlen == None: + wantprefixlen = 0 + ret = _intToBin(self.ip) + return '0' * (bits - len(ret)) + ret + self._printPrefix(wantprefixlen) + + def strCompressed(self, wantprefixlen = None): + """Return a string representation in compressed format using '::' Notation. 
+ + >>> IP('127.0.0.1').strCompressed() + '127.0.0.1' + >>> IP('2001:0658:022a:cafe:0200::1').strCompressed() + '2001:658:22a:cafe:200::1' + >>> IP('ffff:ffff:ffff:ffff:ffff:f:f:fffc/127').strCompressed() + 'ffff:ffff:ffff:ffff:ffff:f:f:fffc/127' + """ + + if self.WantPrefixLen == None and wantprefixlen == None: + wantprefixlen = 1 + + if self._ipversion == 4: + return self.strFullsize(wantprefixlen) + else: + if self.ip >> 32 == 0xffff: + ipv4 = intToIp(self.ip & 0xffffffff, 4) + text = "::ffff:" + ipv4 + self._printPrefix(wantprefixlen) + return text + # find the longest sequence of '0' + hextets = [int(x, 16) for x in self.strFullsize(0).split(':')] + # every element of followingzeros will contain the number of zeros + # following the corresponding element of hextets + followingzeros = [0] * 8 + for i in range(len(hextets)): + followingzeros[i] = _countFollowingZeros(hextets[i:]) + # compressionpos is the position where we can start removing zeros + compressionpos = followingzeros.index(max(followingzeros)) + if max(followingzeros) > 1: + # genererate string with the longest number of zeros cut out + # now we need hextets as strings + hextets = [x for x in self.strNormal(0).split(':')] + while compressionpos < len(hextets) and hextets[compressionpos] == '0': + del(hextets[compressionpos]) + hextets.insert(compressionpos, '') + if compressionpos + 1 >= len(hextets): + hextets.append('') + if compressionpos == 0: + hextets = [''] + hextets + return ':'.join(hextets) + self._printPrefix(wantprefixlen) + else: + return self.strNormal(0) + self._printPrefix(wantprefixlen) + + def strNormal(self, wantprefixlen = None): + """Return a string representation in the usual format. + + >>> print IP('127.0.0.1').strNormal() + 127.0.0.1 + >>> print IP('2001:0658:022a:cafe:0200::1').strNormal() + 2001:658:22a:cafe:200:0:0:1 + """ + + if self.WantPrefixLen == None and wantprefixlen == None: + wantprefixlen = 1 + + if self._ipversion == 4: + ret = self.strFullsize(0) + elif self._ipversion == 6: + ret = ':'.join([hex(x)[2:] for x in [int(x, 16) for x in self.strFullsize(0).split(':')]]) + else: + raise ValueError, "only IPv4 and IPv6 supported" + + + + return ret + self._printPrefix(wantprefixlen) + + def strFullsize(self, wantprefixlen = None): + """Return a string representation in the non-mangled format. + + >>> print IP('127.0.0.1').strFullsize() + 127.0.0.1 + >>> print IP('2001:0658:022a:cafe:0200::1').strFullsize() + 2001:0658:022a:cafe:0200:0000:0000:0001 + """ + + if self.WantPrefixLen == None and wantprefixlen == None: + wantprefixlen = 1 + + return intToIp(self.ip, self._ipversion).lower() + self._printPrefix(wantprefixlen) + + def strHex(self, wantprefixlen = None): + """Return a string representation in hex format in lower case. + + >>> IP('127.0.0.1').strHex() + '0x7f000001' + >>> IP('2001:0658:022a:cafe:0200::1').strHex() + '0x20010658022acafe0200000000000001' + """ + + if self.WantPrefixLen == None and wantprefixlen == None: + wantprefixlen = 0 + + x = hex(self.ip) + if x[-1] == 'L': + x = x[:-1] + return x.lower() + self._printPrefix(wantprefixlen) + + def strDec(self, wantprefixlen = None): + """Return a string representation in decimal format. 
+ + >>> print IP('127.0.0.1').strDec() + 2130706433 + >>> print IP('2001:0658:022a:cafe:0200::1').strDec() + 42540616829182469433547762482097946625 + """ + + if self.WantPrefixLen == None and wantprefixlen == None: + wantprefixlen = 0 + + x = str(self.ip) + if x[-1] == 'L': + x = x[:-1] + return x + self._printPrefix(wantprefixlen) + + def iptype(self): + """Return a description of the IP type ('PRIVATE', 'RESERVERD', etc). + + >>> print IP('127.0.0.1').iptype() + PRIVATE + >>> print IP('192.168.1.1').iptype() + PRIVATE + >>> print IP('195.185.1.2').iptype() + PUBLIC + >>> print IP('::1').iptype() + LOOPBACK + >>> print IP('2001:0658:022a:cafe:0200::1').iptype() + ASSIGNABLE RIPE + + The type information for IPv6 is out of sync with reality. + """ + + # this could be greatly improved + + if self._ipversion == 4: + iprange = IPv4ranges + elif self._ipversion == 6: + iprange = IPv6ranges + else: + raise ValueError, "only IPv4 and IPv6 supported" + + bits = self.strBin() + for i in range(len(bits), 0, -1): + if iprange.has_key(bits[:i]): + return iprange[bits[:i]] + return "unknown" + + + def netmask(self): + """Return netmask as an integer. + + >>> "%X" % IP('195.185.0.0/16').netmask().int() + 'FFFF0000' + """ + + # TODO: unify with prefixlenToNetmask? + if self._ipversion == 4: + locallen = 32 - self._prefixlen + elif self._ipversion == 6: + locallen = 128 - self._prefixlen + else: + raise ValueError, "only IPv4 and IPv6 supported" + + return ((2L ** self._prefixlen) - 1) << locallen + + + def strNetmask(self): + """Return netmask as an string. Mostly useful for IPv6. + + >>> print IP('195.185.0.0/16').strNetmask() + 255.255.0.0 + >>> print IP('2001:0658:022a:cafe::0/64').strNetmask() + /64 + """ + + # TODO: unify with prefixlenToNetmask? + if self._ipversion == 4: + locallen = 32 - self._prefixlen + return intToIp(((2L ** self._prefixlen) - 1) << locallen, 4) + elif self._ipversion == 6: + locallen = 128 - self._prefixlen + return "/%d" % self._prefixlen + else: + raise ValueError, "only IPv4 and IPv6 supported" + + def len(self): + """Return the length of a subnet. + + >>> print IP('195.185.1.0/28').len() + 16 + >>> print IP('195.185.1.0/24').len() + 256 + """ + + if self._ipversion == 4: + locallen = 32 - self._prefixlen + elif self._ipversion == 6: + locallen = 128 - self._prefixlen + else: + raise ValueError, "only IPv4 and IPv6 supported" + + return 2L ** locallen + + + def __nonzero__(self): + """All IPy objects should evaluate to true in boolean context. + Ordinarily they do, but if handling a default route expressed as + 0.0.0.0/0, the __len__() of the object becomes 0, which is used + as the boolean value of the object. + """ + return 1 + + + def __len__(self): + """Return the length of a subnet. + + Called to implement the built-in function len(). + It breaks with IPv6 Networks. Anybody knows how to fix this.""" + + # Python < 2.2 has this silly restriction which breaks IPv6 + # how about Python >= 2.2 ... ouch - it persists! + + return int(self.len()) + + + def __getitem__(self, key): + """Called to implement evaluation of self[key]. + + >>> ip=IP('127.0.0.0/30') + >>> for x in ip: + ... print repr(x) + ... 
+ IP('127.0.0.0') + IP('127.0.0.1') + IP('127.0.0.2') + IP('127.0.0.3') + >>> ip[2] + IP('127.0.0.2') + >>> ip[-1] + IP('127.0.0.3') + """ + + if type(key) != types.IntType and type(key) != types.LongType: + raise TypeError + if key < 0: + if abs(key) <= self.len(): + key = self.len() - abs(key) + else: + raise IndexError + else: + if key >= self.len(): + raise IndexError + + return self.ip + long(key) + + + + def __contains__(self, item): + """Called to implement membership test operators. + + Should return true if item is in self, false otherwise. Item + can be other IP-objects, strings or ints. + + >>> IP('195.185.1.1').strHex() + '0xc3b90101' + >>> 0xC3B90101L in IP('195.185.1.0/24') + 1 + >>> '127.0.0.1' in IP('127.0.0.0/24') + 1 + >>> IP('127.0.0.0/24') in IP('127.0.0.0/25') + 0 + """ + + item = IP(item) + if item.ip >= self.ip and item.ip < self.ip + self.len() - item.len() + 1: + return 1 + else: + return 0 + + + def overlaps(self, item): + """Check if two IP address ranges overlap. + + Returns 0 if the two ranges don't overlap, 1 if the given + range overlaps at the end and -1 if it does at the beginning. + + >>> IP('192.168.0.0/23').overlaps('192.168.1.0/24') + 1 + >>> IP('192.168.0.0/23').overlaps('192.168.1.255') + 1 + >>> IP('192.168.0.0/23').overlaps('192.168.2.0') + 0 + >>> IP('192.168.1.0/24').overlaps('192.168.0.0/23') + -1 + """ + + item = IP(item) + if item.ip >= self.ip and item.ip < self.ip + self.len(): + return 1 + elif self.ip >= item.ip and self.ip < item.ip + item.len(): + return -1 + else: + return 0 + + + def __str__(self): + """Dispatch to the prefered String Representation. + + Used to implement str(IP).""" + + return self.strCompressed() + + + def __repr__(self): + """Print a representation of the Object. + + Used to implement repr(IP). Returns a string which evaluates + to an identical Object (without the wantprefixlen stuff - see + module docstring. + + >>> print repr(IP('10.0.0.0/24')) + IP('10.0.0.0/24') + """ + + return("IPint('%s')" % (self.strCompressed(1))) + + + def __cmp__(self, other): + """Called by comparison operations. + + Should return a negative integer if self < other, zero if self + == other, a positive integer if self > other. + + Networks with different prefixlen are considered non-equal. + Networks with the same prefixlen and differing addresses are + considered non equal but are compared by their base address + integer value to aid sorting of IP objects. + + The version of Objects is not put into consideration. + + >>> IP('10.0.0.0/24') > IP('10.0.0.0') + 1 + >>> IP('10.0.0.0/24') < IP('10.0.0.0') + 0 + >>> IP('10.0.0.0/24') < IP('12.0.0.0/24') + 1 + >>> IP('10.0.0.0/24') > IP('12.0.0.0/24') + 0 + + """ + + # Im not really sure if this is "the right thing to do" + if self._prefixlen < other.prefixlen(): + return (other.prefixlen() - self._prefixlen) + elif self._prefixlen > other.prefixlen(): + + # Fixed bySamuel Krempp : + + # The bug is quite obvious really (as 99% bugs are once + # spotted, isn't it ? ;-) Because of precedence of + # multiplication by -1 over the substraction, prefixlen + # differences were causing the __cmp__ function to always + # return positive numbers, thus the function was failing + # the basic assumptions for a __cmp__ function. + + # Namely we could have (a > b AND b > a), when the + # prefixlen of a and b are different. (eg let + # a=IP("1.0.0.0/24"); b=IP("2.0.0.0/16");) thus, anything + # could happen when launching a sort algorithm.. + # everything's in order with the trivial, attached patch. 
+ + return (self._prefixlen - other.prefixlen()) * -1 + else: + if self.ip < other.ip: + return -1 + elif self.ip > other.ip: + return 1 + elif self._ipversion != other._ipversion: + # IP('0.0.0.0'), IP('::/0') + return cmp(self._ipversion, other._ipversion) + else: + return 0 + + + def __hash__(self): + """Called for the key object for dictionary operations, and by + the built-in function hash(). Should return a 32-bit integer + usable as a hash value for dictionary operations. The only + required property is that objects which compare equal have the + same hash value + + >>> IP('10.0.0.0/24').__hash__() + -167772185 + """ + + thehash = int(-1) + ip = self.ip + while ip > 0: + thehash = thehash ^ (ip & 0x7fffffff) + ip = ip >> 32 + thehash = thehash ^ self._prefixlen + return int(thehash) + + +class IP(IPint): + """Class for handling IP addresses and networks.""" + + def net(self): + """Return the base (first) address of a network as an IP object. + + The same as IP[0]. + + >>> IP('10.0.0.0/8').net() + IP('10.0.0.0') + """ + return IP(IPint.net(self), ipversion=self._ipversion) + + def broadcast(self): + """Return the broadcast (last) address of a network as an IP object. + + The same as IP[-1]. + + >>> IP('10.0.0.0/8').broadcast() + IP('10.255.255.255') + """ + return IP(IPint.broadcast(self)) + + def netmask(self): + """Return netmask as an IP object. + + >>> IP('10.0.0.0/8').netmask() + IP('255.0.0.0') + """ + return IP(IPint.netmask(self)) + + + def reverseNames(self): + """Return a list with values forming the reverse lookup. + + >>> IP('213.221.113.87/32').reverseNames() + ['87.113.221.213.in-addr.arpa.'] + >>> IP('213.221.112.224/30').reverseNames() + ['224.112.221.213.in-addr.arpa.', '225.112.221.213.in-addr.arpa.', '226.112.221.213.in-addr.arpa.', '227.112.221.213.in-addr.arpa.'] + >>> IP('127.0.0.0/24').reverseNames() + ['0.0.127.in-addr.arpa.'] + >>> IP('127.0.0.0/23').reverseNames() + ['0.0.127.in-addr.arpa.', '1.0.127.in-addr.arpa.'] + >>> IP('127.0.0.0/16').reverseNames() + ['0.127.in-addr.arpa.'] + >>> IP('127.0.0.0/15').reverseNames() + ['0.127.in-addr.arpa.', '1.127.in-addr.arpa.'] + >>> IP('128.0.0.0/8').reverseNames() + ['128.in-addr.arpa.'] + >>> IP('128.0.0.0/7').reverseNames() + ['128.in-addr.arpa.', '129.in-addr.arpa.'] + >>> IP('::1:2').reverseNames() + ['2.0.0.0.1.ip6.arpa.'] + """ + + if self._ipversion == 4: + ret = [] + # TODO: Refactor. Add support for IPint objects + if self.len() < 2**8: + for x in self: + ret.append(x.reverseName()) + elif self.len() < 2**16L: + for i in range(0, self.len(), 2**8): + ret.append(self[i].reverseName()[2:]) + elif self.len() < 2**24L: + for i in range(0, self.len(), 2**16): + ret.append(self[i].reverseName()[4:]) + else: + for i in range(0, self.len(), 2**24): + ret.append(self[i].reverseName()[6:]) + return ret + elif self._ipversion == 6: + s = hex(self.ip)[2:].lower() + if s[-1] == 'l': + s = s[:-1] + if self._prefixlen % 4 != 0: + raise NotImplementedError, "can't create IPv6 reverse names at sub nibble level" + s = list(s) + s.reverse() + s = '.'.join(s) + first_nibble_index = int(32 - (self._prefixlen / 4)) * 2 + return ["%s.ip6.arpa." % s[first_nibble_index:]] + else: + raise ValueError, "only IPv4 and IPv6 supported" + + + + def reverseName(self): + """Return the value for reverse lookup/PTR records as RFC 2317 look alike. + + RFC 2317 is an ugly hack which only works for sub-/24 e.g. not + for /23. Do not use it. Better set up a zone for every + address. See reverseName for a way to achieve that. 
+ + >>> print IP('195.185.1.1').reverseName() + 1.1.185.195.in-addr.arpa. + >>> print IP('195.185.1.0/28').reverseName() + 0-15.1.185.195.in-addr.arpa. + >>> IP('::1:2').reverseName() + '2.0.0.0.1.ip6.arpa.' + """ + + if self._ipversion == 4: + s = self.strFullsize(0) + s = s.split('.') + s.reverse() + first_byte_index = int(4 - (self._prefixlen / 8)) + if self._prefixlen % 8 != 0: + nibblepart = "%s-%s" % (s[3-(self._prefixlen / 8)], intToIp(self.ip + self.len() - 1, 4).split('.')[-1]) + if nibblepart[-1] == 'l': + nibblepart = nibblepart[:-1] + nibblepart += '.' + else: + nibblepart = "" + + s = '.'.join(s[first_byte_index:]) + return "%s%s.in-addr.arpa." % (nibblepart, s) + + elif self._ipversion == 6: + s = hex(self.ip)[2:].lower() + if s[-1] == 'l': + s = s[:-1] + if self._prefixlen % 4 != 0: + nibblepart = "%s-%s" % (s[self._prefixlen:], hex(self.ip + self.len() - 1)[2:].lower()) + if nibblepart[-1] == 'l': + nibblepart = nibblepart[:-1] + nibblepart += '.' + else: + nibblepart = "" + s = list(s) + s.reverse() + s = '.'.join(s) + first_nibble_index = int(32 - (self._prefixlen / 4)) * 2 + return "%s%s.ip6.arpa." % (nibblepart, s[first_nibble_index:]) + else: + raise ValueError, "only IPv4 and IPv6 supported" + + def make_net(self, netmask): + """Transform a single IP address into a network specification by + applying the given netmask. + + Returns a new IP instance. + + >>> print IP('127.0.0.1').make_net('255.0.0.0') + 127.0.0.0/8 + """ + if '/' in str(netmask): + raise ValueError, "invalid netmask (%s)" % netmask + return IP('%s/%s' % (self, netmask), make_net=True) + + def __getitem__(self, key): + """Called to implement evaluation of self[key]. + + >>> ip=IP('127.0.0.0/30') + >>> for x in ip: + ... print str(x) + ... + 127.0.0.0 + 127.0.0.1 + 127.0.0.2 + 127.0.0.3 + >>> print str(ip[2]) + 127.0.0.2 + >>> print str(ip[-1]) + 127.0.0.3 + """ + return IP(IPint.__getitem__(self, key)) + + def __repr__(self): + """Print a representation of the Object. + + >>> IP('10.0.0.0/8') + IP('10.0.0.0/8') + """ + + return("IP('%s')" % (self.strCompressed(1))) + + def __add__(self, other): + """Emulate numeric objects through network aggregation""" + if self.prefixlen() != other.prefixlen(): + raise ValueError, "Only networks with the same prefixlen can be added." + if self.prefixlen < 1: + raise ValueError, "Networks with a prefixlen longer than /1 can't be added." + if self.version() != other.version(): + raise ValueError, "Only networks with the same IP version can be added." + if self > other: + # fixed by Skinny Puppy + return other.__add__(self) + else: + ret = IP(self.int()) + ret._prefixlen = self.prefixlen() - 1 + return ret + + +def _parseAddressIPv6(ipstr): + """ + Internal function used by parseAddress() to parse IPv6 address with ':'. 
+ + >>> _parseAddressIPv6('::') + 0L + >>> _parseAddressIPv6('::1') + 1L + >>> _parseAddressIPv6('0:0:0:0:0:0:0:1') + 1L + >>> _parseAddressIPv6('0:0:0::0:0:1') + 1L + >>> _parseAddressIPv6('0:0:0:0:0:0:0:0') + 0L + >>> _parseAddressIPv6('0:0:0::0:0:0') + 0L + + >>> _parseAddressIPv6('FEDC:BA98:7654:3210:FEDC:BA98:7654:3210') + 338770000845734292534325025077361652240L + >>> _parseAddressIPv6('1080:0000:0000:0000:0008:0800:200C:417A') + 21932261930451111902915077091070067066L + >>> _parseAddressIPv6('1080:0:0:0:8:800:200C:417A') + 21932261930451111902915077091070067066L + >>> _parseAddressIPv6('1080:0::8:800:200C:417A') + 21932261930451111902915077091070067066L + >>> _parseAddressIPv6('1080::8:800:200C:417A') + 21932261930451111902915077091070067066L + >>> _parseAddressIPv6('FF01:0:0:0:0:0:0:43') + 338958331222012082418099330867817087043L + >>> _parseAddressIPv6('FF01:0:0::0:0:43') + 338958331222012082418099330867817087043L + >>> _parseAddressIPv6('FF01::43') + 338958331222012082418099330867817087043L + >>> _parseAddressIPv6('0:0:0:0:0:0:13.1.68.3') + 218186755L + >>> _parseAddressIPv6('::13.1.68.3') + 218186755L + >>> _parseAddressIPv6('0:0:0:0:0:FFFF:129.144.52.38') + 281472855454758L + >>> _parseAddressIPv6('::FFFF:129.144.52.38') + 281472855454758L + >>> _parseAddressIPv6('1080:0:0:0:8:800:200C:417A') + 21932261930451111902915077091070067066L + >>> _parseAddressIPv6('1080::8:800:200C:417A') + 21932261930451111902915077091070067066L + >>> _parseAddressIPv6('::1:2:3:4:5:6') + 1208962713947218704138246L + >>> _parseAddressIPv6('1:2:3:4:5:6::') + 5192455318486707404433266432802816L + """ + + # Split string into a list, example: + # '1080:200C::417A' => ['1080', '200C', '417A'] and fill_pos=2 + # and fill_pos is the position of '::' in the list + items = [] + index = 0 + fill_pos = None + while index < len(ipstr): + text = ipstr[index:] + if text.startswith("::"): + if fill_pos is not None: + # Invalid IPv6, eg. '1::2::' + raise ValueError("%r: Invalid IPv6 address: more than one '::'" % ipstr) + fill_pos = len(items) + index += 2 + continue + pos = text.find(':') + if pos == 0: + # Invalid IPv6, eg. '1::2:' + raise ValueError("%r: Invalid IPv6 address" % ipstr) + if pos != -1: + items.append(text[:pos]) + if text[pos:pos+2] == "::": + index += pos + else: + index += pos+1 + + if index == len(ipstr): + # Invalid IPv6, eg. '1::2:' + raise ValueError("%r: Invalid IPv6 address" % ipstr) + else: + items.append(text) + break + + if items and '.' in items[-1]: + # IPv6 ending with IPv4 like '::ffff:192.168.0.1' + if not (fill_pos <= len(items)-1): + # Invalid IPv6: 'ffff:192.168.0.1::' + raise ValueError("%r: Invalid IPv6 address: '::' after IPv4" % ipstr) + value = parseAddress(items[-1])[0] + items = items[:-1] + ["%04x" % (value >> 16), "%04x" % (value & 0xffff)] + + # Expand fill_pos to fill with '0' + # ['1','2'] with fill_pos=1 => ['1', '0', '0', '0', '0', '0', '0', '2'] + if fill_pos is not None: + diff = 8 - len(items) + if diff <= 0: + raise ValueError("%r: Invalid IPv6 address: '::' is not needed" % ipstr) + items = items[:fill_pos] + ['0']*diff + items[fill_pos:] + + # Here we have a list of 8 strings + if len(items) != 8: + # Invalid IPv6, eg. 
'1:2:3' + raise ValueError("%r: Invalid IPv6 address: should have 8 hextets" % ipstr) + + # Convert strings to long integer + value = 0L + index = 0 + for item in items: + try: + item = int(item, 16) + error = not(0 <= item <= 0xFFFF) + except ValueError: + error = True + if error: + raise ValueError("%r: Invalid IPv6 address: invalid hexlet %r" % (ipstr, item)) + value = (value << 16) + item + index += 1 + return value + +def parseAddress(ipstr): + """ + Parse a string and return the corresponding IP address (as integer) + and a guess of the IP version. + + Following address formats are recognized: + + >>> parseAddress('0x0123456789abcdef') # IPv4 if <= 0xffffffff else IPv6 + (81985529216486895L, 6) + >>> parseAddress('123.123.123.123') # IPv4 + (2071690107L, 4) + >>> parseAddress('123.123') # 0-padded IPv4 + (2071658496L, 4) + >>> parseAddress('1080:0000:0000:0000:0008:0800:200C:417A') + (21932261930451111902915077091070067066L, 6) + >>> parseAddress('1080:0:0:0:8:800:200C:417A') + (21932261930451111902915077091070067066L, 6) + >>> parseAddress('1080:0::8:800:200C:417A') + (21932261930451111902915077091070067066L, 6) + >>> parseAddress('::1') + (1L, 6) + >>> parseAddress('::') + (0L, 6) + >>> parseAddress('0:0:0:0:0:FFFF:129.144.52.38') + (281472855454758L, 6) + >>> parseAddress('::13.1.68.3') + (218186755L, 6) + >>> parseAddress('::FFFF:129.144.52.38') + (281472855454758L, 6) + """ + + if ipstr.startswith('0x'): + ret = long(ipstr[2:], 16) + if ret > 0xffffffffffffffffffffffffffffffffL: + raise ValueError, "%r: IP Address can't be bigger than 2^128" % (ipstr) + if ret < 0x100000000L: + return (ret, 4) + else: + return (ret, 6) + + if ipstr.find(':') != -1: + return (_parseAddressIPv6(ipstr), 6) + + elif len(ipstr) == 32: + # assume IPv6 in pure hexadecimal notation + return (long(ipstr, 16), 6) + + elif ipstr.find('.') != -1 or (len(ipstr) < 4 and int(ipstr) < 256): + # assume IPv4 ('127' gets interpreted as '127.0.0.0') + bytes = ipstr.split('.') + if len(bytes) > 4: + raise ValueError, "IPv4 Address with more than 4 bytes" + bytes += ['0'] * (4 - len(bytes)) + bytes = [long(x) for x in bytes] + for x in bytes: + if x > 255 or x < 0: + raise ValueError, "%r: single byte must be 0 <= byte < 256" % (ipstr) + return ((bytes[0] << 24) + (bytes[1] << 16) + (bytes[2] << 8) + bytes[3], 4) + + else: + # we try to interprete it as a decimal digit - + # this ony works for numbers > 255 ... others + # will be interpreted as IPv4 first byte + ret = long(ipstr, 10) + if ret > 0xffffffffffffffffffffffffffffffffL: + raise ValueError, "IP Address can't be bigger than 2^128" + if ret <= 0xffffffffL: + return (ret, 4) + else: + return (ret, 6) + + +def intToIp(ip, version): + """Transform an integer string into an IP address.""" + + # just to be sure and hoping for Python 2.22 + ip = long(ip) + + if ip < 0: + raise ValueError, "IPs can't be negative: %d" % (ip) + + ret = '' + if version == 4: + if ip > 0xffffffffL: + raise ValueError, "IPv4 Addresses can't be larger than 0xffffffff: %s" % (hex(ip)) + for l in range(4): + ret = str(ip & 0xffL) + '.' 
+ ret + ip = ip >> 8 + ret = ret[:-1] + elif version == 6: + if ip > 0xffffffffffffffffffffffffffffffffL: + raise ValueError, "IPv6 Addresses can't be larger than 0xffffffffffffffffffffffffffffffff: %s" % (hex(ip)) + l = '0' * 32 + hex(ip)[2:-1] + for x in range(1, 33): + ret = l[-x] + ret + if x % 4 == 0: + ret = ':' + ret + ret = ret[1:] + else: + raise ValueError, "only IPv4 and IPv6 supported" + + return ret + +def _ipVersionToLen(version): + """Return number of bits in address for a certain IP version. + + >>> _ipVersionToLen(4) + 32 + >>> _ipVersionToLen(6) + 128 + >>> _ipVersionToLen(5) + Traceback (most recent call last): + File "", line 1, in ? + File "IPy.py", line 1076, in _ipVersionToLen + raise ValueError, "only IPv4 and IPv6 supported" + ValueError: only IPv4 and IPv6 supported + """ + + if version == 4: + return 32 + elif version == 6: + return 128 + else: + raise ValueError, "only IPv4 and IPv6 supported" + + +def _countFollowingZeros(l): + """Return number of elements containing 0 at the beginning of the list.""" + if len(l) == 0: + return 0 + elif l[0] != 0: + return 0 + else: + return 1 + _countFollowingZeros(l[1:]) + + +_BitTable = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', + '4': '0100', '5': '0101', '6': '0110', '7': '0111', + '8': '1000', '9': '1001', 'a': '1010', 'b': '1011', + 'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'} + +def _intToBin(val): + """Return the binary representation of an integer as string.""" + + if val < 0: + raise ValueError, "Only positive values allowed" + s = hex(val).lower() + ret = '' + if s[-1] == 'l': + s = s[:-1] + for x in s[2:]: + if __debug__: + if not _BitTable.has_key(x): + raise AssertionError, "hex() returned strange result" + ret += _BitTable[x] + # remove leading zeros + while ret[0] == '0' and len(ret) > 1: + ret = ret[1:] + return ret + +def _count1Bits(num): + """Find the highest bit set to 1 in an integer.""" + ret = 0 + while num > 0: + num = num >> 1 + ret += 1 + return ret + +def _count0Bits(num): + """Find the highest bit set to 0 in an integer.""" + + # this could be so easy if _count1Bits(~long(num)) would work as excepted + num = long(num) + if num < 0: + raise ValueError, "Only positive Numbers please: %s" % (num) + ret = 0 + while num > 0: + if num & 1 == 1: + break + num = num >> 1 + ret += 1 + return ret + + +def _checkPrefix(ip, prefixlen, version): + """Check the validity of a prefix + + Checks if the variant part of a prefix only has 0s, and the length is + correct. + + >>> _checkPrefix(0x7f000000L, 24, 4) + 1 + >>> _checkPrefix(0x7f000001L, 24, 4) + 0 + >>> repr(_checkPrefix(0x7f000001L, -1, 4)) + 'None' + >>> repr(_checkPrefix(0x7f000001L, 33, 4)) + 'None' + """ + + # TODO: unify this v4/v6/invalid code in a function + bits = _ipVersionToLen(version) + + if prefixlen < 0 or prefixlen > bits: + return None + + if ip == 0: + zbits = bits + 1 + else: + zbits = _count0Bits(ip) + if zbits < bits - prefixlen: + return 0 + else: + return 1 + + +def _checkNetmask(netmask, masklen): + """Checks if a netmask is expressable as a prefixlen.""" + + num = long(netmask) + bits = masklen + + # remove zero bits at the end + while (num & 1) == 0 and bits != 0: + num = num >> 1 + bits -= 1 + if bits == 0: + break + # now check if the rest consists only of ones + while bits > 0: + if (num & 1) == 0: + raise ValueError, "Netmask %s can't be expressed as an prefix." 
% (hex(netmask)) + num = num >> 1 + bits -= 1 + + +def _checkNetaddrWorksWithPrefixlen(net, prefixlen, version): + """Check if a base addess of a network is compatible with a prefixlen""" + if net & _prefixlenToNetmask(prefixlen, version) == net: + return 1 + else: + return 0 + + +def _netmaskToPrefixlen(netmask): + """Convert an Integer representing a netmask to a prefixlen. + + E.g. 0xffffff00 (255.255.255.0) returns 24 + """ + + netlen = _count0Bits(netmask) + masklen = _count1Bits(netmask) + _checkNetmask(netmask, masklen) + return masklen - netlen + + +def _prefixlenToNetmask(prefixlen, version): + """Return a mask of n bits as a long integer. + + From 'IP address conversion functions with the builtin socket module' + by Alex Martelli + http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66517 + """ + if prefixlen == 0: + return 0 + elif prefixlen < 0: + raise ValueError, "Prefixlen must be > 0" + return ((2L< + +Twisted Core 10.0.0 (2010-03-01) +================================ + +Features +-------- + - The twistd man page now has a SIGNALS section. (#689) + + - reactor.spawnProcess now will not emit a PotentialZombieWarning + when called before reactor.run, and there will be no potential for + zombie processes in this case. (#2078) + + - High-throughput applications based on Perspective Broker should now + run noticably faster thanks to the use of a more efficient decoding + function in Twisted Spread. (#2310) + + - Documentation for trac-post-commit-hook functionality in svn-dev + policy. (#3867) + + - twisted.protocols.socks.SOCKSv4 now supports the SOCKSv4a protocol. + (#3886) + + - Trial can now output test results according to the subunit + protocol, as long as Subunit is installed (see + https://launchpad.net/subunit). (#4004) + + - twisted.protocols.amp now provides a ListOf argument type which can + be composed with some other argument types to create a zero or more + element sequence of that type. (#4116) + + - If returnValue is invoked outside of a function decorated with + @inlineCallbacks, but causes a function thusly decorated to exit, a + DeprecationWarning will be emitted explaining this potentially + confusing behavior. In a future release, this will cause an + exception. (#4157) + + - twisted.python.logfile.BaseLogFile now has a reopen method allowing + you to use an external logrotate mechanism. (#4255) + +Bugfixes +-------- + - FTP.ftp_NLST now handles requests on invalid paths in a way + consistent with RFC 959. (#1342) + + - twisted.python.util.initgroups now calls the low-level C initgroups + by default if available: the python version can create lots of I/O + with certain authentication setup to retrieve all the necessary + information. (#3226) + + - startLogging now does nothing on subsequent invocations, thus + fixing a terrible infinite recursion bug that's only on edge case. + (#3289) + + - Stringify non-string data to NetstringReceiver.sendString before + calculating the length so that the calculated length is equal to + the actual length of the transported data. (#3299) + + - twisted.python.win32.cmdLineQuote now correctly quotes empty + strings arguments (#3876) + + - Change the behavior of the Gtk2Reactor to register only one source + watch for each file descriptor, instead of one for reading and one + for writing. In particular, it fixes a bug with Glib under Windows + where we failed to notify when a client is connected. (#3925) + + - Twisted Trial no longer crashes if it can't remove an old + _trial_temp directory. 
(#4020) + + - The optional _c_urlarg extension now handles unquote("") correctly + on platforms where malloc(0) returns NULL, such as AIX. It also + compiles with less warnings. (#4142) + + - On POSIX, child processes created with reactor.spawnProcess will no + longer automatically ignore the signals which the parent process + has set to be ignored. (#4199) + + - All SOCKSv4a tests now use a dummy reactor with a deterministic + resolve method. (#4275) + + - Prevent extraneous server, date and content-type headers in proxy + responses. (#4277) + +Deprecations and Removals +------------------------- + - twisted.internet.error.PotentialZombieWarning is now deprecated. + (#2078) + + - twisted.test.time_helpers is now deprecated. (#3719) + + - The deprecated connectUDP method of IReactorUDP has now been + removed. (#4075) + + - twisted.trial.unittest.TestCase now ignores the previously + deprecated setUpClass and tearDownClass methods. (#4175) + +Other +----- + - #917, #2406, #2481, #2608, #2689, #2884, #3056, #3082, #3199, + #3480, #3592, #3718, #3935, #4066, #4083, #4154, #4166, #4169, + #4176, #4183, #4186, #4188, #4189, #4194, #4201, #4204, #4209, + #4222, #4234, #4235, #4238, #4240, #4245, #4251, #4264, #4268, + #4269, #4282 + + +Twisted Conch 10.0.0 (2010-03-01) +================================= + +Bugfixes +-------- + - twisted.conch.checkers.SSHPublicKeyDatabase now looks in the + correct user directory for authorized_keys files. (#3984) + + - twisted.conch.ssh.SSHUserAuthClient now honors preferredOrder when + authenticating. (#4266) + +Other +----- + - #2391, #4203, #4265 + + +Twisted Lore 10.0.0 (2010-03-01) +================================ + +Other +----- + - #4241 + + +Twisted Mail 10.0.0 (2010-03-01) +================================ + +Bugfixes +-------- + - twisted.mail.smtp.ESMTPClient and + twisted.mail.smtp.LOGINAuthenticator now implement the (obsolete) + LOGIN SASL mechanism according to the draft specification. (#4031) + + - twisted.mail.imap4.IMAP4Client will no longer misparse all html- + formatted message bodies received in response to a fetch command. + (#4049) + + - The regression in IMAP4 search handling of "OR" and "NOT" terms has + been fixed. (#4178) + +Other +----- + - #4028, #4170, #4200 + + +Twisted Names 10.0.0 (2010-03-01) +================================= + +Bugfixes +-------- + - twisted.names.root.Resolver no longer leaks UDP sockets while + resolving names. (#970) + +Deprecations and Removals +------------------------- + - Several top-level functions in twisted.names.root are now + deprecated. (#970) + +Other +----- + - #4066 + + +Twisted Pair 10.0.0 (2010-03-01) +================================ + +Other +----- + - #4170 + + +Twisted Runner 10.0.0 (2010-03-01) +================================== + +Other +----- + - #3961 + + +Twisted Web 10.0.0 (2010-03-01) +=============================== + +Features +-------- + - Twisted Web in 60 Seconds, a series of short tutorials with self- + contained examples on a range of common web topics, is now a part + of the Twisted Web howto documentation. (#4192) + +Bugfixes +-------- + - Data and File from twisted.web.static and + twisted.web.distrib.UserDirectory will now only generate a 200 + response for GET or HEAD requests. + twisted.web.client.HTTPPageGetter will no longer ignore the case of + a request method when considering whether to apply special HEAD + processing to a response. (#446) + + - twisted.web.http.HTTPClient now supports multi-line headers. 
+ (#2062) + + - Resources served via twisted.web.distrib will no longer encounter a + Banana error when writing more than 640kB at once to the request + object. (#3212) + + - The Error, PageRedirect, and InfiniteRedirection exception in + twisted.web now initialize an empty message parameter by mapping + the HTTP status code parameter to a descriptive string. Previously + the lookup would always fail, leaving message empty. (#3806) + + - The 'wsgi.input' WSGI environment object now supports -1 and None + as arguments to the read and readlines methods. (#4114) + + - twisted.web.wsgi doesn't unquote QUERY_STRING anymore, thus + complying with the WSGI reference implementation. (#4143) + + - The HTTP proxy will no longer pass on keep-alive request headers + from the client, preventing pages from loading then "hanging" + (leaving the connection open with no hope of termination). (#4179) + +Deprecations and Removals +------------------------- + - Remove '--static' option from twistd web, that served as an alias + for the '--path' option. (#3907) + +Other +----- + - #3784, #4216, #4242 + + +Twisted Words 10.0.0 (2010-03-01) +================================= + +Features +-------- + - twisted.words.protocols.irc.IRCClient.irc_MODE now takes ISUPPORT + parameters into account when parsing mode messages with arguments + that take parameters (#3296) + +Bugfixes +-------- + - When twisted.words.protocols.irc.IRCClient's versionNum and + versionEnv attributes are set to None, they will no longer be + included in the client's response to CTCP VERSION queries. (#3660) + + - twisted.words.protocols.jabber.xmlstream.hashPassword now only + accepts unicode as input (#3741, #3742, #3847) + +Other +----- + - #2503, #4066, #4261 + + +Twisted Core 9.0.0 (2009-11-24) +=============================== + +Features +-------- + - LineReceiver.clearLineBuffer now returns the bytes that it cleared (#3573) + - twisted.protocols.amp now raises InvalidSignature when bad arguments are + passed to Command.makeArguments (#2808) + - IArgumentType was added to represent an existing but previously unspecified + interface in amp (#3468) + - Obscure python tricks have been removed from the finger tutorials (#2110) + - The digest auth implementations in twisted.web and twisted.protocolos.sip + have been merged together in twisted.cred (#3575) + - FilePath and ZipPath now has a parents() method which iterates up all of its + parents (#3588) + - reactors which support threads now have a getThreadPool method (#3591) + - The MemCache client implementation now allows arguments to the "stats" + command (#3661) + - The MemCache client now has a getMultiple method which allows fetching of + multiple values (#3171) + - twisted.spread.jelly can now unserialize some new-style classes (#2950) + - twisted.protocols.loopback.loopbackAsync now accepts a parameter to control + the data passed between client and server (#3820) + - The IOCP reactor now supports SSL (#593) + - Tasks in a twisted.internet.task.Cooperator can now be paused, resumed, and + cancelled (#2712) + - AmpList arguments can now be made optional (#3891) + - The syslog output observer now supports log levels (#3300) + - LoopingCall now supports reporting the number of intervals missed if it + isn't able to schedule calls fast enough (#3671) + +Fixes +----- + - The deprecated md5 and sha modules are no longer used if the stdlib hashlib + module is available (#2763) + - An obscure deadlock involving waking up the reactor within signal handlers + in particular threads was fixed 
(#1997) + - The passivePortRange attribute of FTPFactory is now honored (#3593) + - TestCase.flushWarnings now flushes warnings even if they were produced by a + file that was renamed since it was byte compiled (#3598) + - Some internal file descriptors are now marked as close-on-exec, so these will + no longer be leaked to child processes (#3576) + - twisted.python.zipstream now correctly extracts the first file in a directory + as a file, and not an empty directory (#3625) + - proxyForInterface now returns classes which correctly *implement* interfaces + rather than *providing* them (#3646) + - SIP Via header parameters should now be correctly generated (#2194) + - The Deferred returned by stopListening would sometimes previously never fire + if an exception was raised by the underlying file descriptor's connectionLost + method. Now the Deferred will fire with a failure (#3654) + - The command-line tool "manhole" should now work with newer versions of pygtk + (#2464) + - When a DefaultOpenSSLContextFactory is instantiated with invalid parameters, + it will now raise an exception immediately instead of waiting for the first + connection (#3700) + - Twisted command line scripts should now work when installed in a virtualenv + (#3750) + - Trial will no longer delete temp directories which it did not create (#3481) + - Processes started on Windows should now be cleaned up properly in more cases + (#3893) + - Certain misbehaving importers will no longer cause twisted.python.modules + (and thus trial) to raise an exception, but rather issue a warning (#3913) + - MemCache client protocol methods will now fail when the transport has been + disconnected (#3643) + - In the AMP method callRemoteString, the requiresAnswer parameter is now + honored (#3999) + - Spawning a "script" (a file which starts with a #! line) on Windows running + Python 2.6 will now work instead of raising an exception about file mode + "ru" (#3567) + - FilePath's walk method now calls its "descend" parameter even on the first + level of children, instead of only on grandchildren. 
This allows for better + symlink cycle detection (#3911) + - Attempting to write unicode data to process pipes on Windows will no longer + result in arbitrarily encoded messages being written to the pipe, but instead + will immediately raise an error (#3930) + - The various twisted command line utilities will no longer print + ModuleType.__doc__ when Twisted was installed with setuptools (#4030) + - A Failure object will now be passed to connectionLost on stdio connections + on Windows, instead of an Exception object (#3922) + +Deprecations and Removals +------------------------- + - twisted.persisted.marmalade was deleted after a long period of deprecation + (#876) + - Some remaining references to the long-gone plugins.tml system were removed + (#3246) + - SSLv2 is now disabled by default, but it can be re-enabled explicitly + (#3330) + - twisted.python.plugin has been removed (#1911) + - reactor.run will now raise a ReactorAlreadyRunning exception when it is + called reentrantly instead of warning a DeprecationWarning (#1785) + - twisted.spread.refpath is now deprecated because it is unmaintained, + untested, and has dubious value (#3723) + - The unused --quiet flag has been removed from the twistd command (#3003) + +Other +----- + - #3545, #3490, #3544, #3537, #3455, #3315, #2281, #3564, #3570, #3571, #3486, + #3241, #3599, #3220, #1522, #3611, #3596, #3606, #3609, #3602, #3637, #3647, + #3632, #3675, #3673, #3686, #2217, #3685, #3688, #2456, #506, #3635, #2153, + #3581, #3708, #3714, #3717, #3698, #3747, #3704, #3707, #3713, #3720, #3692, + #3376, #3652, #3695, #3735, #3786, #3783, #3699, #3340, #3810, #3822, #3817, + #3791, #3859, #2459, #3677, #3883, #3894, #3861, #3822, #3852, #3875, #2722, + #3768, #3914, #3885, #2719, #3905, #3942, #2820, #3990, #3954, #1627, #2326, + #2972, #3253, #3937, #4058, #1200, #3639, #4079, #4063, #4050 + + +Twisted Conch 9.0.0 (2009-11-24) +================================ + +Fixes +----- + - The SSH key parser has been removed and conch now uses pyASN1 to parse keys. 
+ This should fix a number of cases where parsing a key would fail, but it now + requires users to have pyASN1 installed (#3391) + - The time field on SFTP file listings should now be correct (#3503) + - The day field on SFTP file listings should now be correct on Windows (#3503) + - The "cftp" sftp client now truncates files it is uploading over (#2519) + - The telnet server protocol can now properly respond to subnegotiation + requests (#3655) + - Tests and factoring of the SSHv2 server implementation are now much better + (#2682) + - The SSHv2 server now sends "exit-signal" messages to the client, instead of + raising an exception, when a process dies due to a signal (#2687) + - cftp's client-side "exec" command now uses /bin/sh if the current user has + no shell (#3914) + +Deprecations and Removals +------------------------- + - The buggy SSH connection sharing feature of the SSHv2 client was removed + (#3498) + - Use of strings and PyCrypto objects to represent keys is deprecated in favor + of using Conch Key objects (#2682) + +Other +----- + - #3548, #3537, #3551, #3220, #3568, #3689, #3709, #3809, #2763, #3540, #3750, + #3897, #3813, #3871, #3916, #4047, #3940, #4050 + + +Twisted Lore 9.0.0 (2009-11-24) +=============================== + +Features +-------- + - Python source listings now include line numbers (#3486) + +Fixes +----- + - Lore now uses minidom instead of Twisted's microdom, which incidentally + fixes some Lore bugs such as throwing away certain whitespace + (#3560, #414, #3619) + - Lore's "lint" command should no longer break on documents with links in them + (#4051, #4115) + +Deprecations and Removals +------------------------- + - Lore no longer uses the ancient "tml" Twisted plugin system (#1911) + +Other +----- + - #3565, #3246, #3540, #3750, #4050 + + +Twisted Mail 9.0.0 (2009-11-24) +=============================== + +Features +-------- + - maildir.StringListMailbox, an in-memory maildir mailbox, now supports + deletion, undeletion, and syncing (#3547) + - SMTPClient's callbacks are now more completely documented (#684) + +Fixes +----- + - Parse UNSEEN response data and include it in the result of + IMAP4Client.examine (#3550) + - The IMAP4 client now delivers more unsolicited server responses to callbacks + rather than ignoring them, and also won't ignore solicited responses that + arrive on the same line as an unsolicited one (#1105) + - Several bugs in the SMTP client's idle timeout support were fixed (#3641, + #1219) + - A case where the SMTP client could skip some recipients when retrying + delivery has been fixed (#3638) + - Errors during certain data transfers will no longer be swallowed. They will + now bubble up to the higher-level API (such as the sendmail function) (#3642) + - Escape sequences inside quoted strings in IMAP4 should now be parsed + correctly by the IMAP4 server protocol (#3659) + - The "imap4-utf-7" codec that is registered by twisted.mail.imap4 had a number + of fixes that allow it to work better with the Python codecs system, and to + actually work (#3663) + - The Maildir implementation now ensures time-based ordering of filenames so + that the lexical sorting of messages matches the order in which they were + received (#3812) + - SASL PLAIN credentials generated by the IMAP4 protocol implementations + (client and server) should now be RFC-compliant (#3939) + - Searching for a set of sequences using the IMAP4 "SEARCH" command should + now work on the IMAP4 server protocol implementation. 
This at least improves + support for the Pine mail client (#1977) + +Other +----- + - #2763, #3647, #3750, #3819, #3540, #3846, #2023, #4050 + + +Twisted Names 9.0.0 (2009-11-24) +================================ + +Deprecations and Removals +------------------------- + - client.ThreadedResolver is deprecated in favor of + twisted.internet.base.ThreadedResolver (#3710) + +Other +----- + - #3540, #3560, #3712, #3750, #3990 + + +Twisted News 9.0.0 (2009-11-24) +=============================== + +Other +----- + - #2763, #3540 + + +Twisted Pair 9.0.0 (2009-11-24) +=============================== + +Other +----- + - #3540, #4050 + + +Twisted Runner 9.0.0 (2009-11-24) +================================= + +Features +-------- + - procmon.ProcessMonitor.addProcess now accepts an 'env' parameter which allows + users to specify the environment in which a process will be run (#3691) + +Other +----- + - #3540 + + +Twisted Web 9.0.0 (2009-11-24) +============================== + +Features +-------- + - There is now an iweb.IRequest interface which specifies the interface that + request objects provide (#3416) + - downloadPage now supports the same cookie, redirect, and timeout features + that getPage supports (#2971) + - A chapter about WSGI has been added to the twisted.web documentation (#3510) + - The HTTP auth support in the web server now allows anonymous sessions by + logging in with ANONYMOUS credentials when no Authorization header is + provided in a request (#3924, #3936) + - HTTPClientFactory now accepts a parameter to enable a common deviation from + the HTTP 1.1 standard by responding to redirects in a POSTed request with a + GET instead of another POST (#3624) + - A new basic HTTP/1.1 client API is included in twisted.web.client.Agent + (#886, #3987) + +Fixes +----- + - Requests for "insecure" children of a static.File (such as paths containing + encoded directory separators) will now result in a 404 instead of a 500 + (#3549, #3469) + - When specifying a followRedirect argument to the getPage function, the state + of redirect-following for other getPage calls should now be unaffected. It + was previously overwriting a class attribute which would affect outstanding + getPage calls (#3192) + - Downloading an URL of the form "http://example.com:/" will now work, + ignoring the extraneous colon (#2402) + - microdom's appendChild method will no longer issue a spurious warning, and + microdom's methods in general should now issue more meaningful exceptions + when invalid parameters are passed (#3421) + - WSGI applications will no longer have spurious Content-Type headers added to + their responses by the twisted.web server. 
In addition, WSGI applications + will no longer be able to specify the server-restricted headers Server and + Date (#3569) + - http_headers.Headers now normalizes the case of raw headers passed directly + to it in the same way that it normalizes the headers passed to setRawHeaders + (#3557) + - The distrib module no longer relies on the deprecated woven package (#3559) + - twisted.web.domhelpers now works with both microdom and minidom (#3600) + - twisted.web servers will now ignore invalid If-Modified-Since headers instead + of returning a 500 error (#3601) + - Certain request-bound memory and file resources are cleaned up slightly + sooner by the request when the connection is lost (#1621, #3176) + - xmlrpclib.DateTime objects should now correctly round-trip over twisted.web's + XMLRPC support in all supported versions of Python, and errors during error + serialization will no longer hang a twisted.web XMLRPC response (#2446) + - request.content should now always be seeked to the beginning when + request.process is called, so application code should never need to seek + back manually (#3585) + - Fetching a child of static.File with a double-slash in the URL (such as + "example//foo.html") should now return a 404 instead of a traceback and + 500 error (#3631) + - downloadPage will now fire a Failure on its returned Deferred instead of + indicating success when the connection is prematurely lost (#3645) + - static.File will now provide a 404 instead of a 500 error when it was + constructed with a non-existent file (#3634) + - microdom should now serialize namespaces correctly (#3672) + - The HTTP Auth support resource wrapper should no longer corrupt requests and + cause them to skip a segment in the request path (#3679) + - The twisted.web WSGI support should now include leading slashes in PATH_INFO, + and SCRIPT_NAME will be empty if the application is at the root of the + resource tree. This means that WSGI applications should no longer generate + URLs with double-slashes in them even if they naively concatenate the values + (#3721) + - WSGI applications should now receive the requesting client's IP in the + REMOTE_ADDR environment variable (#3730) + - The distrib module should work again. 
It was unfortunately broken with the + refactoring of twisted.web's header support (#3697) + - static.File now supports multiple ranges specified in the Range header + (#3574) + - static.File should now generate a correct Content-Length value when the + requested Range value doesn't fit entirely within the file's contents (#3814) + - Attempting to call request.finish() after the connection has been lost will + now immediately raise a RuntimeError (#4013) + - An HTTP-auth resource should now be able to directly render the wrapped + avatar, whereas before it would only allow retrieval of child resources + (#4014) + - twisted.web's wsgi support should no longer attempt to call request.finish + twice, which would cause errors in certain cases (#4025) + - WSGI applications should now be able to handle requests with large bodies + (#4029) + - Exceptions raised from WSGI applications should now more reliably be turned + into 500 errors on the HTTP level (#4019) + - DeferredResource now correctly passes through exceptions raised from the + wrapped resource, instead of turning them all into 500 errors (#3932) + - Agent.request now generates a Host header when no headers are passed at + (#4131) + +Deprecations and Removals +------------------------- + - The unmaintained and untested twisted.web.monitor module was removed (#2763) + - The twisted.web.woven package has been removed (#1522) + - All of the error resources in twisted.web.error are now in + twisted.web.resource, and accessing them through twisted.web.error is now + deprecated (#3035) + - To facilitate a simplification of the timeout logic in server.Session, + various things have been deprecated (#3457) + - the loopFactory attribute is now ignored + - the checkExpired method now does nothing + - the lifetime parameter to startCheckingExpiration is now ignored + - The twisted.web.trp module is now deprecated (#2030) + +Other +----- + - #2763, #3540, #3575, #3610, #3605, #1176, #3539, #3750, #3761, #3779, #2677, + #3782, #3904, #3919, #3418, #3990, #1404, #4050 + + +Twisted Words 9.0.0 (2009-11-24) +================================ + +Features +-------- + - IRCClient.describe is a new method meant to replace IRCClient.me to send + CTCP ACTION messages with less confusing behavior (#3910) + - The XMPP client protocol implementation now supports ANONYMOUS SASL + authentication (#4067) + - The IRC client protocol implementation now has better support for the + ISUPPORT server->client message, storing the data in a new + ServerSupportedFeatures object accessible via IRCClient.supported (#3285) + +Fixes +----- + - The twisted.words IRC server now always sends an MOTD, which at least makes + Pidgin able to successfully connect to a twisted.words IRC server (#2385) + - The IRC client will now dispatch "RPL MOTD" messages received before a + "RPL MOTD START" instead of raising an exception (#3676) + - The IRC client protocol implementation no longer updates its 'nickname' + attribute directly; instead, that attribute will be updated when the server + acknowledges the change (#3377) + - The IRC client protocol implementation now supports falling back to another + nickname when a nick change request fails (#3377, #4010) + +Deprecations and Removals +------------------------- + - The TOC protocol implementation is now deprecated, since the protocol itself + has been deprecated and obselete for quite a long time (#3580) + - The gui "im" application has been removed, since it relied on GTK1, which is + hard to find these days (#3699, #3340) + +Other 
+----- + - #2763, #3540, #3647, #3750, #3895, #3968, #4050 + + +Core 8.2.0 (2008-12-16) +======================= + +Features +-------- + - Reactors are slowly but surely becoming more isolated, thus improving + testability (#3198) + - FilePath has gained a realpath method, and FilePath.walk no longer infinitely + recurses in the case of a symlink causing a self-recursing filesystem tree + (#3098) + - FilePath's moveTo and copyTo methods now have an option to disable following + of symlinks (#3105) + - Private APIs are now included in the API documentation (#3268) + - hotshot is now the default profiler for the twistd --profile parameter and + using cProfile is now documented (#3355, #3356) + - Process protocols can now implement a processExited method, which is + distinct from processEnded in that it is called immediately when the child + has died, instead of waiting for all the file descriptors to be closed + (#1291) + - twistd now has a --umask option (#966, #3024) + - A new deferToThreadPool function exists in twisted.internet.threads (#2845) + - There is now an example of writing an FTP server in examples/ftpserver.py + (#1579) + - A new runAsEffectiveUser function has been added to twisted.python.util + (#2607) + - twisted.internet.utils.getProcessOutput now offers a mechanism for + waiting for the process to actually end, in the event of data received on + stderr (#3239) + - A fullyQualifiedName function has been added to twisted.python.reflect + (#3254) + - strports now defaults to managing access to a UNIX socket with a lock; + lockfile=0 can be included in the strports specifier to disable this + behavior (#2295) + - FTPClient now has a 'rename' method (#3335) + - FTPClient now has a 'makeDirectory' method (#3500) + - FTPClient now has a 'removeFile' method (#3491) + - flushWarnings, A new Trial method for testing warnings, has been added + (#3487, #3427, #3506) + - The log observer can now be configured in .tac files (#3534) + +Fixes +----- + - TLS Session Tickets are now disabled by default, allowing connections to + certain servers which hang when an empty session ticket is received (like + GTalk) (#3463) + - twisted.enterprise.adbapi.ConnectionPool's noisy attribute now defaults to + False, as documented (#1806) + - Error handling and logging in adbapi is now much improved (#3244) + - TCP listeners can now be restarted (#2913) + - Doctests can now be rerun with trial's --until-failure option (#2713) + - Some memory leaks have been fixed in trial's --until-failure + implementation (#3119, #3269) + - Trial's summary reporter now prints correct runtime information and handles + the case of 0 tests (#3184) + - Trial and any other user of the 'namedAny' function now has better error + reporting in the case of invalid module names (#3259) + - Multiple instances of trial can now run in parallel in the same directory + by creating _trial_temp directories with an incremental suffix (#2338) + - Trial's failUnlessWarns method now works on Python 2.6 (#3223) + - twisted.python.log now hooks into the warnings system in a way compatible + with Python 2.6 (#3211) + - The GTK2 reactor is now better supported on Windows, but still not passing + the entire test suite (#3203) + - low-level failure handling in spawnProcess has been improved and no longer + leaks file descriptors (#2305, #1410) + - Perspective Broker avatars now have their logout functions called in more + cases (#392) + - Log observers which raise exceptions are no longer removed (#1069) + - transport.getPeer now always includes 
an IP address in the Address returned + instead of a hostname (#3059) + - Functions in twisted.internet.utils which spawn processes now avoid calling + chdir in the case where no working directory is passed, to avoid some + obscure permission errors (#3159) + - twisted.spread.publish.Publishable no longer corrupts line endings on + Windows (#2327) + - SelectReactor now properly detects when a TLS/TCP connection has been + disconnected (#3218) + - twisted.python.lockfile no longer raises an EEXIST OSError and is much + better supported on Windows (#3367) + - When ITLSTransport.startTLS is called while there is data in the write + buffer, TLS negotiation will now be delayed instead of the method raising + an exception (#686) + - The userAnonymous argument to FTPFactory is now honored (#3390) + - twisted.python.modules no longer tries to "fix" sys.modules after an import + error, which was just causing problems (#3388) + - setup.py no longer attempts to build extension modules when run with Jython + (#3410) + - AMP boxes can now be sent in IBoxReceiver.startReceivingBoxes (#3477) + - AMP connections are closed as soon as a key length larger than 255 is + received (#3478) + - Log events with timezone offsets between -1 and -59 minutes are now + correctly reported as negative (#3515) + +Deprecations and Removals +------------------------- + - Trial's setUpClass and tearDownClass methods are now deprecated (#2903) + - problemsFromTransport has been removed in favor of the argument passed to + connectionLost (#2874) + - The mode parameter to methods of IReactorUNIX and IReactorUNIXDatagram are + deprecated in favor of applications taking other security precautions, since + the mode of a Unix socket is often not respected (#1068) + - Index access on instances of twisted.internet.defer.FirstError has been + removed in favor of the subFailure attribute (#3298) + - The 'changeDirectory' method of FTPClient has been deprecated in favor of + the 'cwd' method (#3491) + +Other +----- + + - #3202, #2869, #3225, #2955, #3237, #3196, #2355, #2881, #3054, #2374, #2918, + #3210, #3052, #3267, #3288, #2985, #3295, #3297, #2512, #3302, #1222, #2631, + #3306, #3116, #3215, #1489, #3319, #3320, #3321, #1255, #2169, #3182, #3323, + #3301, #3318, #3029, #3338, #3346, #1144, #3173, #3165, #685, #3357, #2582, + #3370, #2438, #1253, #637, #1971, #2208, #979, #1790, #1888, #1882, #1793, + #754, #1890, #1931, #1246, #1025, #3177, #2496, #2567, #3400, #2213, #2027, + #3415, #1262, #3422, #2500, #3414, #3045, #3111, #2974, #2947, #3222, #2878, + #3402, #2909, #3423, #1328, #1852, #3382, #3393, #2029, #3489, #1853, #2026, + #2375, #3502, #3482, #3504, #3505, #3507, #2605, #3519, #3520, #3121, #3484, + #3439, #3216, #3511, #3524, #3521, #3197, #2486, #2449, #2748, #3381, #3236, + #671 + + +Conch 8.2.0 (2008-12-16) +======================== + +Features +-------- + - The type of the protocols instantiated by SSHFactory is now parameterized + (#3443) + +Fixes +----- + - A file descriptor leak has been fixed (#3213, #1789) + - "File Already Exists" errors are now handled more correctly (#3033) + - Handling of CR IAC in TelnetClient is now improved (#3305) + - SSHAgent is no longer completely unusable (#3332) + - The performance of insults.ClientProtocol is now greatly increased by + delivering more than one byte at a time to application code (#3386) + - Manhole and the conch server no longer need to be run as root when not + necessary (#2607) + - The value of FILEXFER_ATTR_ACMODTIME has been corrected (#2902) + - The management 
of known_hosts and host key verification has been overhauled + (#1376, #1301, #3494, #3496, #1292, #3499) + +Other +----- + - #3193, #1633 + + +Lore 8.2.0 (2008-12-16) +======================= + +Other +----- + - #2207, #2514 + + +Mail 8.2.0 (2008-12-16) +======================= + +Fixes +----- + - The mailmail tool now provides better error messages for usage errors (#3339) + - The SMTP protocol implementation now works on PyPy (#2976) + +Other +----- + - #3475 + + +Names 8.2.0 (2008-12-16) +======================== + +Features +-------- + - The NAPTR record type is now supported (#2276) + +Fixes +----- + - Make client.Resolver less vulnerable to the Birthday Paradox attack by + avoiding sending duplicate queries when it's not necessary (#3347) + - client.Resolver now uses a random source port for each DNS request (#3342) + - client.Resolver now uses a full 16 bits of randomness for message IDs, + instead of 10 which it previously used (#3342) + - All record types now have value-based equality and a string representation + (#2935) + +Other +----- + - #1622, #3424 + + +Web 8.2.0 (2008-12-16) +====================== + +Features +-------- + - The web server can now deal with multi-value headers in the new attributes of + Request, requestHeaders and responseHeaders (#165) + - There is now a resource-wrapper which implements HTTP Basic and Digest auth + in terms of twisted.cred (#696) + - It's now possible to limit the number of redirects that client.getPage will + follow (#2412) + - The directory-listing code no longer uses Woven (#3257) + - static.File now supports Range headers with a single range (#1493) + - twisted.web now has a rudimentary WSGI container (#2753) + - The web server now supports chunked encoding in requests (#3385) + +Fixes +----- + - The xmlrpc client now raises an error when the server sends an empty + response (#3399) + - HTTPPageGetter no longer duplicates default headers when they're explicitly + overridden in the headers parameter (#1382) + - The server will no longer timeout clients which are still sending request + data (#1903) + - microdom's isEqualToNode now returns False when the nodes aren't equal + (#2542) + +Deprecations and Removals +------------------------- + + - Request.headers and Request.received_headers are not quite deprecated, but + they are discouraged in favor of requestHeaders and responseHeaders (#165) + +Other +----- + - #909, #687, #2938, #1152, #2930, #2025, #2683, #3471 + + +Web2 8.2.0 (2008-12-16) +======================= + +Note: Twisted Web2 is being phased out in preference for Twisted Web, but some +maintenance changes have been made. 
+ +Fixes +----- + - The main twisted.web2 docstring now indicates the current state of the + project (#2028) + - Headers which require unusual bytes are now quoted (#2346) + - Some links in the introduction documentation have been fixed (#2552) + + +Words 8.2.0 (2008-12-16) +======================== + +Feature +------- + - There is now a standalone XMPP router included in twisted.words: it can be + used with the 'twistd xmpp-router' command line (#3407) + - A server factory for Jabber XML Streams has been added (#3435) + - Domish now allows for iterating child elements with specific qualified names + (#2429) + - IRCClient now has a 'back' method which removes the away status (#3366) + - IRCClient now has a 'whois' method (#3133) + +Fixes +----- + - The IRC Client implementation can now deal with compound mode changes (#3230) + - The MSN protocol implementation no longer requires the CVR0 protocol to + be included in the VER command (#3394) + - In the IRC server implementation, topic messages will no longer be sent for + a group which has no topic (#2204) + - An infinite loop (which caused infinite memory usage) in irc.split has been + fixed. This was triggered any time a message that starts with a delimiter + was sent (#3446) + - Jabber's toResponse now generates a valid stanza even when stanzaType is not + specified (#3467) + - The lifetime of authenticator instances in XmlStreamServerFactory is no + longer artificially extended (#3464) + +Other +----- + - #3365 + + +Core 8.1.0 (2008-05-18) +======================= + +Features +-------- + + - twisted.internet.error.ConnectionClosed is a new exception which is the + superclass of ConnectionLost and ConnectionDone (#3137) + - Trial's CPU and memory performance should be better now (#3034) + - twisted.python.filepath.FilePath now has a chmod method (#3124) + +Fixes +----- + + - Some reactor re-entrancy regressions were fixed (#3146, #3168) + - A regression was fixed whereby constructing a Failure for an exception and + traceback raised out of a Pyrex extension would fail (#3132) + - CopyableFailures in PB can again be created from CopiedFailures (#3174) + - FilePath.remove, when called on a FilePath representing a symlink to a + directory, no longer removes the contents of the targeted directory, and + instead removes the symlink (#3097) + - FilePath now has a linkTo method for creating new symlinks (#3122) + - The docstring for Trial's addCleanup method now correctly specifies when + cleanup functions are run (#3131) + - assertWarns now deals better with multiple identical warnings (#2904) + - Various windows installer bugs were fixed (#3115, #3144, #3150, #3151, #3164) + - API links in the howto documentation have been corrected (#3130) + - The Win32 Process transport object now has a pid attribute (#1836) + - A doc bug in the twistd plugin howto which would inevitably lead to + confusion was fixed (#3183) + - A regression breaking IOCP introduced after the last release was fixed + (#3200) + +Deprecations and Removals +------------------------- + + - mktap is now fully deprecated, and will emit DeprecationWarnings when used + (#3127) + +Other +----- + - #3079, #3118, #3120, #3145, #3069, #3149, #3186, #3208, #2762 + + +Conch 8.1.0 (2008-05-18) +======================== + +Fixes +----- + - A regression was fixed whereby the publicKeys and privateKeys attributes of + SSHFactory would not be interpreted as strings (#3141) + - The sshsimpleserver.py example had a minor bug fix (#3135) + - The deprecated mktap API is no longer used (#3127) + - 
An infelicity was fixed whereby a NameError would be raised in certain + circumstances during authentication when a ConchError should have been + (#3154) + - A workaround was added to conch.insults for a bug in gnome-terminal whereby + it would not scroll correctly (#3189) + + +Lore 8.1.0 (2008-05-18) +======================= + +Fixes +----- + - The deprecated mktap API is no longer used (#3127) + + +News 8.1.0 (2008-05-18) +======================= + +Fixes +----- + - The deprecated mktap API is no longer used (#3127) + + +Web 8.1.0 (2008-05-18) +====================== + +Fixes +----- + - Fixed an XMLRPC bug whereby sometimes a callRemote Deferred would + accidentally be fired twice when a connection was lost during the handling of + a response (#3152) + - Fixed a bug in the "Using Twisted Web" document which prevented an example + resource from being renderable (#3147) + - The deprecated mktap API is no longer used (#3127) + + +Words 8.1.0 (2008-05-18) +======================== + +Features +-------- + - JID objects now have a nice __repr__ (#3156) + - Extending XMPP protocols is now easier (#2178) + +Fixes +----- + - The deprecated mktap API is no longer used (#3127) + - A bug whereby one-time XMPP observers would be enabled permanently was fixed + (#3066) + + +Mail 8.1.0 (2008-05-18) +======================= + +Fixes +----- + - The deprecated mktap API is no longer used (#3127) + + +Names 8.1.0 (2008-05-18) +======================== + +Fixes +----- + - The deprecated mktap API is no longer used (#3127) + + +Web2 8.1.0 (2008-05-18) +======================= + +Fixes +----- + - The deprecated mktap API is no longer used (#3127) + + +Core 8.0.1 (2008-03-26) +======================= + +Fixes +----- + - README no longer refers to obsolete trial command line option + - twistd no longer causes a bizarre DeprecationWarning about mktap + + +Core 8.0.0 (2008-03-17) +======================= + +Features +-------- + + - The IOCP reactor has had many changes and is now greatly improved + (#1760, #3055) + - The main Twisted distribution is now easy_installable (#1286, #3110) + - twistd can now profile with cProfile (#2469) + - twisted.internet.defer contains a DeferredFilesystemLock which gives a + Deferred interface to lock file acquisition (#2180) + - twisted.python.modules is a new system for representing and manipulating + module paths (i.e. 
sys.path) (#1951) + - twisted.internet.fdesc now contains a writeToFD function, along with other + minor fixes (#2419) + - twisted.python.usage now allows optional type enforcement (#739) + - The reactor now has a blockingCallFromThread method for non-reactor threads + to use to wait for a reactor-scheduled call to return a result (#1042, #3030) + - Exceptions raised inside of inlineCallbacks-using functions now have a + better chance of coming with a meaningful traceback (#2639, #2803) + - twisted.python.randbytes now contains code for generating secure random + bytes (#2685) + - The classes in twisted.application.internet now accept a reactor parameter + for specifying the reactor to use for underlying calls to allow for better + testability (#2937) + - LoopingCall now allows you to specify the reactor to use to schedule new + calls, allowing much better testing techniques (#2633, #2634) + - twisted.internet.task.deferLater is a new API for scheduling calls and + getting deferreds which are fired with their results (#1875) + - objgrep now knows how to search through deque objects (#2323) + - twisted.python.log now contains a Twisted log observer which can forward + messages to the Python logging system (#1351) + - Log files now include seconds in the timestamps (#867) + - It is now possible to limit the number of log files to create during log + rotation (#1095) + - The interface required by the log context system is now documented as + ILoggingContext, and abstract.FileDescriptor now declares that it implements + it (#1272) + - There is now an example cred checker that uses a database via adbapi (#460) + - The epoll reactor is now documented in the choosing-reactors howto (#2539) + - There were improvements to the client howto (#222) + - Int8Receiver was added (#2315) + - Various refactorings to AMP introduced better testability and public + interfaces (#2657, #2667, #2656, #2664, #2810) + - twisted.protocol.policies.TrafficLoggingFactory now has a resetCounter + method (#2757) + - The FTP client can be told which port range within which to bind passive + transfer ports (#1904) + - twisted.protocols.memcache contains a new asynchronous memcache client + (#2506, #2957) + - PB now supports anonymous login (#439, #2312) + - twisted.spread.jelly now supports decimal objects (#2920) + - twisted.spread.jelly now supports all forms of sets (#2958) + - There is now an interface describing the API that process protocols must + provide (#3020) + - Trial reporting to core unittest TestResult objects has been improved (#2495) + - Trial's TestCase now has an addCleanup method which allows easy setup of + tear-down code (#2610, #2899) + - Trial's TestCase now has an assertIsInstance method (#2749) + - Trial's memory footprint and speed are greatly improved (#2275) + - At the end of trial runs, "PASSED" and "FAILED" messages are now colorized + (#2856) + - Tests which leave global state around in the reactor will now fail in + trial. 
A new option, --unclean-warnings, will convert these errors back into + warnings (#2091) + - Trial now has a --without-module command line for testing code in an + environment that lacks a particular Python module (#1795) + - Error reporting of failed assertEquals assertions now has much nicer + formatting (#2893) + - Trial now has methods for monkey-patching (#2598) + - Trial now has an ITestCase (#2898, #1950) + - The trial reporter API now has a 'done' method which is called at the end of + a test run (#2883) + - TestCase now has an assertWarns method which allows testing that functions + emit warnings (#2626, #2703) + - There are now no string exceptions in the entire Twisted code base (#2063) + - There is now a system for specifying credentials checkers with a string + (#2570) + +Fixes +----- + + - Some tests which were asserting the value of stderr have been changed + because Python uncontrollably writes bytes to stderr (#2405) + - Log files handle time zones with DST better (#2404) + - Subprocesses using PTYs on OS X that are handled by Twisted will now be able + to more reliably write the final bytes before they exit, allowing Twisted + code to more reliably receive them (#2371, #2858) + - Trial unit test reporting has been improved (#1901) + - The kqueue reactor handles connection failures better (#2172) + - It's now possible to run "trial foo/bar/" without an exception: trailing + slashes no longer cause problems (#2005) + - cred portals now better deal with implementations of inherited interfaces + (#2523) + - FTP error handling has been improved (#1160, 1107) + - Trial behaves better with respect to file locking on Windows (#2482) + - The FTP server now gives a better error when STOR is attempted during an + anonymous session (#1575) + - Trial now behaves better with tests that use the reactor's threadpool (#1832) + - twisted.python.reload now behaves better with new-style objects (#2297) + - LogFile's defaultMode parameter is now better implemented, preventing + potential security exploits (#2586) + - A minor obscure leak in thread pools was corrected (#1134) + - twisted.internet.task.Clock now returns the correct DelayedCall from + callLater, instead of returning the one scheduled for the furthest in the + future (#2691) + - twisted.spread.util.FilePager no longer unnecessarily buffers data in + memory (#1843, 2321) + - Asking for twistd or trial to use an unavailable reactor no longer prints a + traceback (#2457) + - System event triggers have fewer obscure bugs (#2509) + - Plugin discovery code is much better behaved, allowing multiple + installations of a package with plugins (#2339, #2769) + - Process and PTYProcess have been merged and some minor bugs have been fixed + (#2341) + - The reactor has less global state (#2545) + - Failure can now correctly represent and format errors caused by string + exceptions (#2830) + - The epoll reactor now has better error handling which now avoids the bug + causing 100% CPU usage in some cases (#2809) + - Errors raised during trial setUp or tearDown methods are now handled better + (#2837) + - A problem when deferred callbacks add new callbacks to the deferred that + they are a callback of was fixed (#2849) + - Log messages that are emitted during connectionMade now have the protocol + prefix correctly set (#2813) + - The string representation of a TCP Server connection now contains the actual + port that it's bound to when it was configured to listen on port 0 (#2826) + - There is better reporting of error codes for TCP failures on 
Windows (#2425) + - Process spawning has been made slightly more robust by disabling garbage + collection temporarily immediately after forking so that finalizers cannot + be executed in an unexpected environment (#2483) + - namedAny now detects import errors better (#698) + - Many fixes and improvements to the twisted.python.zipstream module have + been made (#2996) + - FilePager no longer blows up on empty files (#3023) + - twisted.python.util.FancyEqMixin has been improved to cooperate with objects + of other types (#2944) + - twisted.python.FilePath.exists now restats to prevent incorrect result + (#2896) + - twisted.python.util.mergeFunctionMetadata now also merges the __module__ + attribute (#3049) + - It is now possible to call transport.pauseProducing within connectionMade on + TCP transports without it being ignored (#1780) + - twisted.python.versions now understands new SVN metadata format for fetching + the SVN revision number (#3058) + - It's now possible to use reactor.callWhenRunning(reactor.stop) on gtk2 and + glib2 reactors (#3011) + +Deprecations and removals +------------------------- + - twisted.python.timeoutqueue is now deprecated (#2536) + - twisted.enterprise.row and twisted.enterprise.reflector are now deprecated + (#2387) + - twisted.enterprise.util is now deprecated (#3022) + - The dispatch and dispatchWithCallback methods of ThreadPool are now + deprecated (#2684) + - Starting the same reactor multiple times is now deprecated (#1785) + - The visit method of various test classes in trial has been deprecated (#2897) + - The --report-profile option to twistd and twisted.python.dxprofile are + deprecated (#2908) + - The upDownError method of Trial reporters is deprecated (#2883) + +Other +----- + + - #2396, #2211, #1921, #2378, #2247, #1603, #2463, #2530, #2426, #2356, #2574, + - #1844, #2575, #2655, #2640, #2670, #2688, #2543, #2743, #2744, #2745, #2746, + - #2742, #2741, #1730, #2831, #2216, #1192, #2848, #2767, #1220, #2727, #2643, + - #2669, #2866, #2867, #1879, #2766, #2855, #2547, #2857, #2862, #1264, #2735, + - #942, #2885, #2739, #2901, #2928, #2954, #2906, #2925, #2942, #2894, #2793, + - #2761, #2977, #2968, #2895, #3000, #2990, #2919, #2969, #2921, #3005, #421, + - #3031, #2940, #1181, #2783, #1049, #3053, #2847, #2941, #2876, #2886, #3086, + - #3095, #3109 + + +Conch 8.0.0 (2008-03-17) +======================== + +Features +-------- + - Add DEC private mode manipulation methods to ITerminalTransport. (#2403) + +Fixes +----- + - Parameterize the scheduler function used by the insults TopWindow widget. + This change breaks backwards compatibility in the TopWindow initializer. + (#2413) + - Notify subsystems, like SFTP, of connection close. (#2421) + - Change the process file descriptor "connection lost" code to reverse the + setNonBlocking operation done during initialization. (#2371) + - Change ConsoleManhole to wait for connectionLost notification before + stopping the reactor. (#2123, #2371) + - Make SSHUserAuthServer.ssh_USERAUTH_REQUEST return a Deferred. (#2528) + - Manhole's initializer calls its parent class's initializer with its + namespace argument. (#2587) + - Handle ^C during input line continuation in manhole by updating the prompt + and line buffer correctly. (#2663) + - Make twisted.conch.telnet.Telnet by default reject all attempts to enable + options. 
(#1967) + - Reduce the number of calls into application code to deliver application-level + data in twisted.conch.telnet.Telnet.dataReceived (#2107) + - Fix definition and management of extended attributes in conch file transfer. + (#3010) + - Fix parsing of OpenSSH-generated RSA keys with differing ASN.1 packing style. + (#3008) + - Fix handling of missing $HOME in twisted.conch.client.unix. (#3061) + +Misc +---- + - #2267, #2378, #2604, #2707, #2341, #2685, #2679, #2912, #2977, #2678, #2709 + #2063, #2847 + + +Lore 8.0.0 (2008-03-17) +======================= + +Fixes +----- + - Change twisted.lore.tree.setIndexLin so that it removes node with index-link + class when the specified index filename is None. (#812) + - Fix the conversion of the list of options in man pages to Lore format. + (#3017) + - Fix conch man pages generation. (#3075) + - Fix management of the interactive command tag in man2lore. (#3076) + +Misc +---- + - #2847 + + +News 8.0.0 (2008-03-17) +======================= + +Misc +---- + - Remove all "API Stability" markers (#2847) + + +Runner 8.0.0 (2008-03-17) +========================= + +Misc +---- + - Remove all "API Stability" markers (#2847) + + +Web 8.0.0 (2008-03-17) +====================== + +Features +-------- + - Add support to twisted.web.client.getPage for the HTTP HEAD method. (#2750) + +Fixes +----- + - Set content-type in xmlrpc responses to "text/xml" (#2430) + - Add more error checking in the xmlrpc.XMLRPC render method, and enforce + POST requests. (#2505) + - Reject unicode input to twisted.web.client._parse to reject invalid + unicode URLs early. (#2628) + - Correctly re-quote URL path segments when generating an URL string to + return from Request.prePathURL. (#2934) + - Make twisted.web.proxy.ProxyClientFactory close the connection when + reporting a 501 error. (#1089) + - Fix twisted.web.proxy.ReverseProxyResource to specify the port in the + host header if different from 80. (#1117) + - Change twisted.web.proxy.ReverseProxyResource so that it correctly encodes + the request URI it sends on to the server for which it is a proxy. (#3013) + - Make "twistd web --personal" use PBServerFactory (#2681) + +Misc +---- + - #1996, #2382, #2211, #2633, #2634, #2640, #2752, #238, #2905 + + +Words 8.0.0 (2008-03-17) +======================== + +Features +-------- + - Provide function for creating XMPP response stanzas. (#2614, #2614) + - Log exceptions raised in Xish observers. (#2616) + - Add 'and' and 'or' operators for Xish XPath expressions. (#2502) + - Make JIDs hashable. (#2770) + +Fixes +----- + - Respect the hostname and servername parameters to IRCClient.register. (#1649) + - Make EventDispatcher remove empty callback lists. (#1652) + - Use legacy base64 API to support Python 2.3 (#2461) + - Fix support of DIGEST-MD5 challenge parsing with multi-valued directives. + (#2606) + - Fix reuse of dict of prefixes in domish.Element.toXml (#2609) + - Properly process XMPP stream headers (#2615) + - Use proper namespace for XMPP stream errors. (#2630) + - Properly parse XMPP stream errors. (#2771) + - Fix toResponse for XMPP stanzas without an id attribute. (#2773) + - Move XMPP stream header procesing to authenticators. (#2772) + +Misc +---- + - #2617, #2640, #2741, #2063, #2570, #2847 + + +Mail 8.0.0 (2008-03-17) +======================= + +Features +-------- + - Support CAPABILITY responses that include atoms of the form "FOO" and + "FOO=BAR" in IMAP4 (#2695) + - Parameterize error handling behavior of imap4.encoder and imap4.decoder. 
+ (#2929) + +Fixes +----- + - Handle empty passwords in SMTP auth. (#2521) + - Fix IMAP4Client's parsing of literals which are not preceeded by whitespace. + (#2700) + - Handle MX lookup suceeding without answers. (#2807) + - Fix issues with aliases(5) process support. (#2729) + +Misc +---- + - #2371, #2123, #2378, #739, #2640, #2746, #1917, #2266, #2864, #2832, #2063, + #2865, #2847 + + +Names 8.0.0 (2008-03-17) +======================== + +Fixes +----- + + - Refactor DNSDatagramProtocol and DNSProtocol to use same base class (#2414) + - Change Resolver to query specified nameservers in specified order, instead + of reverse order. (#2290) + - Make SRVConnector work with bad results and NXDOMAIN responses. + (#1908, #2777) + - Handle write errors happening in dns queries, to have correct deferred + failures. (#2492) + - Fix the value of OP_NOTIFY and add a definition for OP_UPDATE. (#2945) + +Misc +---- + - #2685, #2936, #2581, #2847 + diff --git a/vendor/Twisted-10.0.0/README b/vendor/Twisted-10.0.0/README new file mode 100644 index 000000000000..4ccf085e0987 --- /dev/null +++ b/vendor/Twisted-10.0.0/README @@ -0,0 +1,118 @@ +Twisted 10.0.0 + +Quote of the Release: + + [on picking the quote of the release] + Man, we're going to have to get a lot funnier if we're going + to do time-based releases + + +For information on what's new in Twisted 10.0.0, see the NEWS file that comes +with the distribution. + +What is this? +============= + + Twisted is an event-based framework for internet applications which works on + Python 2.4 through 2.6. The following are some of the modules included + with Twisted:: + + - twisted.application + A "Service" system that allows you to organize your application in + hierarchies with well-defined startup and dependency semantics, + - twisted.cred + A general credentials and authentication system that facilitates + pluggable authentication backends, + - twisted.enterprise + Asynchronous database access, compatible with any Python DBAPI2.0 + modules, + - twisted.internet + Low-level asynchronous networking APIs that allow you to define + your own protocols that run over certain transports, + - twisted.manhole + A tool for remote debugging of your services which gives you a + Python interactive interpreter, + - twisted.protocols + Basic protocol implementations and helpers for your own protocol + implementations, + - twisted.python + A large set of utilities for Python tricks, reflection, text + processing, and anything else, + - twisted.spread + A secure, fast remote object system, + - twisted.trial + A unit testing framework that integrates well with Twisted-based code. + + Twisted supports integration of the Tk, GTK+, GTK+ 2, Qt, Mac OS X, + or wxPython event loop with its main event loop. The Win32 event + loop is also supported. + + For more information, visit http://www.twistedmatrix.com, or join the list + at http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-python + + There are many official Twisted subprojects, including clients and + servers for web, mail, DNS, and more. You can find out more about + these projects at http://twistedmatrix.com/trac/wiki/TwistedProjects + + +Installing +========== + + Instructions for installing this software are in INSTALL. 
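
  As a quick illustration only (the INSTALL file quoted in this patch remains
  the authoritative reference), installing from an unpacked source tree is
  the usual distutils invocation, assuming the setup.py that Twisted releases
  of this era ship:

    % python setup.py install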
+ +Unit Tests +========== + + + See our unit tests run proving that the software is BugFree(TM):: + + % trial twisted + + Some of these tests may fail if you + * don't have the dependancies required for a particular subsystem installed, + * have a firewall blocking some ports (or things like Multicast, which Linux + NAT has shown itself to do), or + * run them as root. + + +Documentation and Support +========================= + + Examples on how to use Twisted APIs are located in doc/examples; + this might ease the learning curve a little bit, since all these + files are kept as short as possible. The file doc/howto/index.xhtml + contains an index of all the HOWTOs: this should be your starting + point when looking for documentation. + + Help is available on the Twisted mailing list:: + + http://twistedmatrix.com/cgi-bin/mailman/listinfo/twisted-python + + There is also a very lively IRC channel, #twisted, on + irc.freenode.net. + + +Copyright +========= + + All of the code in this distribution is Copyright (c) 2001-2010 + Twisted Matrix Laboratories. + + Twisted is made available under the MIT license. The included + LICENSE file describes this in detail. + + +Warranty +======== + + THIS SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER + EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS + TO THE USE OF THIS SOFTWARE IS WITH YOU. + + IN NO EVENT WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY + AND/OR REDISTRIBUTE THE LIBRARY, BE LIABLE TO YOU FOR ANY DAMAGES, EVEN IF + SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + DAMAGES. + + Again, see the included LICENSE file for specific legal details. diff --git a/vendor/Twisted-10.0.0/bin/.twistd.swp b/vendor/Twisted-10.0.0/bin/.twistd.swp new file mode 100644 index 0000000000000000000000000000000000000000..2f2e963230db47da3e9719bf4c8ee58131bcae62 GIT binary patch literal 12288 zcmeI2v2GJV5QaAtbRmeeAdT(9K4G8FrU*np6Q!_}AfyBuM6vem*$cki)9l*84Wi`% zl2p`)mS+GS0>lGA!y~}#`7BWck~*5Te%m`cJ2U>K^UW}MI@pJ;xG!j37vlY`?}Nqh zP4V`d5ME)fRXF(nDo~BIojJ6Ncw|kc$7lP$UIy$I-9j7X<7zusCbif-Jkrjqw9`vs zdUc1|bbTwffTYLkTkArc zlEU-<@E^d>4IzGzK9WvJC!}N28`6E!Rno6(LVO@SByEts()}-_&!kVJccc*Wgv5&p zFaajO1egF5U;<2l2`~XBzy$tD0B5EZi41KHd!wco#2+qCw|{MGxdiK^lz) z&=fV;3}_fz=EJEHK+7#$Tu48OwQ&l4D`|tREOO|=NsIPl_N@qN4XMT9#!>Ws`0O{ zRb|ejHsd(cCOtohQ^oCOHB;U!>+STmMMaHLhVX4zh7=8O12v|wfW=fHcpK<~ zpD3pYs~w}v9h5x@!|5^II-F<+xqPV{xEU()l^$k<+Cgef(-X`&SuHq4_9Dd6NS&H% zv?f=^mz}GsgvYDaQA6QCDcBqA>^?f!h0G$P%1b?UA?uDU7O2M)53NxI{Uqsi= + +""" +tap2rpm +""" + +### Twisted Preamble +# This makes sure that users don't have to set up their environment +# specially in order to run these programs from bin/. +import sys, os, string +if string.find(os.path.abspath(sys.argv[0]), os.sep+'Twisted') != -1: + sys.path.insert(0, os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))) +### end of preamble + +from twisted.scripts import tap2rpm +tap2rpm.run() diff --git a/vendor/Twisted-10.0.0/bin/tapconvert b/vendor/Twisted-10.0.0/bin/tapconvert new file mode 100755 index 000000000000..6ad2d7f42c9d --- /dev/null +++ b/vendor/Twisted-10.0.0/bin/tapconvert @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +### Twisted Preamble +# This makes sure that users don't have to set up their environment +# specially in order to run these programs from bin/. 
+import sys, os, string +if string.find(os.path.abspath(sys.argv[0]), os.sep+'Twisted') != -1: + sys.path.insert(0, os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))) +if not hasattr(os, "getuid") or os.getuid() != 0: + sys.path.insert(0, os.getcwd()) +### end of preamble + +from twisted.scripts.tapconvert import run +run() diff --git a/vendor/Twisted-10.0.0/bin/trial b/vendor/Twisted-10.0.0/bin/trial new file mode 100755 index 000000000000..963a9068d8d2 --- /dev/null +++ b/vendor/Twisted-10.0.0/bin/trial @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +### Twisted Preamble +# This makes sure that users don't have to set up their environment +# specially in order to run these programs from bin/. +import sys, os, string +if string.find(os.path.abspath(sys.argv[0]), os.sep+'Twisted') != -1: + sys.path.insert(0, os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))) +if hasattr(os, "getuid") and os.getuid() != 0: + sys.path.insert(0, os.curdir) +### end of preamble + +# begin chdir armor +sys.path[:] = map(os.path.abspath, sys.path) +# end chdir armor + +from twisted.scripts.trial import run +run() diff --git a/vendor/Twisted-10.0.0/bin/twistd b/vendor/Twisted-10.0.0/bin/twistd new file mode 100755 index 000000000000..7ec65ded79be --- /dev/null +++ b/vendor/Twisted-10.0.0/bin/twistd @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +### Twisted Preamble +# This makes sure that users don't have to set up their environment +# specially in order to run these programs from bin/. +import sys, os, string +if string.find(os.path.abspath(sys.argv[0]), os.sep+'Twisted') != -1: + sys.path.insert(0, os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))) +if hasattr(os, "getuid") and os.getuid() != 0: + sys.path.insert(0, os.path.abspath(os.getcwd())) +### end of preamble + + +from twisted.scripts.twistd import run +run() diff --git a/vendor/Twisted-10.0.0/doc/conch/benchmarks/README b/vendor/Twisted-10.0.0/doc/conch/benchmarks/README new file mode 100644 index 000000000000..233bc8e4f788 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/benchmarks/README @@ -0,0 +1,15 @@ +This directory contains various simple programs intended to exercise various +features of Twisted Conch as a way to learn about and track their +performance characteristics. As there is currently no record of past +benchmark results, the tracking aspect of this is currently somewhat +fantastic. However, the intent is for this to change at some future point. + +All (one) of the programs in this directory are currently intended to be +invoked directly and to report some timing information on standard out. + +The following benchmarks are currently available: + +buffering_mixin.py: + + This deals with twisted.conch.mixin.BufferingMixin which provides + Nagle-like write coalescing for Protocol classes. diff --git a/vendor/Twisted-10.0.0/doc/conch/benchmarks/buffering_mixin.py b/vendor/Twisted-10.0.0/doc/conch/benchmarks/buffering_mixin.py new file mode 100755 index 000000000000..b3c506a727f6 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/benchmarks/buffering_mixin.py @@ -0,0 +1,182 @@ +# Copyright (c) 2006 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +""" +Benchmarks comparing the write performance of a "normal" Protocol instance +and an instance of a Protocol class which has had L{twisted.conch.mixin}'s +L{BufferingMixin} mixed in to perform +Nagle-like write coalescing. +""" + +from sys import stdout +from pprint import pprint +from time import time + +from twisted.python.usage import Options +from twisted.python.log import startLogging + +from twisted.internet.protocol import ServerFactory, Protocol, ClientCreator +from twisted.internet.defer import Deferred +from twisted.internet import reactor + +from twisted.conch.mixin import BufferingMixin + + +class BufferingBenchmark(Options): + """ + Options for configuring the execution parameters of a benchmark run. + """ + + optParameters = [ + ('scale', 's', '1', + 'Work multiplier (bigger takes longer, might resist noise better)')] + + def postOptions(self): + self['scale'] = int(self['scale']) + + + +class ServerProtocol(Protocol): + """ + A silent protocol which only waits for a particular amount of input and + then fires a Deferred. + """ + def __init__(self, expected, finished): + self.expected = expected + self.finished = finished + + + def dataReceived(self, bytes): + self.expected -= len(bytes) + if self.expected == 0: + finished, self.finished = self.finished, None + finished.callback(None) + + + +class BufferingProtocol(Protocol, BufferingMixin): + """ + A protocol which uses the buffering mixin to provide a write method. + """ + + + +class UnbufferingProtocol(Protocol): + """ + A protocol which provides a naive write method which simply passes through + to the transport. + """ + + def connectionMade(self): + """ + Bind write to the transport's write method and flush to a no-op + function in order to provide the same API as is provided by + BufferingProtocol. + """ + self.write = self.transport.write + self.flush = lambda: None + + + +def _write(proto, byteCount): + write = proto.write + flush = proto.flush + + for i in range(byteCount): + write('x') + flush() + + + +def _benchmark(byteCount, clientProtocol): + result = {} + finished = Deferred() + def cbFinished(ignored): + result[u'disconnected'] = time() + result[u'duration'] = result[u'disconnected'] - result[u'connected'] + return result + finished.addCallback(cbFinished) + + f = ServerFactory() + f.protocol = lambda: ServerProtocol(byteCount, finished) + server = reactor.listenTCP(0, f) + + f2 = ClientCreator(reactor, clientProtocol) + proto = f2.connectTCP('127.0.0.1', server.getHost().port) + def connected(proto): + result[u'connected'] = time() + return proto + proto.addCallback(connected) + proto.addCallback(_write, byteCount) + return finished + + + +def _benchmarkBuffered(byteCount): + return _benchmark(byteCount, BufferingProtocol) + + + +def _benchmarkUnbuffered(byteCount): + return _benchmark(byteCount, UnbufferingProtocol) + + + +def benchmark(scale=1): + """ + Benchmark and return information regarding the relative performance of a + protocol which does not use the buffering mixin and a protocol which + does. + + @type scale: C{int} + @param scale: A multipler to the amount of work to perform + + @return: A Deferred which will fire with a dictionary mapping each of + the two unicode strings C{u'buffered'} and C{u'unbuffered'} to + dictionaries describing the performance of a protocol of each type. + These value dictionaries will map the unicode strings C{u'connected'} + and C{u'disconnected'} to the times at which each of those events + occurred and C{u'duration'} two the difference between these two values. 
+ """ + overallResult = {} + + byteCount = 1024 + + bufferedDeferred = _benchmarkBuffered(byteCount * scale) + def didBuffered(bufferedResult): + overallResult[u'buffered'] = bufferedResult + unbufferedDeferred = _benchmarkUnbuffered(byteCount * scale) + def didUnbuffered(unbufferedResult): + overallResult[u'unbuffered'] = unbufferedResult + return overallResult + unbufferedDeferred.addCallback(didUnbuffered) + return unbufferedDeferred + bufferedDeferred.addCallback(didBuffered) + return bufferedDeferred + + + +def main(args=None): + """ + Perform a single benchmark run, starting and stopping the reactor and + logging system as necessary. + """ + startLogging(stdout) + + options = BufferingBenchmark() + options.parseOptions(args) + + d = benchmark(options['scale']) + def cbBenchmark(result): + pprint(result) + def ebBenchmark(err): + print err.getTraceback() + d.addCallbacks(cbBenchmark, ebBenchmark) + def stopReactor(ign): + reactor.stop() + d.addBoth(stopReactor) + reactor.run() + + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/demo.tac b/vendor/Twisted-10.0.0/doc/conch/examples/demo.tac new file mode 100644 index 000000000000..a853b6a7fc47 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/demo.tac @@ -0,0 +1,25 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# You can run this .tac file directly with: +# twistd -ny demo.tac + +"""Nearly pointless demonstration of the manhole interactive interpreter. + +This does about the same thing as demo_manhole, but uses the tap +module's makeService method instead. The only interesting difference +is that in this version, the telnet server also requires +authentication. + +Note, you will have to create a file named \"passwd\" and populate it +with credentials (in the format of passwd(5)) to use this demo. +""" + +from twisted.application import service +application = service.Application("TAC Demo") + +from twisted.conch import manhole_tap +manhole_tap.makeService({"telnetPort": "tcp:6023", + "sshPort": "tcp:6022", + "namespace": {"foo": "bar"}, + "passwd": "passwd"}).setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/demo_draw.tac b/vendor/Twisted-10.0.0/doc/conch/examples/demo_draw.tac new file mode 100644 index 000000000000..55a53c79411a --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/demo_draw.tac @@ -0,0 +1,80 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# You can run this .tac file directly with: +# twistd -ny demo_draw.tac + +"""A trivial drawing application. + +Clients are allowed to connect and spew various characters out over +the terminal. Spacebar changes the drawing character, while the arrow +keys move the cursor. +""" + +from twisted.conch.insults import insults +from twisted.conch.telnet import TelnetTransport, TelnetBootstrapProtocol +from twisted.conch.manhole_ssh import ConchFactory, TerminalRealm + +from twisted.internet import protocol +from twisted.application import internet, service +from twisted.cred import checkers, portal + +class Draw(insults.TerminalProtocol): + """Protocol which accepts arrow key and spacebar input and places + the requested characters onto the terminal. 
+ """ + cursors = list('!@#$%^&*()_+-=') + + def connectionMade(self): + self.terminal.eraseDisplay() + self.terminal.resetModes([insults.IRM]) + self.cursor = self.cursors[0] + + def keystrokeReceived(self, keyID, modifier): + if keyID == self.terminal.UP_ARROW: + self.terminal.cursorUp() + elif keyID == self.terminal.DOWN_ARROW: + self.terminal.cursorDown() + elif keyID == self.terminal.LEFT_ARROW: + self.terminal.cursorBackward() + elif keyID == self.terminal.RIGHT_ARROW: + self.terminal.cursorForward() + elif keyID == ' ': + self.cursor = self.cursors[(self.cursors.index(self.cursor) + 1) % len(self.cursors)] + else: + return + self.terminal.write(self.cursor) + self.terminal.cursorBackward() + +def makeService(args): + checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(username="password") + + f = protocol.ServerFactory() + f.protocol = lambda: TelnetTransport(TelnetBootstrapProtocol, + insults.ServerProtocol, + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + tsvc = internet.TCPServer(args['telnet'], f) + + def chainProtocolFactory(): + return insults.ServerProtocol( + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + + rlm = TerminalRealm() + rlm.chainedProtocolFactory = chainProtocolFactory + ptl = portal.Portal(rlm, [checker]) + f = ConchFactory(ptl) + csvc = internet.TCPServer(args['ssh'], f) + + m = service.MultiService() + tsvc.setServiceParent(m) + csvc.setServiceParent(m) + return m + +application = service.Application("Insults Demo App") +makeService({'protocolFactory': Draw, + 'telnet': 6023, + 'ssh': 6022}).setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/demo_insults.tac b/vendor/Twisted-10.0.0/doc/conch/examples/demo_insults.tac new file mode 100644 index 000000000000..a49f011c984d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/demo_insults.tac @@ -0,0 +1,252 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# You can run this .tac file directly with: +# twistd -ny demo_insults.tac + +"""Various simple terminal manipulations using the insults module. + +This demo sets up two listening ports: one on 6022 which accepts ssh +connections; one on 6023 which accepts telnet connections. No login +for the telnet server is required; for the ssh server, \"username\" is +the username and \"password\" is the password. + +The TerminalProtocol subclass defined here ignores most user input +(except to print it out to the server log) and spends the duration of +the connection drawing (the author's humble approximation of) +raindrops at random locations on the client's terminal. +, -, *, and +/ are respected and each adjusts an aspect of the timing of the +animation process. +""" + +import random, string + +from twisted.python import log +from twisted.internet import protocol, task +from twisted.application import internet, service +from twisted.cred import checkers, portal + +from twisted.conch.insults import insults +from twisted.conch.telnet import TelnetTransport, TelnetBootstrapProtocol +from twisted.conch.manhole_ssh import ConchFactory, TerminalRealm + +class DrawingFinished(Exception): + """Sentinel exception, raised when no \"frames\" for a particular + \"animation\" remain to be drawn. + """ + +class Drawable: + """Representation of an animation. 
+ + Constructed with a protocol instance and a coordinate on the + screen, waits for invocations of iterate() at which point it + erases the previous frame of the animation and draws the next one, + using its protocol instance and always placing the upper left hand + corner of the frame at the given coordinates. + + Frames are defined with draw_ prefixed methods. Erasure is + performed by erase_ prefixed methods. + """ + n = 0 + + def __init__(self, proto, col, line): + self.proto = proto + self.col = col + self.line = line + + def drawLines(self, s): + lines = s.splitlines() + c = self.col + line = self.line + for l in lines: + self.proto.cursorPosition(c - len(lines) / 2, line) + self.proto.write(l) + line += 1 + + def iterate(self): + getattr(self, 'erase_' + str(self.n))() + self.n += 1 + f = getattr(self, 'draw_' + str(self.n), None) + if f is None: + raise DrawingFinished() + f() + + def erase_0(self): + pass + + +class Splat(Drawable): + HEIGHT = 5 + WIDTH = 11 + + def draw_1(self): + # . . + #. . . + # . . + self.drawLines(' . .\n. . .\n . .') + + def erase_1(self): + self.drawLines(' \n \n ') + + def draw_2(self): + # . . . . + # . o o o . + #. o o o o . + # . o o o . + # . . . . + self.drawLines(' . . . .\n . o o o .\n. o o o o .\n . o o o .\n . . . .') + + def erase_2(self): + self.drawLines(' \n \n \n \n ') + + def draw_3(self): + # o o o o + # o O O O o + #o O O O O o + # o O O O o + # o o o o + self.drawLines(' o o o o\n o O O O o\no O O O O o\n o O O O o\n o o o o') + + erase_3 = erase_2 + + def draw_4(self): + # O O O O + # O . . . O + #O . . . . O + # O . . . O + # O O O O + self.drawLines(' O O O O\n O . . . O\nO . . . . O\n O . . . O\n O O O O') + + erase_4 = erase_3 + + def draw_5(self): + # . . . . + # . . + #. . + # . . + # . . . . + self.drawLines(' . . . .\n . .\n. .\n . .\n . . . .') + + erase_5 = erase_4 + +class Drop(Drawable): + WIDTH = 3 + HEIGHT = 4 + + def draw_1(self): + # o + self.drawLines(' o') + + def erase_1(self): + self.drawLines(' ') + + def draw_2(self): + # _ + #/ \ + #\./ + self.drawLines(' _ \n/ \\\n\\./') + + def erase_2(self): + self.drawLines(' \n \n ') + + def draw_3(self): + # O + self.drawLines(' O') + + def erase_3(self): + self.drawLines(' ') + +class DemoProtocol(insults.TerminalProtocol): + """Draws random things at random places on the screen. 
+ """ + width = 80 + height = 24 + + interval = 0.1 + rate = 0.05 + + def connectionMade(self): + self.run() + + def connectionLost(self, reason): + self._call.stop() + del self._call + + def run(self): + # Clear the screen, matey + self.terminal.eraseDisplay() + + self._call = task.LoopingCall(self._iterate) + self._call.start(self.interval) + + def _iterate(self): + cls = random.choice((Splat, Drop)) + + # Move to a random location on the screen + col = random.randrange(self.width - cls.WIDTH) + cls.WIDTH + line = random.randrange(self.height - cls.HEIGHT) + cls.HEIGHT + + s = cls(self.terminal, col, line) + + c = task.LoopingCall(s.iterate) + c.start(self.rate).addErrback(lambda f: f.trap(DrawingFinished)).addErrback(log.err) + + # ITerminalListener + def terminalSize(self, width, height): + self.width = width + self.height = height + + def unhandledControlSequence(self, seq): + log.msg("Client sent something weird: %r" % (seq,)) + + def keystrokeReceived(self, keyID, modifier): + if keyID == '+': + self.interval /= 1.1 + elif keyID == '-': + self.interval *= 1.1 + elif keyID == '*': + self.rate /= 1.1 + elif keyID == '/': + self.rate *= 1.1 + else: + log.msg("Client sent: %r" % (keyID,)) + return + + self._call.stop() + self._call = task.LoopingCall(self._iterate) + self._call.start(self.interval) + + +def makeService(args): + checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(username="password") + + f = protocol.ServerFactory() + f.protocol = lambda: TelnetTransport(TelnetBootstrapProtocol, + insults.ServerProtocol, + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + tsvc = internet.TCPServer(args['telnet'], f) + + def chainProtocolFactory(): + return insults.ServerProtocol( + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + + rlm = TerminalRealm() + rlm.chainedProtocolFactory = chainProtocolFactory + ptl = portal.Portal(rlm, [checker]) + f = ConchFactory(ptl) + csvc = internet.TCPServer(args['ssh'], f) + + m = service.MultiService() + tsvc.setServiceParent(m) + csvc.setServiceParent(m) + return m + +application = service.Application("Insults Demo App") + +makeService({'protocolFactory': DemoProtocol, + 'telnet': 6023, + 'ssh': 6022}).setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/demo_manhole.tac b/vendor/Twisted-10.0.0/doc/conch/examples/demo_manhole.tac new file mode 100644 index 000000000000..7edb7a515c35 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/demo_manhole.tac @@ -0,0 +1,56 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# You can run this .tac file directly with: +# twistd -ny demo_manhole.tac + +"""An interactive Python interpreter with syntax coloring. + +Nothing interesting is actually defined here. Two listening ports are +set up and attached to protocols which know how to properly set up a +ColoredManhole instance. 
+""" + +from twisted.conch.manhole import ColoredManhole +from twisted.conch.insults import insults +from twisted.conch.telnet import TelnetTransport, TelnetBootstrapProtocol +from twisted.conch.manhole_ssh import ConchFactory, TerminalRealm + +from twisted.internet import protocol +from twisted.application import internet, service +from twisted.cred import checkers, portal + +def makeService(args): + checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(username="password") + + f = protocol.ServerFactory() + f.protocol = lambda: TelnetTransport(TelnetBootstrapProtocol, + insults.ServerProtocol, + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + tsvc = internet.TCPServer(args['telnet'], f) + + def chainProtocolFactory(): + return insults.ServerProtocol( + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + + rlm = TerminalRealm() + rlm.chainedProtocolFactory = chainProtocolFactory + ptl = portal.Portal(rlm, [checker]) + f = ConchFactory(ptl) + csvc = internet.TCPServer(args['ssh'], f) + + m = service.MultiService() + tsvc.setServiceParent(m) + csvc.setServiceParent(m) + return m + +application = service.Application("Interactive Python Interpreter") + +makeService({'protocolFactory': ColoredManhole, + 'protocolArgs': (None,), + 'telnet': 6023, + 'ssh': 6022}).setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/demo_recvline.tac b/vendor/Twisted-10.0.0/doc/conch/examples/demo_recvline.tac new file mode 100644 index 000000000000..92d01d13f199 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/demo_recvline.tac @@ -0,0 +1,77 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# You can run this .tac file directly with: +# twistd -ny demo_recvline.tac + +"""Demonstrates line-at-a-time handling with basic line-editing support. + +This is a variation on the echo server. It sets up two listening +ports: one on 6022 which accepts ssh connections; one on 6023 which +accepts telnet connections. No login for the telnet server is +required; for the ssh server, \"username\" is the username and +\"password\" is the password. + +The demo protocol defined in this module is handed a line of input at +a time, which it simply writes back to the connection. +HistoricRecvline, which the demo protocol subclasses, provides basic +line editing and input history features. +""" + +from twisted.conch import recvline +from twisted.conch.insults import insults +from twisted.conch.telnet import TelnetTransport, TelnetBootstrapProtocol +from twisted.conch.manhole_ssh import ConchFactory, TerminalRealm + +from twisted.internet import protocol +from twisted.application import internet, service +from twisted.cred import checkers, portal + +class DemoRecvLine(recvline.HistoricRecvLine): + """Simple echo protocol. + + Accepts lines of input and writes them back to its connection. If + a line consisting solely of \"quit\" is received, the connection + is dropped. 
+ """ + + def lineReceived(self, line): + if line == "quit": + self.terminal.loseConnection() + self.terminal.write(line) + self.terminal.nextLine() + self.terminal.write(self.ps[self.pn]) + +def makeService(args): + checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(username="password") + + f = protocol.ServerFactory() + f.protocol = lambda: TelnetTransport(TelnetBootstrapProtocol, + insults.ServerProtocol, + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + tsvc = internet.TCPServer(args['telnet'], f) + + def chainProtocolFactory(): + return insults.ServerProtocol( + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + + rlm = TerminalRealm() + rlm.chainedProtocolFactory = chainProtocolFactory + ptl = portal.Portal(rlm, [checker]) + f = ConchFactory(ptl) + csvc = internet.TCPServer(args['ssh'], f) + + m = service.MultiService() + tsvc.setServiceParent(m) + csvc.setServiceParent(m) + return m + +application = service.Application("Insults RecvLine Demo") + +makeService({'protocolFactory': DemoRecvLine, + 'telnet': 6023, + 'ssh': 6022}).setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/demo_scroll.tac b/vendor/Twisted-10.0.0/doc/conch/examples/demo_scroll.tac new file mode 100644 index 000000000000..4fdfbce18cdd --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/demo_scroll.tac @@ -0,0 +1,100 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# You can run this .tac file directly with: +# twistd -ny demo_scroll.tac + +"""Simple echo-ish server that uses the scroll-region. + +This demo sets up two listening ports: one on 6022 which accepts ssh +connections; one on 6023 which accepts telnet connections. No login +for the telnet server is required; for the ssh server, \"username\" is +the username and \"password\" is the password. + +The TerminalProtocol subclass defined here sets up a scroll-region occupying +most of the screen. It positions the cursor at the bottom of the screen and +then echos back printable input. When return is received, the line is +copied to the upper area of the screen (scrolling anything older up) and +clears the input line. +""" + +import string + +from twisted.python import log +from twisted.internet import protocol +from twisted.application import internet, service +from twisted.cred import checkers, portal + +from twisted.conch.insults import insults +from twisted.conch.telnet import TelnetTransport, TelnetBootstrapProtocol +from twisted.conch.manhole_ssh import ConchFactory, TerminalRealm + +class DemoProtocol(insults.TerminalProtocol): + """Copies input to an upwards scrolling region. 
+ """ + width = 80 + height = 24 + + def connectionMade(self): + self.buffer = [] + self.terminalSize(self.width, self.height) + + # ITerminalListener + def terminalSize(self, width, height): + self.width = width + self.height = height + + self.terminal.setScrollRegion(0, height - 1) + self.terminal.cursorPosition(0, height) + self.terminal.write('> ') + + def unhandledControlSequence(self, seq): + log.msg("Client sent something weird: %r" % (seq,)) + + def keystrokeReceived(self, keyID, modifier): + if keyID == '\r': + self.terminal.cursorPosition(0, self.height - 2) + self.terminal.nextLine() + self.terminal.write(''.join(self.buffer)) + self.terminal.cursorPosition(0, self.height - 1) + self.terminal.eraseToLineEnd() + self.terminal.write('> ') + self.buffer = [] + elif keyID in list(string.printable): + self.terminal.write(keyID) + self.buffer.append(keyID) + + +def makeService(args): + checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(username="password") + + f = protocol.ServerFactory() + f.protocol = lambda: TelnetTransport(TelnetBootstrapProtocol, + insults.ServerProtocol, + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + tsvc = internet.TCPServer(args['telnet'], f) + + def chainProtocolFactory(): + return insults.ServerProtocol( + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + + rlm = TerminalRealm() + rlm.chainedProtocolFactory = chainProtocolFactory + ptl = portal.Portal(rlm, [checker]) + f = ConchFactory(ptl) + csvc = internet.TCPServer(args['ssh'], f) + + m = service.MultiService() + tsvc.setServiceParent(m) + csvc.setServiceParent(m) + return m + +application = service.Application("Scroll Region Demo App") + +makeService({'protocolFactory': DemoProtocol, + 'telnet': 6023, + 'ssh': 6022}).setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/index.html b/vendor/Twisted-10.0.0/doc/conch/examples/index.html new file mode 100644 index 000000000000..6880581dcf3c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/index.html @@ -0,0 +1,40 @@ + + +Twisted Documentation: Twisted.Conch code examples + + + + +

Twisted.Conch code examples

+ +
+ + +

Simple SSH server and client

+ + +

Simple telnet server

+ + + +

twisted.conch.insults examples

+ +
+ +

Index

+ Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/sshsimpleclient.py b/vendor/Twisted-10.0.0/doc/conch/examples/sshsimpleclient.py new file mode 100644 index 000000000000..a25b90e63c79 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/sshsimpleclient.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.conch.ssh import transport, userauth, connection, common, keys, channel +from twisted.internet import defer, protocol, reactor +from twisted.python import log +import struct, sys, getpass, os + +USER = 'z3p' # replace this with a valid username +HOST = 'localhost' # and a valid host + +class SimpleTransport(transport.SSHClientTransport): + def verifyHostKey(self, hostKey, fingerprint): + print 'host key fingerprint: %s' % fingerprint + return defer.succeed(1) + + def connectionSecure(self): + self.requestService( + SimpleUserAuth(USER, + SimpleConnection())) + +class SimpleUserAuth(userauth.SSHUserAuthClient): + def getPassword(self): + return defer.succeed(getpass.getpass("%s@%s's password: " % (USER, HOST))) + + def getGenericAnswers(self, name, instruction, questions): + print name + print instruction + answers = [] + for prompt, echo in questions: + if echo: + answer = raw_input(prompt) + else: + answer = getpass.getpass(prompt) + answers.append(answer) + return defer.succeed(answers) + + def getPublicKey(self): + path = os.path.expanduser('~/.ssh/id_dsa') + # this works with rsa too + # just change the name here and in getPrivateKey + if not os.path.exists(path) or self.lastPublicKey: + # the file doesn't exist, or we've tried a public key + return + return keys.getPublicKeyString(path+'.pub') + + def getPrivateKey(self): + path = os.path.expanduser('~/.ssh/id_dsa') + return defer.succeed(keys.getPrivateKeyObject(path)) + +class SimpleConnection(connection.SSHConnection): + def serviceStarted(self): + self.openChannel(TrueChannel(2**16, 2**15, self)) + self.openChannel(FalseChannel(2**16, 2**15, self)) + self.openChannel(CatChannel(2**16, 2**15, self)) + +class TrueChannel(channel.SSHChannel): + name = 'session' # needed for commands + + def openFailed(self, reason): + print 'true failed', reason + + def channelOpen(self, ignoredData): + self.conn.sendRequest(self, 'exec', common.NS('true')) + + def request_exit_status(self, data): + status = struct.unpack('>L', data)[0] + print 'true status was: %s' % status + self.loseConnection() + +class FalseChannel(channel.SSHChannel): + name = 'session' + + def openFailed(self, reason): + print 'false failed', reason + + def channelOpen(self, ignoredData): + self.conn.sendRequest(self, 'exec', common.NS('false')) + + def request_exit_status(self, data): + status = struct.unpack('>L', data)[0] + print 'false status was: %s' % status + self.loseConnection() + +class CatChannel(channel.SSHChannel): + name = 'session' + + def openFailed(self, reason): + print 'echo failed', reason + + def channelOpen(self, ignoredData): + self.data = '' + d = self.conn.sendRequest(self, 'exec', common.NS('cat'), wantReply = 1) + d.addCallback(self._cbRequest) + + def _cbRequest(self, ignored): + self.write('hello conch\n') + self.conn.sendEOF(self) + + def dataReceived(self, data): + self.data += data + + def closed(self): + print 'got data from cat: %s' % repr(self.data) + self.loseConnection() + reactor.stop() + +protocol.ClientCreator(reactor, SimpleTransport).connectTCP(HOST, 22) +reactor.run() diff --git 
a/vendor/Twisted-10.0.0/doc/conch/examples/sshsimpleserver.py b/vendor/Twisted-10.0.0/doc/conch/examples/sshsimpleserver.py new file mode 100755 index 000000000000..7cfdf5aa742c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/sshsimpleserver.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.cred import portal, checkers +from twisted.conch import error, avatar +from twisted.conch.checkers import SSHPublicKeyDatabase +from twisted.conch.ssh import factory, userauth, connection, keys, session +from twisted.internet import reactor, protocol, defer +from twisted.python import log +from zope.interface import implements +import sys +log.startLogging(sys.stderr) + +""" +Example of running another protocol over an SSH channel. +log in with username "user" and password "password". +""" + +class ExampleAvatar(avatar.ConchUser): + + def __init__(self, username): + avatar.ConchUser.__init__(self) + self.username = username + self.channelLookup.update({'session':session.SSHSession}) + +class ExampleRealm: + implements(portal.IRealm) + + def requestAvatar(self, avatarId, mind, *interfaces): + return interfaces[0], ExampleAvatar(avatarId), lambda: None + +class EchoProtocol(protocol.Protocol): + """this is our example protocol that we will run over SSH + """ + def dataReceived(self, data): + if data == '\r': + data = '\r\n' + elif data == '\x03': #^C + self.transport.loseConnection() + return + self.transport.write(data) + +publicKey = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3/c9k2I/Az64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHRivcJSkbh/C+BR3utDS555mV' + +privateKey = """-----BEGIN RSA PRIVATE KEY----- +MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW +4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw +vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb +Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1 +xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8 +PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2 +gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu +DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML +pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP +EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg== +-----END RSA PRIVATE KEY-----""" + + +class InMemoryPublicKeyChecker(SSHPublicKeyDatabase): + + def checkKey(self, credentials): + return credentials.username == 'user' and \ + keys.getPublicKeyString(data=publicKey) == credentials.blob + +class ExampleSession: + + def __init__(self, avatar): + """ + We don't use it, but the adapter is passed the avatar as its first + argument. 
+ """ + + def getPty(self, term, windowSize, attrs): + pass + + def execCommand(self, proto, cmd): + raise Exception("no executing commands") + + def openShell(self, trans): + ep = EchoProtocol() + ep.makeConnection(trans) + trans.makeConnection(session.wrapProtocol(ep)) + + def eofReceived(self): + pass + + def closed(self): + pass + +from twisted.python import components +components.registerAdapter(ExampleSession, ExampleAvatar, session.ISession) + +class ExampleFactory(factory.SSHFactory): + publicKeys = { + 'ssh-rsa': keys.Key.fromString(data=publicKey) + } + privateKeys = { + 'ssh-rsa': keys.Key.fromString(data=privateKey) + } + services = { + 'ssh-userauth': userauth.SSHUserAuthServer, + 'ssh-connection': connection.SSHConnection + } + + +portal = portal.Portal(ExampleRealm()) +passwdDB = checkers.InMemoryUsernamePasswordDatabaseDontUse() +passwdDB.addUser('user', 'password') +portal.registerChecker(passwdDB) +portal.registerChecker(InMemoryPublicKeyChecker()) +ExampleFactory.portal = portal + +if __name__ == '__main__': + reactor.listenTCP(5022, ExampleFactory()) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/telnet_echo.tac b/vendor/Twisted-10.0.0/doc/conch/examples/telnet_echo.tac new file mode 100644 index 000000000000..9fabdb89d947 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/telnet_echo.tac @@ -0,0 +1,37 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.conch.telnet import TelnetTransport, TelnetProtocol +from twisted.internet.protocol import ServerFactory +from twisted.application.internet import TCPServer +from twisted.application.service import Application + +class TelnetEcho(TelnetProtocol): + def enableRemote(self, option): + self.transport.write("You tried to enable %r (I rejected it)\r\n" % (option,)) + return False + + + def disableRemote(self, option): + self.transport.write("You disabled %r\r\n" % (option,)) + + + def enableLocal(self, option): + self.transport.write("You tried to make me enable %r (I rejected it)\r\n" % (option,)) + return False + + + def disableLocal(self, option): + self.transport.write("You asked me to disable %r\r\n" % (option,)) + + + def dataReceived(self, data): + self.transport.write("I received %r from you\r\n" % (data,)) + + +factory = ServerFactory() +factory.protocol = lambda: TelnetTransport(TelnetEcho) +service = TCPServer(8023, factory) + +application = Application("Telnet Echo Server") +service.setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/conch/examples/window.tac b/vendor/Twisted-10.0.0/doc/conch/examples/window.tac new file mode 100644 index 000000000000..e455f0164817 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/examples/window.tac @@ -0,0 +1,190 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +# You can run this .tac file directly with: +# twistd -ny window.tac + +from __future__ import division + +import string, random + +from twisted.python import log +from twisted.internet import protocol, task +from twisted.application import internet, service +from twisted.cred import checkers, portal + +from twisted.conch.insults import insults, window +from twisted.conch.telnet import TelnetTransport, TelnetBootstrapProtocol +from twisted.conch.manhole_ssh import ConchFactory, TerminalRealm + +from twisted.internet import reactor + +class DrawableCanvas(window.Canvas): + x = 0 + y = 0 + + def func_LEFT_ARROW(self, modifier): + self.x -= 1 + self.repaint() + + def func_RIGHT_ARROW(self, modifier): + self.x += 1 + self.repaint() + + def func_UP_ARROW(self, modifier): + self.y -= 1 + self.repaint() + + def func_DOWN_ARROW(self, modifier): + self.y += 1 + self.repaint() + + def characterReceived(self, keyID, modifier): + self[self.x, self.y] = keyID + self.x += 1 + self.repaint() + + def keystrokeReceived(self, keyID, modifier): + if keyID == '\r' or keyID == '\v': + return + window.Canvas.keystrokeReceived(self, keyID, modifier) + if self.x >= self.width: + self.x = 0 + elif self.x < 0: + self.x = self.width - 1 + + if self.y >= self.height: + self.y = 0 + elif self.y < 0: + self.y = self.height - 1 + self.repaint() + + def render(self, width, height, terminal): + window.Canvas.render(self, width, height, terminal) + if self.focused: + terminal.cursorPosition(self.x, self.y) + window.cursor(terminal, self[self.x, self.y]) + + +class ButtonDemo(insults.TerminalProtocol): + width = 80 + height = 24 + + def _draw(self): + self.window.draw(self.width, self.height, self.terminal) + + def _redraw(self): + self.window.filthy() + self._draw() + + def _schedule(self, f): + reactor.callLater(0, f) + + def connectionMade(self): + self.terminal.eraseDisplay() + self.terminal.resetPrivateModes([insults.privateModes.CURSOR_MODE]) + + self.window = window.TopWindow(self._draw, self._schedule) + self.output = window.TextOutput((15, 1)) + self.input = window.TextInput(15, self._setText) + self.select1 = window.Selection(map(str, range(100)), self._setText, 10) + self.select2 = window.Selection(map(str, range(200, 300)), self._setText, 10) + self.button = window.Button("Clear", self._clear) + self.canvas = DrawableCanvas() + + hbox = window.HBox() + hbox.addChild(self.input) + hbox.addChild(self.output) + hbox.addChild(window.Border(self.button)) + hbox.addChild(window.Border(self.select1)) + hbox.addChild(window.Border(self.select2)) + + t1 = window.TextOutputArea(longLines=window.TextOutputArea.WRAP) + t2 = window.TextOutputArea(longLines=window.TextOutputArea.TRUNCATE) + t3 = window.TextOutputArea(longLines=window.TextOutputArea.TRUNCATE) + t4 = window.TextOutputArea(longLines=window.TextOutputArea.TRUNCATE) + for _t in t1, t2, t3, t4: + _t.setText((('This is a very long string. 
' * 3) + '\n') * 3) + + vp = window.Viewport(t3) + d = [1] + def spin(): + vp.xOffset += d[0] + if vp.xOffset == 0 or vp.xOffset == 25: + d[0] *= -1 + self.call = task.LoopingCall(spin) + self.call.start(0.25, now=False) + hbox.addChild(window.Border(vp)) + + vp2 = window.ScrolledArea(t4) + hbox.addChild(vp2) + + texts = window.VBox() + texts.addChild(window.Border(t1)) + texts.addChild(window.Border(t2)) + + areas = window.HBox() + areas.addChild(window.Border(self.canvas)) + areas.addChild(texts) + + vbox = window.VBox() + vbox.addChild(hbox) + vbox.addChild(areas) + self.window.addChild(vbox) + self.terminalSize(self.width, self.height) + + def connectionLost(self, reason): + self.call.stop() + insults.TerminalProtocol.connectionLost(self, reason) + + def terminalSize(self, width, height): + self.width = width + self.height = height + self.terminal.eraseDisplay() + self._redraw() + + + def keystrokeReceived(self, keyID, modifier): + self.window.keystrokeReceived(keyID, modifier) + + def _clear(self): + self.canvas.clear() + + def _setText(self, text): + self.input.setText('') + self.output.setText(text) + + +def makeService(args): + checker = checkers.InMemoryUsernamePasswordDatabaseDontUse(username="password") + + f = protocol.ServerFactory() + f.protocol = lambda: TelnetTransport(TelnetBootstrapProtocol, + insults.ServerProtocol, + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + tsvc = internet.TCPServer(args['telnet'], f) + + def chainProtocolFactory(): + return insults.ServerProtocol( + args['protocolFactory'], + *args.get('protocolArgs', ()), + **args.get('protocolKwArgs', {})) + + rlm = TerminalRealm() + rlm.chainedProtocolFactory = chainProtocolFactory + ptl = portal.Portal(rlm, [checker]) + f = ConchFactory(ptl) + csvc = internet.TCPServer(args['ssh'], f) + + m = service.MultiService() + tsvc.setServiceParent(m) + csvc.setServiceParent(m) + return m + +application = service.Application("Window Demo") + +makeService({'protocolFactory': ButtonDemo, + 'telnet': 6023, + 'ssh': 6022}).setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/conch/howto/conch_client.html b/vendor/Twisted-10.0.0/doc/conch/howto/conch_client.html new file mode 100644 index 000000000000..c801b654b727 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/howto/conch_client.html @@ -0,0 +1,318 @@ + + +Twisted Documentation: Writing a client with Twisted.Conch + + + + +

Writing a client with Twisted.Conch

+ +
+ + +

Introduction

+ +

In the original days of computing, rsh/rlogin were used to connect to +remote computers and execute commands. These commands had the problem +that the passwords and commands were sent in the clear. To solve this +problem, the SSH protocol was created. Twisted.Conch implements the +second version of this protocol.

+ +

Writing a client

+ +

Writing a client with Conch involves sub-classing 4 classes: twisted.conch.ssh.transport.SSHClientTransport, twisted.conch.ssh.userauth.SSHUserAuthClient, twisted.conch.ssh.connection.SSHConnection, and twisted.conch.ssh.channel.SSHChannel. We'll start out +with SSHClientTransport because it's the base +of the client.

+ +

The Transport

+ +

1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +10 +11 +12 +13 +14 +

from twisted.conch import error +from twisted.conch.ssh import transport +from twisted.internet import defer + +class ClientTransport(transport.SSHClientTransport): + + def verifyHostKey(self, pubKey, fingerprint): + if fingerprint != 'b1:94:6a:c9:24:92:d2:34:7c:62:35:b4:d2:61:11:84': + return defer.fail(error.ConchError('bad key')) + else: + return defer.succeed(1) + + def connectionSecure(self): + self.requestService(ClientUserAuth('user', ClientConnection())) +
+ +

See how easy it is? SSHClientTransport +handles the negotiation of encryption and the verification of keys +for you. The one security element that you as a client writer need to +implement is verifyHostKey(). This method +is called with two strings: the public key sent by the server and its +fingerprint. You should verify the host key the server sends, either +by checking against a hard-coded value as in the example, or by asking +the user. verifyHostKey returns a twisted.internet.defer.Deferred which gets a callback +if the host key is valid, or an errback if it is not. Note that in the +above, replace 'user' with the username you're attempting to ssh with, +for instance a call to os.getlogin() for the +current user.
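As an aside (not part of the original example), the same hook could defer to the user instead of comparing against a hard-coded fingerprint. The sketch below reuses the ClientTransport class and the error/defer imports from the listing above; the prompt wording is made up, and raw_input blocks the reactor, so this is only suitable for a quick demo.

    class AskingClientTransport(ClientTransport):
        # Hypothetical variant: let the user decide whether to trust the host key.
        def verifyHostKey(self, pubKey, fingerprint):
            print 'host key fingerprint: %s' % fingerprint
            answer = raw_input('accept this host key (yes/no)? ')
            if answer.strip().lower().startswith('y'):
                return defer.succeed(1)
            return defer.fail(error.ConchError('host key rejected by user'))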

+ +

The second method you need to implement is connectionSecure(). It is called when the +encryption is set up and other services can be run. The example requests +that the ClientUserAuth service be started. +This service will be discussed next.

+ +

The Authorization Client

+ +

1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +

from twisted.conch.ssh import keys, userauth + +# these are the public/private keys from test_conch + +publicKey = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEArzJx8OYOnJmzf4tfBEvLi8DVPrJ3\ +/c9k2I/Az64fxjHf9imyRJbixtQhlH9lfNjUIx+4LmrJH5QNRsFporcHDKOTwTTYLh5KmRpslkYHR\ +ivcJSkbh/C+BR3utDS555mV' + +privateKey = """-----BEGIN RSA PRIVATE KEY----- +MIIByAIBAAJhAK8ycfDmDpyZs3+LXwRLy4vA1T6yd/3PZNiPwM+uH8Yx3/YpskSW +4sbUIZR/ZXzY1CMfuC5qyR+UDUbBaaK3Bwyjk8E02C4eSpkabJZGB0Yr3CUpG4fw +vgUd7rQ0ueeZlQIBIwJgbh+1VZfr7WftK5lu7MHtqE1S1vPWZQYE3+VUn8yJADyb +Z4fsZaCrzW9lkIqXkE3GIY+ojdhZhkO1gbG0118sIgphwSWKRxK0mvh6ERxKqIt1 +xJEJO74EykXZV4oNJ8sjAjEA3J9r2ZghVhGN6V8DnQrTk24Td0E8hU8AcP0FVP+8 +PQm/g/aXf2QQkQT+omdHVEJrAjEAy0pL0EBH6EVS98evDCBtQw22OZT52qXlAwZ2 +gyTriKFVoqjeEjt3SZKKqXHSApP/AjBLpF99zcJJZRq2abgYlf9lv1chkrWqDHUu +DZttmYJeEfiFBBavVYIF1dOlZT0G8jMCMBc7sOSZodFnAiryP+Qg9otSBjJ3bQML +pSTqy7c3a2AScC/YyOwkDaICHnnD3XyjMwIxALRzl0tQEKMXs6hH8ToUdlLROCrP +EhQ0wahUTCk1gKA4uPD6TMTChavbh4K63OvbKg== +-----END RSA PRIVATE KEY-----""" + +class ClientUserAuth(userauth.SSHUserAuthClient): + + def getPassword(self, prompt = None): + return + # this says we won't do password authentication + + def getPublicKey(self): + return keys.getPublicKeyString(data = publicKey) + + def getPrivateKey(self): + return defer.succeed(keys.getPrivateKeyObject(data = privateKey)) +
+ +

Again, fairly simple. The SSHUserAuthClient takes care of most +of the work, but the actual authentication data needs to be +supplied. getPassword() asks for a +password, getPublicKey() and getPrivateKey() get public and private keys, +respectively. getPassword() returns +a Deferred that is called back with +the password to use. getPublicKey() +returns the SSH key data for the public key to use. keys.getPublicKeyString() will take +keys in OpenSSH and LSH format, and convert them to the +required format. getPrivateKey() +returns a Deferred which is +called back with the key object (as used in PyCrypto) for +the private key. getPassword() +and getPrivateKey() return Deferreds because they may need to ask the user +for input.
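For comparison, a password-only authenticator can be sketched as follows. This is an illustration rather than part of the original example: it assumes the same SSHUserAuthClient base class, uses the standard getpass module for the prompt, and declines public-key authentication entirely.

    import getpass

    from twisted.internet import defer
    from twisted.conch.ssh import userauth

    class PasswordOnlyUserAuth(userauth.SSHUserAuthClient):
        # Hypothetical client that only ever tries password authentication.
        def getPassword(self, prompt=None):
            # Returning a Deferred that fires with the password lets the base
            # class carry out password authentication.
            return defer.succeed(getpass.getpass(prompt or 'password: '))

        def getPublicKey(self):
            # Returning None means we have no public keys to offer.
            return None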

+ +

Once the authentication is complete, SSHUserAuthClient takes care of starting the +SSHConnection object given to it. Next, we'll +look at how to use the SSHConnection.


+ +

The Connection

+ +

1 +2 +3 +4 +5 +6 +

from twisted.conch.ssh import connection + +class ClientConnection(connection.SSHConnection): + + def serviceStarted(self): + self.openChannel(CatChannel(conn = self)) +
+ +

SSHConnection is the easiest, +as it's only responsible for starting the channels. It has +other methods, those will be examined when we look at SSHChannel.
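As a small, hypothetical variation on the listing above, serviceStarted may open several channels at once; each one then proceeds independently.

    class MultiChannelConnection(connection.SSHConnection):
        # Illustrative only: run the 'cat' channel from the next section twice.
        def serviceStarted(self):
            self.openChannel(CatChannel(conn=self))
            self.openChannel(CatChannel(conn=self))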

+ +

The Channel

+ +

1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +

from twisted.conch.ssh import channel, common + +class CatChannel(channel.SSHChannel): + + name = 'session' + + def channelOpen(self, data): + d = self.conn.sendRequest(self, 'exec', common.NS('cat'), + wantReply = 1) + d.addCallback(self._cbSendRequest) + self.catData = '' + + def _cbSendRequest(self, ignored): + self.write('This data will be echoed back to us by "cat."\r\n') + self.conn.sendEOF(self) + self.loseConnection() + + def dataReceived(self, data): + self.catData += data + + def closed(self): + print 'We got this from "cat":', self.catData +
+ +

Now that we've spent all this time getting the server and +client connected, here is where that work pays off. SSHChannel is the interface between you and the +other side. This particular channel opens a session and plays with the +'cat' program, but your channel can implement anything, so long as the +server supports it.

+ +

The channelOpen() method is +where everything gets started. It gets passed a chunk of data; +however, this chunk is usually nothing and can be ignored. +Our channelOpen() initializes our +channel, and sends a request to the other side, using the +sendRequest() method of the SSHConnection object. Requests are used to send +events to the other side. We pass self as the first argument so that the connection knows to +send the request for this channel. The second argument, 'exec', tells the +server that we want to execute a command. The third argument is the data +that accompanies the request. common.NS encodes +the data as a length-prefixed string, which is how the server expects +the data. We also say that we want a reply saying that the process has +been started. sendRequest() then returns a +Deferred which we add a callback for.
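To make the length-prefixed encoding concrete, here is a quick check you can run in an interactive session (the expected output in the comment follows from the 4-byte, big-endian length prefix described above):

    from twisted.conch.ssh import common

    # NS() prepends the string's length as a 32-bit network-order integer.
    print repr(common.NS('cat'))   # '\x00\x00\x00\x03cat'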

+ +

Once the callback fires, we send the data. SSHChannel supports the +twisted.internet.interfaces.ITransport interface, so +it can be given to Protocols to run them over the secure +connection. In our case, we just write the data directly. sendEOF() does not follow the interface, +but Conch uses it to tell the other side that we will write no +more data. loseConnection() shuts +down our side of the connection, but we will still receive data +through dataReceived(). The closed() method is called when both sides of the +connection are closed, and we use it to display the data we received +(which should be the same as the data we sent.)
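The point about the transport interface can be sketched as follows; Discard and ProtocolChannel are invented names for the illustration, and error handling is omitted.

    from twisted.internet import protocol
    from twisted.conch.ssh import channel, common

    class Discard(protocol.Protocol):
        # Hypothetical protocol that ignores everything the remote side sends.
        def dataReceived(self, data):
            pass

    class ProtocolChannel(channel.SSHChannel):
        name = 'session'

        def channelOpen(self, data):
            # The channel itself acts as the protocol's transport.
            self.proto = Discard()
            self.proto.makeConnection(self)
            self.conn.sendRequest(self, 'exec', common.NS('cat'))

        def dataReceived(self, data):
            # Hand incoming channel data to the wrapped protocol.
            self.proto.dataReceived(data)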

+ +

Finally, let's actually invoke the code we've set up.

+ +

The main() function

+

1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +10 +

from twisted.internet import protocol, reactor + +def main(): + factory = protocol.ClientFactory() + factory.protocol = ClientTransport + reactor.connectTCP('localhost', 22, factory) + reactor.run() + +if __name__ == "__main__": + main() +
+ +

We call connectTCP() to connect to +localhost, port 22 (the standard port for ssh), and pass it an instance +of twisted.internet.protocol.ClientFactory. +This instance has the attribute protocol +set to our earlier ClientTransport +class. Note that the protocol attribute is set to the class ClientTransport, not an instance of +ClientTransport! When the connectTCP call completes, the protocol will be +called to create a ClientTransport() object +- this then invokes all our previous work.

+ +

It's worth noting that in the example main() +routine, the reactor.run() call never returns. +If you want to make the program exit, call +reactor.stop() in the earlier +closed() method.
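Applied to the CatChannel from the listing above, that change is a one-method sketch:

    from twisted.internet import reactor

    class StoppingCatChannel(CatChannel):
        # Hypothetical variant: shut the program down once the echoed data is shown.
        def closed(self):
            print 'We got this from "cat":', self.catData
            reactor.stop()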

+ +

If you wish to observe the interactions in more detail, adding a call +to log.startLogging(sys.stdout, setStdout=0) +before the reactor.run() call will send all +logging to stdout.

+ +
+ +

Index

+ Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/conch/howto/index.html b/vendor/Twisted-10.0.0/doc/conch/howto/index.html new file mode 100644 index 000000000000..ca9140d91516 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/howto/index.html @@ -0,0 +1,28 @@ + + +Twisted Documentation: Twisted Documentation + + + + +

Twisted Documentation

+
    +
    + + + + +
    + +

    Index

    + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/conch/index.html b/vendor/Twisted-10.0.0/doc/conch/index.html new file mode 100644 index 000000000000..4f57b8c6f901 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/index.html @@ -0,0 +1,25 @@ + + +Twisted Documentation: Twisted Conch Documentation + + + + +

    Twisted Conch Documentation

    +
      +
      + + +
        +
      • Developer guides: documentation on using +Twisted Conch to develop your own applications
      • +
      • Examples: short code examples using +Twisted Conch
      • +
      + +
      + +

      Index

      + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/conch/man/cftp-man.html b/vendor/Twisted-10.0.0/doc/conch/man/cftp-man.html new file mode 100644 index 000000000000..7cdf6d9d136f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/man/cftp-man.html @@ -0,0 +1,87 @@ + + +Twisted Documentation: CFTP.1 + + + + +

      CFTP.1

      + +
      + + + +

      NAME

      + +

      cftp

      + +

      SYNOPSIS

      + +

      cftp [-B buffer_size][-b command_file][-R num_requests][-s subsystem]

      + +

      DESCRIPTION

      + +

      cftp is a client for logging into a remote machine and executing commands to send and receive file information. It can wrap a number of file transfer subsystems +

      + +

      The options are as follows: +

      -B
      Specifies the default size of the buffer to use for sending and receiving. (Default value: 32768 bytes.) +
      -b
      File to read commands from, '-' for stdin. (Default value: interactive/stdin.) +
      -R
      Number of requests to make before waiting for a reply. +
      -s
      Subsystem/server program to connect to. +
      + +

      + +

      The following commands are recognised by +cftp : +

      cd path
      Change the remote directory to 'path'. +
      chgrp gid path
      Change the gid of 'path' to 'gid'. +
      chmod mode path
      Change mode of 'path' to 'mode'. +
      chown uid path
      Change uid of 'path' to 'uid'. +
      exit
      Disconnect from the server. +
      get remote-path [local-path]
      Get remote file and optionally store it at specified local path. +
      help
      Get a list of available commands. +
      lcd path
      Change local directory to 'path'. +
      lls [ls-options] [path]
      Display local directory listing. +
      lmkdir path
      Create local directory. +
      ln linkpath targetpath
      Symlink remote file. +
      lpwd
      Print the local working directory. +
      ls [-l] [path]
      Display remote directory listing. +
      mkdir path
      Create remote directory. +
      progress
      Toggle progress bar. +
      put local-path [remote-path]
Transfer local file to remote location. +
      pwd
      Print the remote working directory. +
      quit
      Disconnect from the server. +
      rename oldpath newpath
      Rename remote file. +
      rmdir path
      Remove remote directory. +
      rm path
      Remove remote file. +
      version
      Print the SFTP version. +
      ?
      Synonym for 'help'. +
      + +
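For instance, a short hypothetical session using only the commands listed above (the cftp> prompt is illustrative):

    cftp> cd /var/log
    cftp> ls -l
    cftp> get syslog local-syslog
    cftp> put notes.txt /tmp/notes.txt
    cftp> exit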

      + +

      AUTHOR

      + +

      cftp by Paul Swartz <z3p@twistedmatrix.com>. Man page by Mary Gardiner <mary@twistedmatrix.com>. +

      + +

      REPORTING BUGS

      + +

      Report bugs to http://twistedmatrix.com/bugs/ +

      + +

      COPYRIGHT

      + +

      Copyright © 2005-2008 Twisted Matrix Laboratories +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +

      + +
      + +

      Index

      + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/conch/man/cftp.1 b/vendor/Twisted-10.0.0/doc/conch/man/cftp.1 new file mode 100644 index 000000000000..7eae88951697 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/man/cftp.1 @@ -0,0 +1,89 @@ +.Dd October 8, 2005 +.Dt CFTP 1 +.Os +.Sh NAME +.Nm cftp +.Nd Conch command-line SFTP client +.Sh SYNOPSIS +.Nm cftp +.Op Fl B Ar buffer_size +.Op Fl b Ar command_file +.Op Fl R Ar num_requests +.Op Fl s Ar subsystem +.Os +.Sh DESCRIPTION +.Nm +is a client for logging into a remote machine and executing commands to send and receive file information. It can wrap a number of file transfer subsystems +.Pp +The options are as follows: +.Bl -tag -width Ds +.It Fl B +Specifies the default size of the buffer to use for sending and receiving. (Default value: 32768 bytes.) +.It Fl b +File to read commands from, '-' for stdin. (Default value: interactive/stdin.) +.It Fl R +Number of requests to make before waiting for a reply. +.It Fl s +Subsystem/server program to connect to. +.El +.Pp +The following commands are recognised by +.Nm +: +.Bl -tag -width Ds +.It Ic cd Ar path +Change the remote directory to 'path'. +.It Ic chgrp Ar gid Ar path +Change the gid of 'path' to 'gid'. +.It Ic chmod Ar mode Ar path +Change mode of 'path' to 'mode'. +.It Ic chown Ar uid Ar path +Change uid of 'path' to 'uid'. +.It Ic exit +Disconnect from the server. +.It Ic get Ar remote-path Op Ar local-path +Get remote file and optionally store it at specified local path. +.It Ic help +Get a list of available commands. +.It Ic lcd Ar path +Change local directory to 'path'. +.It Ic lls Op Ar ls-options Op Ar path +Display local directory listing. +.It Ic lmkdir Ar path +Create local directory. +.It Ic ln Ar linkpath Ar targetpath +Symlink remote file. +.It Ic lpwd +Print the local working directory. +.It Ic ls Op Ar -l Op Ar path +Display remote directory listing. +.It Ic mkdir Ar path +Create remote directory. +.It Ic progress +Toggle progress bar. +.It Ic put Ar local-path Op Ar remote-path +Transfer local file to remote location +.It Ic pwd +Print the remote working directory. +.It Ic quit +Disconnect from the server. +.It Ic rename Ar oldpath Ar newpath +Rename remote file. +.It Ic rmdir Ar path +Remove remote directory. +.It Ic rm Ar path +Remove remote file. +.It Ic version +Print the SFTP version. +.It Ic ? +Synonym for 'help'. +.El +.Sh AUTHOR +cftp by Paul Swartz . Man page by Mary Gardiner . +.Sh "REPORTING BUGS" +Report bugs to \fIhttp://twistedmatrix.com/bugs/\fR +.Sh COPYRIGHT +Copyright \(co 2005-2008 Twisted Matrix Laboratories +.br +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. diff --git a/vendor/Twisted-10.0.0/doc/conch/man/ckeygen-man.html b/vendor/Twisted-10.0.0/doc/conch/man/ckeygen-man.html new file mode 100644 index 000000000000..44d68f1841dd --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/man/ckeygen-man.html @@ -0,0 +1,107 @@ + + +Twisted Documentation: CKEYGEN.1 + + + + +

      CKEYGEN.1

      + + + +

      Index

      + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/conch/man/ckeygen.1 b/vendor/Twisted-10.0.0/doc/conch/man/ckeygen.1 new file mode 100644 index 000000000000..a06d0390117c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/man/ckeygen.1 @@ -0,0 +1,58 @@ +.TH CKEYGEN "1" "October 2002" "" "" +.SH NAME +ckeygen \- connect to SSH servers +.SH SYNOPSIS +.B ckeygen [-b \fIbits\fR] [-f \fIfilename\fR] [-t \fItype\fR] +.B [-C \fIcomment\fR] [-N \fInew passphrase\fR] [-P \fIold passphrase\fR] +.B [-l] [-p] [-q] [-y] +.B ckeygen --help +.SH DESCRIPTION +.PP +The \fB\--help\fR prints out a usage message to standard output. +.TP +\fB-b\fR, \fB--bits\fR +Number of bits in the key to create (default: 1024) +.TP +\fB-f\fR, \fB--filename\fR +Filename of the key file. +.TP +\fB-t\fR, \fB--type\fR +Type of key (rsa or dsa). +.TP +\fB-C\fR, \fB--comment\fR +Provide a new comment. +.TP +\fB-N\fR, \fB--newpass\fR +Provide new passphrase. +.TP +\fB-P\fR, \fB--pass\fR +Provide old passphrase. +.TP +\fB-l\fR, \fB--fingerprint\fR +Show fingerprint of key file. +.TP +\fB-p\fR, \fB--changepass\fR +Change passphrase of private key file. +.TP +\fB-q\fR, \fB--quiet\fR +Be quiet. +.TP +\fB-y\fR, \fB--showpub\fR +Read private key file and print public key. +.TP +\fB--version\fR +Display version number only. +.SH DESCRIPTION +Manipulate public/private keys in various ways. +If no filename is given, a file name will be requested interactively. +.SH AUTHOR +Written by Moshe Zadka, based on ckeygen's help messages +.SH "REPORTING BUGS" +To report a bug, visit \fIhttp://twistedmatrix.com/bugs/\fR +.SH COPYRIGHT +Copyright \(co 2002-2008 Twisted Matrix Laboratories. +.br +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +.SH "SEE ALSO" +ssh(1), conch(1) diff --git a/vendor/Twisted-10.0.0/doc/conch/man/conch-man.html b/vendor/Twisted-10.0.0/doc/conch/man/conch-man.html new file mode 100644 index 000000000000..5f0477855262 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/man/conch-man.html @@ -0,0 +1,148 @@ + + +Twisted Documentation: CONCH.1 + + + + +

      CONCH.1

      + +
      + + + +

      NAME

      + +

      conch

      + +

      SYNOPSIS

      + +

      conch [-AaCfINnrsTtVvx][-c cipher_spec][-e escape_char][-i identity_file][-K connection_spec][-L port: host: hostport][-l user][-m mac_spec][-o openssh_option][-p port][-R port: host: hostport][ user@] hostname[ command]

      + +

      DESCRIPTION

      + +

conch is an SSHv2 client for logging into a remote machine and executing commands. It provides encrypted and secure communications across a possibly insecure network. Arbitrary TCP/IP ports can also be forwarded over the secure connection. +

      + +

conch connects and logs into + hostname (as + user or the current username). The user must prove her/his identity through a public key or a password. Alternatively, if a connection is already open to a server, a new shell can be opened over the connection without having to reauthenticate. +

      + +

If + command is specified, + command is executed instead of a shell. If the +-s option is given, + command is treated as an SSHv2 subsystem name. +Conch supports the public-key, keyboard-interactive, and password authentication methods. +

      + +

The public-key method allows the RSA or DSA algorithm to be used. The client uses his/her private key, $HOME/.ssh/id_rsa +or $HOME/.ssh/id_dsa, +to sign the session identifier, known only by the client and server. The server checks that the matching public key is valid for the user, and that the signature is correct. +

      + +

If public-key authentication fails, +conch can authenticate by sending an encrypted password over the connection. +conch has the ability to multiplex multiple shells, commands and TCP/IP ports over the same secure connection. To disable multiplexing for a connection, use the +-I flag. +

      + +

The +-K option determines how the client connects to the remote host. It is a comma-separated list of the methods to use, in order of preference. The two connection methods are unix +(for connecting over a multiplexed connection) and direct +(to connect directly). +To disable connecting over a multiplexed connection, do not include unix +in the preference list. +

      + +

      As an example of how connection sharing works, to speed up CVS over SSH: +

      + +

      conch --noshell --fork -l cvs_user cvs_host +set CVS_RSH=conch +

      + +

      Now, when CVS connects to cvs_host as cvs_user, instead of making a new connection to the server, +conch will add a new channel to the existing connection. This saves the cost of repeatedly negotiating the cryptography and authentication. +

      + +

      The options are as follows: +

      -A
      Enables authentication agent forwarding. +
      -a
      Disables authentication agent forwarding (default). +
      -C
      Enable compression. +
      -c
cipher_spec: Selects encryption algorithms to be used for this connection, as a comma-separated list of ciphers in order of preference. The list that +conch supports is (in order of default preference): aes256-ctr, aes256-cbc, aes192-ctr, aes192-cbc, aes128-ctr, aes128-cbc, cast128-ctr, cast128-cbc, blowfish-ctr, blowfish, idea-ctr, idea-cbc, 3des-ctr, 3des-cbc. +
      -e
ch | ^ch | none: Sets the escape character for sessions with a PTY (default: ~). +The escape character is only recognized at the beginning of a line (after a newline). +The escape character followed by a dot (.) closes the connection; +followed by ^Z suspends the connection; +and followed by the escape character sends the escape character once. +Setting the character to none disables any escapes. +
      -f
      Fork to background after authentication. +
      -I
      Do not allow connection sharing over this connection. +
      -i
identity_spec: The file from which the identity (private key) for RSA or DSA authentication is read. +The defaults are $HOME/.ssh/id_rsa +and $HOME/.ssh/id_dsa. +It is possible to use this option more than once to use more than one private key. +
      -K
connection_spec: Selects methods for connection to the server, as a comma-separated list of methods in order of preference. See the connection sharing discussion above +for more information. +
      -L
port:host:hostport: Specifies that the given port on the client host is to be forwarded to the given host and port on the remote side. This allocates a socket to listen to + port on the local side, and when connections are made to that socket, they are forwarded over the secure channel and a connection is made to + host, port + hostport, from the remote machine. +Only root can forward privileged ports (see the forwarding example after this option list). +
      -l
user: Log in using this username. +
      -m
mac_spec: Selects MAC (message authentication code) algorithms, as a comma-separated list in order of preference. The list that +conch supports is (in order of preference): hmac-sha1, hmac-md5. +
      -N
      Do not execute a shell or command. +
      -n
      Redirect input from /dev/null. +
      -o
openssh_option: Ignored OpenSSH options. +
      -p
port: The port to connect to on the server. +
      -R
port:host:hostport: Specifies that the given port on the remote host is to be forwarded to the given host and port on the local side. This allocates a socket to listen to + port on the remote side, and when connections are made to that socket, they are forwarded over the secure channel and a connection is made to + host, port + hostport, from the client host. +Only root can forward privileged ports. +
-r
      Reconnect to the server if the connection is lost. +
      -s
Invoke + command (mandatory) as an SSHv2 subsystem. +
      -T
      Do not allocate a TTY. +
      -t
      Allocate a TTY even if command is given. +
      -V
      Display version number only. +
      -v
      Log to stderr. +
      -x
      Disable X11 connection forwarding (default). +
      + +
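As an illustrative invocation combining several of these options (the host names are hypothetical), forwarding local port 8080 to a web server that is only reachable from the remote side:

    conch -l alice -L 8080:intranet.example.com:80 gateway.example.com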

      + +

      AUTHOR

      + +

      Written by Paul Swartz <z3p@twistedmatrix.com>. +

      + +

      REPORTING BUGS

      + +

      To report a bug, visit http://twistedmatrix.com/bugs/ +

      + +

      COPYRIGHT

      + +

      Copyright © 2002-2008 Twisted Matrix Laboratories. +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +

      + +

      SEE ALSO

      + +

      ssh(1) +

      + +
      + +

      Index

      + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/conch/man/conch.1 b/vendor/Twisted-10.0.0/doc/conch/man/conch.1 new file mode 100644 index 000000000000..7ba9bfff304d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/man/conch.1 @@ -0,0 +1,206 @@ +.Dd May 22, 2004 +.Dt CONCH 1 +.Os +.Sh NAME +.Nm conch +.Nd Conch SSH client +.Sh SYNOPSIS +.Nm conch +.Op Fl AaCfINnrsTtVvx +.Op Fl c Ar cipher_spec +.Op Fl e Ar escape_char +.Op Fl i Ar identity_file +.Op Fl K Ar connection_spec +.Bk -words +.Oo Fl L Xo +.Sm off +.Ar port : +.Ar host : +.Ar hostport +.Sm on +.Xc +.Oc +.Ek +.Op Fl l Ar user +.Op Fl m Ar mac_spec +.Op Fl o Ar openssh_option +.Op Fl p Ar port +.Bk -words +.Oo Fl R Xo +.Sm off +.Ar port : +.Ar host : +.Ar hostport +.Sm on +.Xc +.Oc +.Ek +.Oo Ar user Ns @ Ns Oc Ar hostname +.Op Ar command +.Sh DESCRIPTION +.Nm +is a SSHv2 client for logging into a remote machine and executing commands. It provides encrypted and secure communications across a possibly insecure network. Arbitrary TCP/IP ports can also be forwarded over the secure connection. +.Pp +.Nm +connects and logs into +.Ar hostname +(as +.Ar user +or the current username). The user must prove her/his identity through a public\-key or a password. Alternatively, if a connection is already open to a server, a new shell can be opened over the connection without having to reauthenticate. +.Pp +If +.Ar command +is specified, +.Ar command +is executed instead of a shell. If the +.Fl s +option is given, +.Ar command +is treated as an SSHv2 subsystem name. +.Ss Authentication +Conch supports the public-key, keyboard-interactive, and password authentications. +.Pp +The public-key method allows the RSA or DSA algorithm to be used. The client uses his/her private key, +.Pa $HOME/.ssh/id_rsa +or +.Pa $HOME/.ssh/id_dsa +to sign the session identifier, known only by the client and server. The server checks that the matching public key is valid for the user, and that the signature is correct. +.Pp +If public-key authentication fails, +.Nm +can authenticate by sending an encrypted password over the connection. +.Ss Connection sharing +.Nm +has the ability to multiplex multiple shells, commands and TCP/IP ports over the same secure connection. To disable multiplexing for a connection, use the +.Fl I +flag. +.Pp +The +.Fl K +option determines how the client connects to the remote host. It is a comma-separated list of the methods to use, in order of preference. The two connection methods are +.Ql unix +(for connecting over a multiplexed connection) and +.Ql direct +(to connect directly). +To disable connecting over a multiplexed connection, do not include +.Ql unix +in the preference list. +.Pp +As an example of how connection sharing works, to speed up CVS over SSH: +.Pp +.Nm +--noshell --fork -l cvs_user cvs_host +.br +set CVS_RSH=\fBconch\fR +.Pp +Now, when CVS connects to cvs_host as cvs_user, instead of making a new connection to the server, +.Nm +will add a new channel to the existing connection. This saves the cost of repeatedly negotiating the cryptography and authentication. +.Pp +The options are as follows: +.Bl -tag -width Ds +.It Fl A +Enables authentication agent forwarding. +.It Fl a +Disables authentication agent forwarding (default). +.It Fl C +Enable compression. +.It Fl c Ar cipher_spec +Selects encryption algorithms to be used for this connection, as a comma-separated list of ciphers in order of preference. 
The list that +.Nm +supports is (in order of default preference): aes256-ctr, aes256-cbc, aes192-ctr, aes192-cbc, aes128-ctr, aes128-cbc, cast128-ctr, cast128-cbc, blowfish-ctr, blowfish, idea-ctr, idea-cbc, 3des-ctr, 3des-cbc. +.It Fl e Ar ch | ^ch | none +Sets the escape character for sessions with a PTY (default: +.Ql ~ ) . +The escape character is only recognized at the beginning of a line (after a newline). +The escape character followed by a dot +.Pq Ql \&. +closes the connection; +followed by ^Z suspends the connection; +and followed by the escape character sends the escape character once. +Setting the character to +.Dq none +disables any escapes. +.It Fl f +Fork to background after authentication. +.It Fl I +Do not allow connection sharing over this connection. +.It Fl i Ar identity_spec +The file from which the identity (private key) for RSA or DSA authentication is read. +The defaults are +.Pa $HOME/.ssh/id_rsa +and +.Pa $HOME/.ssh/id_dsa . +It is possible to use this option more than once to use more than one private key. +.It Fl K Ar connection_spec +Selects methods for connection to the server, as a comma-separated list of methods in order of preference. See +.Cm Connection sharing +for more information. +.It Fl L Xo +.Sm off +.Ar port : host : hostport +.Sm on +.Xc +Specifies that the given port on the client host is to be forwarded to the given host and port on the remote side. This allocates a socket to listen to +.Ar port +on the local side, and when connections are made to that socket, they are forwarded over the secure channel and a connection is made to +.Ar host +port +.Ar hostport +from the remote machine. +Only root can forward privieged ports. +.It Fl l Ar user +Log in using this username. +.It Fl m Ar mac_spec +Selects MAC (message authentication code) algorithms, as a comma-separated list in order of preference. The list that +.Nm +supports is (in order of preference): hmac-sha1, hmac-md5. +.It Fl N +Do not execute a shell or command. +.It Fl n +Redirect input from /dev/null. +.It Fl o Ar openssh_option +Ignored OpenSSH options. +.It Fl p Ar port +The port to connect to on the server. +.It Fl R Xo +.Sm off +.Ar port : host : hostport +.Sm on +.Xc +Specifies that the given port on the remote host is to be forwarded to the given host and port on the local side. This allocates a socket to listen to +.Ar port +on the remote side, and when connections are made to that socket, they are forwarded over the secure channel and a connection is made to +.Ar host +port +.Ar hostport +from the client host. +Only root can forward privieged ports. +.It Fl s +Reconnect to the server if the connection is lost. +.It Fl s +Invoke +.Ar command +(mandatory) as a SSHv2 subsystem. +.It Fl T +Do not allocate a TTY. +.It Fl t +Allocate a TTY even if command is given. +.It Fl V +Display version number only. +.It Fl v +Log to stderr. +.It Fl x +Disable X11 connection forwarding (default). +.El +.Sh AUTHOR +Written by Paul Swartz . +.Sh "REPORTING BUGS" +To report a bug, visit \fIhttp://twistedmatrix.com/bugs/\fR +.Sh COPYRIGHT +Copyright \(co 2002-2008 Twisted Matrix Laboratories. +.br +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+.Sh SEE ALSO +ssh(1) diff --git a/vendor/Twisted-10.0.0/doc/conch/man/tkconch-man.html b/vendor/Twisted-10.0.0/doc/conch/man/tkconch-man.html new file mode 100644 index 000000000000..a335e0851a3c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/man/tkconch-man.html @@ -0,0 +1,129 @@ + + +Twisted Documentation: CONCH.1 + + + + +

      CONCH.1

      + +
      + + + +

      NAME

      + +

      tkconch - connect to SSH servers graphically +

      + +

      SYNOPSIS

      + +

tkconch [-l user] [-i identity [ -i identity ... ]] [-c cipher] [-m MAC] [-p port] [-n] [-t] [-T] [-V] [-C] [-N] [-s] [arg [...]]

      + +

tkconch --help

      + +

      DESCRIPTION

      + +

The --help option prints out a usage message to standard output. +

      -l, --user <user> +
      Log in using this user name. +
      + +
      -e, --escape <escape character> +
      Set escape character; 'none' = disable (default: ~) +
      + +
      -i, --identity <identity> +
      Add an identity file for public key authentication (default: ~/.ssh/identity) +
      + +
      -c, --cipher <cipher> +
      Cipher algorithm to use. +
      + +
      -m, --macs <mac> +
      Specify MAC algorithms for protocol version 2. +
      + +
      -p, --port <port> +
      Port to connect to. +
      + +
      -L, --localforward <listen-port:host:port> +
      Forward local port to remote address. +
      + +
      -R, --remoteforward <listen-port:host:port> +
      Forward remote port to local address. +
      + +
      -t, --tty +
      Allocate a tty even if command is given. +
      + +
      -n, --notty +
      Do not allocate a tty. +
      + +
      -V, --version +
      Display version number only. +
      + +
      -C, --compress +
      Enable compression. +
      + +
      -a, --ansilog +
      Print the received data to stdout. +
      + +
      -N, --noshell +
      Do not execute a shell or command. +
      + +
      -s, --subsystem +
      Invoke command (mandatory) as SSH2 subsystem. +
      + +
      --log +
Print the received data to stderr. +
      + +
      + +

      + +

      DESCRIPTION

      + +

      Open an SSH connection to specified server, and either run the command +given there or open a remote interactive shell. +

      + +

      AUTHOR

      + +

      Written by Moshe Zadka, based on conch's help messages +

      + +

      REPORTING BUGS

      + +

      To report a bug, visit http://twistedmatrix.com/bugs/ +

      + +

      COPYRIGHT

      + +

      Copyright © 2002-2008 Twisted Matrix Laboratories. +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +

      + +

      SEE ALSO

      + +

      ssh(1) +

      + +
      + +

      Index

      + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/conch/man/tkconch.1 b/vendor/Twisted-10.0.0/doc/conch/man/tkconch.1 new file mode 100644 index 000000000000..54260bf501b0 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/conch/man/tkconch.1 @@ -0,0 +1,72 @@ +.TH CONCH "1" "October 2002" "" "" +.SH NAME +tkconch \- connect to SSH servers graphically +.SH SYNOPSIS +.B conch [-l \fIuser\fR] [-i \fIidentity\fR [ -i \fIidentity\fR ... ]] [-c \fIcipher\fR] [-m \fIMAC\fR] [-p \fIport\fR] [-n] [-t] [-T] [-V] [-C] [-N] [-s] [arg [...]] +.PP +.B conch --help +.SH DESCRIPTION +.PP +The \fB\--help\fR prints out a usage message to standard output. +.TP +\fB-l\fR, \fB--user\fR +Log in using this user name. +.TP +\fB-e\fR, \fB--escape\fR +Set escape character; 'none' = disable (default: ~) +.TP +\fB-i\fR, \fB--identity\fR +Add an identity file for public key authentication (default: ~/.ssh/identity) +.TP +\fB-c\fR, \fB--cipher\fR +Cipher algorithm to use. +.TP +\fB-m\fR, \fB--macs\fR +Specify MAC algorithms for protocol version 2. +.TP +\fB-p\fR, \fB--port\fR +Port to connect to. +.TP +\fB-L\fR, \fB--localforward\fR +Forward local port to remote address. +.TP +\fB-R\fR, \fB--remoteforward\fR +Forward remote port to local address. +.TP +\fB-t\fR, \fB--tty\fR +Allocate a tty even if command is given. +.TP +\fB-n\fR, \fB--notty\fR +Do not allocate a tty. +.TP +\fB-V\fR, \fB--version\fR +Display version number only. +.TP +\fB-C\fR, \fB--compress\fR +Enable compression. +.TP +\fB-a\fR, \fB--ansilog\fR +Print the received data to stdout. +.TP +\fB-N\fR, \fB--noshell\fR +Do not execute a shell or command. +.TP +\fB-s\fR, \fB--subsystem\fR +Invoke command (mandatory) as SSH2 subsystem. +.TP +\fB--log\fR +Print the receieved data to stderr. +.SH DESCRIPTION +Open an SSH connection to specified server, and either run the command +given there or open a remote interactive shell. +.SH AUTHOR +Written by Moshe Zadka, based on conch's help messages +.SH "REPORTING BUGS" +To report a bug, visit \fIhttp://twistedmatrix.com/bugs/\fR +.SH COPYRIGHT +Copyright \(co 2002-2008 Twisted Matrix Laboratories. +.br +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +.SH "SEE ALSO" +ssh(1) diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/banana.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/banana.py new file mode 100644 index 000000000000..1c1f03170494 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/banana.py @@ -0,0 +1,10 @@ +#!/usr/bin/python + +from timer import timeit +from twisted.spread.banana import b1282int + +ITERATIONS = 100000 + +for length in (1, 5, 10, 50, 100): + elapsed = timeit(b1282int, ITERATIONS, "\xff" * length) + print "b1282int %3d byte string: %10d cps" % (length, ITERATIONS / elapsed) diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/deferreds.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/deferreds.py new file mode 100644 index 000000000000..a9cddd0b3466 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/deferreds.py @@ -0,0 +1,145 @@ +# Copyright (c) 2007-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +See how fast deferreds are. 
+ +This is mainly useful to compare cdefer.Deferred to defer.Deferred +""" + + +from twisted.internet import defer +from timer import timeit + +benchmarkFuncs = [] + +def benchmarkFunc(iter, args=()): + """ + A decorator for benchmark functions that measure a single iteration + count. Registers the function with the given iteration count to the global + benchmarkFuncs list + """ + def decorator(func): + benchmarkFuncs.append((func, args, iter)) + return func + return decorator + +def benchmarkNFunc(iter, ns): + """ + A decorator for benchmark functions that measure multiple iteration + counts. Registers the function with the given iteration count to the global + benchmarkFuncs list. + """ + def decorator(func): + for n in ns: + benchmarkFuncs.append((func, (n,), iter)) + return func + return decorator + +def instantiate(): + """ + Only create a deferred + """ + d = defer.Deferred() +instantiate = benchmarkFunc(100000)(instantiate) + +def instantiateShootCallback(): + """ + Create a deferred and give it a normal result + """ + d = defer.Deferred() + d.callback(1) +instantiateShootCallback = benchmarkFunc(100000)(instantiateShootCallback) + +def instantiateShootErrback(): + """ + Create a deferred and give it an exception result. To avoid Unhandled + Errors, also register an errback that eats the error + """ + d = defer.Deferred() + try: + 1/0 + except: + d.errback() + d.addErrback(lambda x: None) +instantiateShootErrback = benchmarkFunc(200)(instantiateShootErrback) + +ns = [10, 1000, 10000] + +def instantiateAddCallbacksNoResult(n): + """ + Creates a deferred and adds a trivial callback/errback/both to it the given + number of times. + """ + d = defer.Deferred() + def f(result): + return result + for i in xrange(n): + d.addCallback(f) + d.addErrback(f) + d.addBoth(f) + d.addCallbacks(f, f) +instantiateAddCallbacksNoResult = benchmarkNFunc(20, ns)(instantiateAddCallbacksNoResult) + +def instantiateAddCallbacksBeforeResult(n): + """ + Create a deferred and adds a trivial callback/errback/both to it the given + number of times, and then shoots a result through all of the callbacks. + """ + d = defer.Deferred() + def f(result): + return result + for i in xrange(n): + d.addCallback(f) + d.addErrback(f) + d.addBoth(f) + d.addCallbacks(f) + d.callback(1) +instantiateAddCallbacksBeforeResult = benchmarkNFunc(20, ns)(instantiateAddCallbacksBeforeResult) + +def instantiateAddCallbacksAfterResult(n): + """ + Create a deferred, shoots it and then adds a trivial callback/errback/both + to it the given number of times. The result is processed through the + callbacks as they are added. + """ + d = defer.Deferred() + def f(result): + return result + d.callback(1) + for i in xrange(n): + d.addCallback(f) + d.addErrback(f) + d.addBoth(f) + d.addCallbacks(f) +instantiateAddCallbacksAfterResult = benchmarkNFunc(20, ns)(instantiateAddCallbacksAfterResult) + +def pauseUnpause(n): + """ + Adds the given number of callbacks/errbacks/both to a deferred while it is + paused, and unpauses it, trigerring the processing of the value through the + callbacks. 
+ """ + d = defer.Deferred() + def f(result): + return result + d.callback(1) + d.pause() + for i in xrange(n): + d.addCallback(f) + d.addErrback(f) + d.addBoth(f) + d.addCallbacks(f) + d.unpause() +pauseUnpause = benchmarkNFunc(20, ns)(pauseUnpause) + +def benchmark(): + """ + Run all of the benchmarks registered in the benchmarkFuncs list + """ + print defer.Deferred.__module__ + for func, args, iter in benchmarkFuncs: + print func.__name__, args, timeit(func, iter, *args) + +if __name__ == '__main__': + benchmark() diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/failure.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/failure.py new file mode 100644 index 000000000000..d98cb4929b4c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/failure.py @@ -0,0 +1,66 @@ + +"""See how slow failure creation is""" + +import random +from twisted.python import failure + +random.seed(10050) +O = [0, 20, 40, 60, 80, 10, 30, 50, 70, 90] +DEPTH = 30 + +def pickVal(): + return random.choice([None, 1, 'Hello', [], {1: 1}, (1, 2, 3)]) + +def makeLocals(n): + return ';'.join(['x%d = %s' % (i, pickVal()) for i in range(n)]) + +for nLocals in O: + for i in range(DEPTH): + s = """ +def deepFailure%d_%d(): + %s + deepFailure%d_%d() +""" % (nLocals, i, makeLocals(nLocals), nLocals, i + 1) + exec s + + exec """ +def deepFailure%d_%d(): + 1 / 0 +""" % (nLocals, DEPTH) + +R = range(5000) +def fail(n): + for i in R: + try: + eval('deepFailure%d_0' % n)() + except: + failure.Failure() + +def fail_str(n): + for i in R: + try: + eval('deepFailure%d_0' % n)() + except: + str(failure.Failure()) + +class PythonException(Exception): pass + +def fail_easy(n): + for i in R: + try: + failure.Failure(PythonException()) + except: + pass + +from timer import timeit +# for i in O: +# timeit(fail, 1, i) + +# for i in O: +# print 'easy failing', i, timeit(fail_easy, 1, i) + +for i in O: + print 'failing', i, timeit(fail, 1, i) + +# for i in O: +# print 'string failing', i, timeit(fail_str, 1, i) diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/linereceiver.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/linereceiver.py new file mode 100644 index 000000000000..7f552919e375 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/linereceiver.py @@ -0,0 +1,47 @@ +import math, time + +from twisted.protocols import basic + +class CollectingLineReceiver(basic.LineReceiver): + def __init__(self): + self.lines = [] + self.lineReceived = self.lines.append + +def deliver(proto, chunks): + map(proto.dataReceived, chunks) + +def benchmark(chunkSize, lineLength, numLines): + bytes = ('x' * lineLength + '\r\n') * numLines + chunkCount = len(bytes) / chunkSize + 1 + chunks = [] + for n in xrange(chunkCount): + chunks.append(bytes[n*chunkSize:(n+1)*chunkSize]) + assert ''.join(chunks) == bytes, (chunks, bytes) + p = CollectingLineReceiver() + + before = time.clock() + deliver(p, chunks) + after = time.clock() + + assert bytes.splitlines() == p.lines, (bytes.splitlines(), p.lines) + + print 'chunkSize:', chunkSize, + print 'lineLength:', lineLength, + print 'numLines:', numLines, + print 'CPU Time: ', after - before + + + +def main(): + for numLines in 100, 1000: + for lineLength in (10, 100, 1000): + for chunkSize in (1, 500, 5000): + benchmark(chunkSize, lineLength, numLines) + + for numLines in 10000, 50000: + for lineLength in (1000, 2000): + for chunkSize in (51, 500, 5000): + benchmark(chunkSize, lineLength, numLines) + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/task.py 
b/vendor/Twisted-10.0.0/doc/core/benchmarks/task.py new file mode 100644 index 000000000000..e3d437b42c97 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/task.py @@ -0,0 +1,26 @@ + +""" +Benchmarks for L{twisted.internet.task}. +""" + +from timer import timeit + +from twisted.internet import task + +def test_performance(): + """ + L{LoopingCall} should not take long to skip a lot of iterations. + """ + clock = task.Clock() + call = task.LoopingCall(lambda: None) + call.clock = clock + + call.start(0.1) + clock.advance(1000000) + + +def main(): + print "LoopingCall large advance takes", timeit(test_performance, iter=1) + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/timer.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/timer.py new file mode 100644 index 000000000000..4b15a0353300 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/timer.py @@ -0,0 +1,24 @@ +# Copyright (c) 2007-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Helper stuff for benchmarks. +""" + +import gc +gc.disable() +print 'Disabled GC' + +def timeit(func, iter = 1000, *args, **kwargs): + """ + timeit(func, iter = 1000 *args, **kwargs) -> elapsed time + + calls func iter times with args and kwargs, returns time elapsed + """ + + from time import time as currentTime + r = range(iter) + t = currentTime() + for i in r: + func(*args, **kwargs) + return currentTime() - t diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/tpclient.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/tpclient.py new file mode 100644 index 000000000000..9e5e082b0b02 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/tpclient.py @@ -0,0 +1,60 @@ +"""Throughput test.""" + +import time, sys +from twisted.internet import reactor, protocol +from twisted.python import log + +TIMES = 10000 +S = "0123456789" * 1240 + +toReceive = len(S) * TIMES + +class Sender(protocol.Protocol): + + def connectionMade(self): + start() + self.numSent = 0 + self.received = 0 + self.transport.registerProducer(self, 0) + + def stopProducing(self): + pass + + def pauseProducing(self): + pass + + def resumeProducing(self): + self.numSent += 1 + self.transport.write(S) + if self.numSent == TIMES: + self.transport.unregisterProducer() + self.transport.loseConnection() + + def connectionLost(self, reason): + shutdown(self.numSent == TIMES) + + +started = None + +def start(): + global started + started = time.time() + +def shutdown(success): + if not success: + raise SystemExit, "failure or something" + passed = time.time() - started + print "Throughput (send): %s kbytes/sec" % ((toReceive / passed) / 1024) + reactor.stop() + + +def main(): + f = protocol.ClientFactory() + f.protocol = Sender + reactor.connectTCP(sys.argv[1], int(sys.argv[2]), f) + reactor.run() + + +if __name__ == '__main__': + #log.startLogging(sys.stdout) + main() diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/tpclient_nt.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/tpclient_nt.py new file mode 100644 index 000000000000..a8170d77b255 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/tpclient_nt.py @@ -0,0 +1,22 @@ +"""Non-twisted throughput client.""" + +import socket, time, sys + +TIMES = 50000 +S = "0123456789" * 1024 +sent = len(S) * TIMES + +def main(): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((sys.argv[1], int(sys.argv[2]))) + start = time.time() + i = 0 + while i < TIMES: + i += 1 + s.sendall(S) + passed = time.time() - start + print "Throughput: %s 
kbytes/sec" % ((sent / passed) / 1024) + s.close() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/tpserver.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/tpserver.py new file mode 100644 index 000000000000..49024e1409e4 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/tpserver.py @@ -0,0 +1,19 @@ +"""Throughput server.""" + +import sys + +from twisted.protocols.wire import Discard +from twisted.internet import protocol, reactor +from twisted.python import log + + +def main(): + f = protocol.ServerFactory() + f.protocol = Discard + reactor.listenTCP(8000, f) + reactor.run() + + +if __name__ == '__main__': + main() + diff --git a/vendor/Twisted-10.0.0/doc/core/benchmarks/tpserver_nt.py b/vendor/Twisted-10.0.0/doc/core/benchmarks/tpserver_nt.py new file mode 100644 index 000000000000..e4bfddad791c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/benchmarks/tpserver_nt.py @@ -0,0 +1,22 @@ +"""Non-twisted throughput server.""" + +import socket, signal, sys + +def signalhandler(*args): + print "alarm!" + sys.stdout.flush() + +signal.signal(signal.SIGALRM, signalhandler) + +s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) +s.bind(('', 8001)) +s.listen(1) +while 1: + c, (h, p) = s.accept() + c.settimeout(30) + signal.alarm(5) + while 1: + d = c.recv(16384) + if not d: + break + c.close() diff --git a/vendor/Twisted-10.0.0/doc/core/development/index.html b/vendor/Twisted-10.0.0/doc/core/development/index.html new file mode 100644 index 000000000000..63bf02954311 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/index.html @@ -0,0 +1,27 @@ + + +Twisted Documentation: Development of Twisted + + + + +

      Development of Twisted

      +
        +
        + + +

        This documentation is for people who work on the Twisted codebase itself, +rather than for people who want to use Twisted in their own projects.

        + +
        + +

        Index

        + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/development/listings/new_module_template.py b/vendor/Twisted-10.0.0/doc/core/development/listings/new_module_template.py new file mode 100644 index 000000000000..ec3c2e5c2ae6 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/listings/new_module_template.py @@ -0,0 +1,12 @@ +# -*- test-case-name: -*- + +# Copyright (c) 2008 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +Docstring goes here. +""" + + +__all__ = [] diff --git a/vendor/Twisted-10.0.0/doc/core/development/naming.html b/vendor/Twisted-10.0.0/doc/core/development/naming.html new file mode 100644 index 000000000000..5ec66c2d47cb --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/naming.html @@ -0,0 +1,38 @@ + + +Twisted Documentation: Naming Conventions + + + + +

        Naming Conventions

        +
          +
          + + + +

          While this may sound like a small detail, clear method naming is important to provide an API that developers familiar with event-based programming can pick up quickly.

          + +

          Since the idea of a method call maps very neatly onto that of a received event, all event handlers are simply methods named after past-tense verbs. All class names are descriptive nouns, designed to mirror the is-a relationship of the abstractions they implement. All requests for notification or transmission are present-tense imperative verbs.

          + +

          Here are some examples of this naming scheme:

          + +
            +
          • An event notification of data received from peer: +dataReceived(data)
          • +
          • A request to send data: write(data)
          • +
          • A class that implements a protocol: Protocol
          • +
          + +
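A small sketch of how these conventions look in code (Echo is an illustrative name; Protocol, dataReceived and write are the Twisted names discussed above):

    from twisted.internet.protocol import Protocol

    class Echo(Protocol):
        def dataReceived(self, data):
            # Past-tense event handler: bytes have been received from the peer.
            self.transport.write(data)   # present-tense imperative: send data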

          The naming is platform neutral. This means that the names are equally appropriate in a wide variety of environments, as long as they can publish the required events.

          + +

          It is self-consistent. Things that deal with TCP use the acronym TCP, and it is always capitalized. Dropping, losing, terminating, and closing the connection are all referred to as losing the connection. This symmetrical naming allows developers to easily locate other API calls if they have learned a few related to what they want to do.

          + +

          It is semantically clear. The semantics of dataReceived are simple: there are some bytes available for processing. This remains true even if the lower-level machinery to get the data is highly complex.

          + +
          + +

          Index

          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/development/philosophy.html b/vendor/Twisted-10.0.0/doc/core/development/philosophy.html new file mode 100644 index 000000000000..c74591c95777 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/philosophy.html @@ -0,0 +1,58 @@ + + +Twisted Documentation: Philosophy + + + + +

          Philosophy

          + + + +

          Index

          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/development/policy/coding-standard.html b/vendor/Twisted-10.0.0/doc/core/development/policy/coding-standard.html new file mode 100644 index 000000000000..48b7b1de40f5 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/policy/coding-standard.html @@ -0,0 +1,809 @@ + + +Twisted Documentation: Twisted Coding Standard + + + + +

          Twisted Coding Standard

          + +
          + + +

          Naming

          + +

          Try to choose names which are both easy to remember and + meaningful. Some silliness is OK at the module naming level + (see twisted.spread...) but when + choosing class names, be as precise as possible.

          + +

          Try to avoid overloaded terms. This rule is often broken, + since it is incredibly difficult, as most normal words have + already been taken by some other software. More importantly, + try to avoid meaningless words. In particular, words like + handler, processor, engine, manager + and component don't really indicate what something does, + only that it does something.

          + +

          Use American spelling in both names and docstrings. For compound + technical terms such as 'filesystem', use a non-hyphenated spelling in + both docstrings and code in order to avoid unnecessary + capitalization.

          + +

          Testing

          + +

          Overview

          + +

          Twisted development should always be + + test-driven. The complete test suite in trunk@HEAD is required to + be passing on + supported platforms at all times. Regressions in the test suite + are addressed by reverting whatever revisions introduced them. For + complete documentation about testing Twisted itself, refer to the + Test Standard. What follows is + intended to be a synopsis of the most important points.

          + +

          Test Suite

          + +

          The Twisted test suite is spread across many subpackages of the + twisted package. Many tests are in + twisted.test. Others can be found at places such as + twisted.web.test or twisted.internet.test. + Parts of the Twisted test suite may serve as good examples of how to + write tests for Twisted or for Twisted-based libraries (newer parts of + the test suite are generally better examples than older parts - check + when the code you are looking at was written before you use it as an + example of what you should write). The names of test modules should + begin with test_ so that they are automatically discoverable by + test runners such as Trial. Twisted's unit tests are written using + twisted.trial, an xUnit library which has been + extensively customized for use in testing Twisted and Twisted-based + libraries.

          + +

          Implementation (ie, non-test) source files should begin with a + test-case-name tag which gives the name of any test + modules or packages which exercise them. This lets tools discover a + subset of the entire test suite which they can run first to find tests + which might be broken by a particular change.

          + +
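For example, an implementation module such as twisted/mail/pop3.py (used as an example below) would begin with a tag naming its tests:

    # -*- test-case-name: twisted.mail.test.test_pop3 -*-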

          It is strongly suggested that developers learn to use Emacs, and use + the twisted-dev.el file included in the TwistedEmacs + package to bind the F9 key to run unit tests and bang on it + frequently. Support for other editors is unavailable at this time but + we would love to provide it.

          + +

          To run the whole Twisted test without using emacs, use trial:

          + +
          +$ bin/trial twisted
          +    
          + +

          To run an individual test module, such as + twisted/mail/test/test_pop3.py, specify the module + name:

          + +
          +$ bin/trial twisted.mail.test.test_pop3
          +    
          + +

          To run the tests associated with a particular implementation file, + such as twisted/mail/pop3.py, use the + testmodule option:

          + +
          +$ bin/trial twisted/mail/pop3.py
          +    
          + +

          All unit test methods should have docstrings specifying at a high + level the intent of the test. That is, a description that users of the + method would understand.

          + +

          If you modify, or write a new, HOWTO, please read the Lore + documentation to learn how to format the docs.

          + +

          Copyright Header

          + +

          Whenever a new file is added to the repository, add the following + license header at the top of the file, including the year the file was + added. For example:

          + +

          1 +2 +

          # Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. +
          + +

          When you update existing files, make sure the year in the copyright + header is up to date as well. You should add a new copyright header when + it's completely missing in the file that is being edited.

          + +

          Whitespace

          + +

          Indentation is 4 spaces per indent. Tabs are not allowed. It + is preferred that every block appear on a new line, so that + control structure indentation is always visible.

          + +

          Lines are flowed at 79 columns. They must not have trailing + whitespace. Long lines must be wrapped using implied line continuation + inside parentheses; backslashes aren't allowed. To handle long import + lines, please repeat the import like this:

          + +

          1 +2 +

          from very.long.package import foo, bar, baz +from very.long.package import qux, quux, quuux +
          + +

          Top-level classes and functions must be separated with 3 blank lines, + and class-level functions with 2 blank lines. The control-L (i.e. ^L) form + feed character must not be used.

          + +
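A short illustrative sketch of that spacing:

    def topLevelFunction():
        pass



    class SomeClass(object):
        def firstMethod(self):
            pass


        def secondMethod(self):
            pass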

          Modules

          + +

          Modules must be named in all lower-case, preferably short, + single words. If a module name contains multiple words, they + may be separated by underscores or not separated at all.

          + +

          Modules must have a copyright message, a docstring and a + reference to a test module that contains the bulk of its tests. + Use this template:

          + +
          + +
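The template referred to here is the new_module_template.py listing added elsewhere in this patch:

    # -*- test-case-name: -*-

    # Copyright (c) 2008 Twisted Matrix Laboratories.
    # See LICENSE for details.


    """
    Docstring goes here.
    """


    __all__ = []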

          In most cases, modules should contain more than one class, + function, or method; if a module contains only one object, + consider refactoring to include more related functionality in + that module.

          + +

          Depending on the situation, it is acceptable to have imports that + look like this: +

          1 +

          from twisted.internet.defer import Deferred +
          + or like this: +

          1 +

          from twisted.internet import defer +
          + That is, modules should import modules or classes and + functions, but not packages.

          + +

          Wildcard import syntax may not be used by code in Twisted. These + imports lead to code which is difficult to read and maintain by + introducing complexity which strains human readers and automated tools + alike. If you find yourself with many imports to make from a single + module and wish to save typing, consider importing the module itself, + rather than its attributes.

          + +

          Relative imports (or sibling imports) may not be + used by code in Twisted. Relative imports allow certain circularities + to be introduced which can ultimately lead to unimportable modules or + duplicate instances of a single module. Relative imports also make the + task of refactoring more difficult.

          + +

          In case of local names conflicts due to import, use the as + syntax, for example: +

          1 +

          from twisted.trial import util as trial_util +

          + +

          The encoding must always be ASCII, so no coding cookie is necessary.

          + +

          Packages

          + +

          Package names should follow the same conventions as module + names. All modules must be encapsulated in some package. Nested + packages may be used to further organize related modules.

          + +

          __init__.py must never contain anything other than a + docstring and (optionally) an __all__ attribute. Packages are + not modules and should be treated differently. This rule may be + broken to preserve backwards compatibility if a module is made + into a nested package as part of a refactoring.

          + +

          If you wish to promote code from a module to a package, for + example, to break a large module out into several smaller + files, the accepted way to do this is to promote from within + the module. For example,

          + +

          1 +2 +3 +4 +5 +6 +7 +8 +9 +

          # parent/ +# --- __init__.py --- +import child + +# --- child.py --- +import parent +class Foo: + pass +parent.Foo = Foo +
          + +

          Every package should be added to the list in + setup.py.

          + +

Packages must not depend circularly upon each other. To simplify + maintaining this state, packages must also not import each other + circularly. While this applies to all packages within Twisted, one package, + twisted.python, deserves particular attention, as it may + not depend on any other Twisted package.

          + +

          String Formatting Operations

          + +

          When using string formatting + operations like formatString % values you should always + use a tuple if you're using non-mapping values. This is to + avoid unexpected behavior when you think you're passing in a single value, + but the value is unexpectedly a tuple, e.g.:

          + +

          1 +2 +

          def foo(x): + return "Hi %s\n" % x +
          + +

          The example shows you can pass in foo("foo") or + foo(3) fine, but if you pass in foo((1,2)), + it raises a TypeError. You should use this instead:

          + +

          1 +2 +

          def foo(x): + return "Hi %s\n" % (x,) +
          + +

          Docstrings

          + +

          Docstrings should always be used to describe the + purpose of methods, functions, classes, and modules.

          + +

          Docstrings are never to be used to provide semantic + information about an object; this rule may be violated if the + code in question is to be used in a system where this is a + requirement (such as Zope).

          + +

          Docstrings should be indented to the level of the code they + are documenting.

          + +

          Docstrings should be triple-quoted. The opening and the closing of the + docstrings should be on a line by themselves. For example: +

          1 +2 +3 +4 +5 +6 +7 +8 +

          class Ninja(object): + """ + A L{Ninja} is a warrior specializing in various unorthodox arts of war. + """ + def attack(self, someone): + """ + Attack C{someone} with this L{Ninja}'s shuriken. + """ +
          +

          + +

          Docstrings should be written in epytext format; more + documentation is available in the + Epytext Markup Language documentation.

          + +

          Additionally, to accommodate emacs users:

          + +
            +
          • Single quotes of the type of the docstring's triple-quote + should be escaped. This will prevent font-lock from + accidentally fontifying large portions of the file as a + string.
          • + +
          • Code examples in docstrings should be prefixed by the | + character. This will prevent IM-Python from regarding sample + code as real functions, methods, and classes.
          • +
          + +

          For example,

          +

          1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +

          def foo2bar(f): + """ + Convert L{foo}s to L{bar}s. + + A function that should be used when you have a C{foo} but you want a + C{bar}; note that this is a non-destructive operation. If this method + can't convert the C{foo} to a C{bar} it will raise a L{FooException}. + + @param f: C{foo} + @type f: str + + For example:: + + | import wombat + | def sample(something): + | f = something.getFoo() + | f.doFooThing() + | b = wombat.foo2bar(f) + | b.doBarThing() + | return b + + """ + # Optionally, actual code can go here. +
          + +

          Comments

          + +

          Comments marked with XXX or TODO must contain a reference to the + associated ticket.

          + +

          Versioning

          + +

          The API documentation should be marked up with version information. + When a new API is added the class should be marked with the epytext + @since: field including the version number when + the change was introduced, eg. @since: 8.1.

          + +

          Scripts

          + +

          For each script, that is, a program you expect a Twisted user + to run from the command-line, the following things must be done:

          + +
            +
          1. Write a module in twisted.scripts + which contains a callable global named run. This + will be called by the command line part with no arguments (it + will usually read sys.argv). Feel free to write more + functions or classes in this module, if you feel they are useful + to others.
          2. + +
          3. Create a file which contains a shebang line for Python. For Twisted + Core, this file should be placed in the bin/ directory; for + example, bin/twistd. For sub-projects, it should be placed + in bin/<subproject>; for example, the key-generation tool + for the Conch sub-project is in bin/conch/ckeygen. +

            1 +

            #!/usr/bin/env python +
          4. + +

            To make sure that the script is portable across different UNIX like + operating systems we use the /usr/bin/env command. The env + command allows you to run a program in a modified environment. That way + you don't have to search for a program via the PATH environment + variable. This makes the script more portable but note that it is not a + foolproof method. Always make sure that /usr/bin/env exists or + use a softlink/symbolic link to point it to the correct path. Python's + distutils will rewrite the shebang line upon installation so this policy + only covers the source files in version control.

            + +
          5. Add the Twisted running-from-SVN header: +

            1 +2 +3 +4 +5 +6 +7 +8 +9 +

            ### Twisted Preamble +# This makes sure that users don't have to set up their environment +# specially in order to run these programs from bin/. +import sys, os, string +if string.find(os.path.abspath(sys.argv[0]), os.sep+'Twisted') != -1: + sys.path.insert(0, os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))) +if not hasattr(os, "getuid") or os.getuid() != 0: + sys.path.insert(0, os.getcwd()) +### end of preamble +
          6. + +
          7. And end with: +

            1 +2 +

            from twisted.scripts.yourmodule import run +run() +
          8. + +
          9. Write a manpage and add it to the man folder + of a subproject's doc folder. On Debian systems + you can find a skeleton example of a manpage in + /usr/share/doc/man-db/examples/manpage.example.
          10. +
          + +

This will ensure your program works correctly for users of SVN, + Windows releases and Debian packages.

          + +

          Examples

          + +

          For example scripts you expect a Twisted user + to run from the command-line, add this Python shebang line at the top + of the file:

          +

          1 +

          #!/usr/bin/env python +
          + +

          Standard Library Extension Modules

          + +

          When using the extension version of a module for which there is also + a Python version, place the import statement inside a try/except block, + and import the Python version if the import fails. This allows code to + work on platforms where the extension version is not available. For + example: + +

try:
    import cPickle as pickle
except ImportError:
    import pickle
          + + Use the "as" syntax of the import statement as well, to set + the name of the extension module to the name of the Python module.

          + +

          Some modules don't exist across all supported Python versions. For example, Python 2.3's sets module was deprecated in Python 2.6 in favor of the set and frozenset builtins. When you need to use sets or frozensets in your code, please use twisted.python.compat.set and twisted.python.compat.frozenset. There are some differences between sets.Set and set, which are explained in the set PEP. Please be sure not to rely on the behavior of one or the other implementation.
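          A short sketch of the recommended import, assuming the twisted.python.compat names described above (the variable is invented for illustration):

from twisted.python.compat import set

seenHosts = set()
seenHosts.add("example.com")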

          + +

          Classes

          + +

          Classes are to be named in mixed case, with the first letter capitalized; each word is separated by having its first letter capitalized. Acronyms should be capitalized in their entirety. Class names should not be prefixed with the name of the module they are in. Examples of classes meeting these criteria:

          + +
            +
          • twisted.spread.pb.ViewPoint
          • +
          • twisted.parser.patterns.Pattern
          • +
          + +

          Examples of classes not meeting these criteria:

          + +
            +
          • event.EventHandler
          • +
          • main.MainGadget
          • +
          + +

          An effort should be made to prevent class names from clashing + with each other between modules, to reduce the need for + qualification when importing. For example, a Service subclass + for Forums might be named twisted.forum.service.ForumService, + and a Service subclass for Words might be + twisted.words.service.WordsService. Since neither of these + modules are volatile (see above) the classes may be + imported directly into the user's namespace and not cause + confusion.

          + +

          New-style Classes

          + +

          Classes and instances in Python come in two flavors: old-style or classic, and new-style. Up to Python 2.1, old-style classes were the only flavor available to the user; new-style classes were introduced in Python 2.2 to unify classes and types. All classes added to Twisted should be written as new-style classes. If x is an instance of a new-style class, then type(x) is the same as x.__class__.
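          For example, a new class is declared as a subclass of object (the class name below is made up for illustration), and the type/__class__ identity then holds:

class ConnectionTracker(object):
    pass

tracker = ConnectionTracker()
assert type(tracker) is tracker.__class__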

          + +

          Methods

          + +

          Methods should be in mixed case, with the first letter lower + case, each word separated by having its first letter + capitalized. For example, someMethodName, + method.

          + +

          Sometimes, a class will dispatch to a specialized sort of + method using its name; for example, twisted.reflect.Accessor. + In those cases, the type of method should be a prefix in all + lower-case with a trailing underscore, so method names will + have an underscore in them. For example, get_someAttribute. + Underscores in method names in twisted code are therefore + expected to have some semantic associated with them.
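          As an illustrative sketch of that naming convention only (the class and attribute names are invented, and the dispatch machinery itself is omitted): the lower-case prefix plus an underscore identifies the kind of method, while the remainder keeps the usual mixed-case style.

class Preferences:
    def get_displayMode(self):
        return self._displayMode

    def set_displayMode(self, value):
        self._displayMode = value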

          + +

          Some methods, in particular addCallback and its cousins, return self to allow for chaining calls. In this case, wrap the chain in parentheses, and start each chained call on a separate line, for example:

          + +

return (foo()
        .addCallback(bar)
        .addCallback(thud)
        .addCallback(wozers))
          + +

          Callback Arguments

          + +

          There are several methods whose purpose is to help the user set up + callback functions, for example Deferred.addCallback or the + reactor's callLater method. To make + access to the callback as transparent as possible, most of these methods + use **kwargs to capture arbitrary arguments + that are destined for the user's callback. This allows the call to the + setup function to look very much like the eventual call to the target + callback function.

          + +

          In these methods, take care to not have other argument names that will + steal the user's callback's arguments. When sensible, prefix these + internal argument names with an underscore. For example, RemoteReference.callRemote is + meant to be called like this:

          + +

myref.callRemote("addUser", "bob", "555-1212")

# on the remote end, the following method is invoked:
def addUser(name, phone):
    ...
          + +

          where addUser is the remote method name. The user might also + choose to call it with named parameters like this:

          + +

myref.callRemote("addUser", name="bob", phone="555-1212")
          + +

          In this case, callRemote (and any code that uses the **kwargs syntax) must be careful not to use name, phone, or any other name that might overlap with a user-provided named parameter. Therefore, callRemote is implemented with the following signature:

          + +

def callRemote(self, _name, *args, **kw):
    ...
          + +

          Do whatever you can to reduce user confusion. It may also be appropriate to assert that the kwargs dictionary does not contain parameters with names that will eventually cause problems.
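          One way to follow that advice, sketched here rather than taken from Twisted's actual implementation (the reserved names and the dispatch function are invented), is to check the captured keyword arguments up front:

_RESERVED_NAMES = ("callback", "errback")

def scheduleCall(_func, *args, **kw):
    for name in _RESERVED_NAMES:
        assert name not in kw, (
            "%r is reserved for internal use; please rename that "
            "keyword argument" % (name,))
    return _func(*args, **kw)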

          + + +

          Special Methods

          + +

          The augmented assignment protocol, defined by __iadd__ and other similarly named methods, can be used to allow objects to be modified in place or to rebind names if an object is immutable -- both through use of the same operator. This can lead to confusing code, which in turn leads to buggy code. For this reason, methods of the augmented assignment protocol should not be used in Twisted.
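          The confusion is easy to demonstrate: the same += statement mutates a mutable object in place but silently rebinds the name when the object is immutable (the values below are just for illustration).

items = [1, 2]
alias = items
items += [3]          # list is mutated in place; alias sees the change
assert alias == [1, 2, 3]

point = (1, 2)
alias = point
point += (3,)         # tuple is immutable; the name is rebound instead
assert alias == (1, 2)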

          + +

          Functions

          + +

          Functions should be named similarly to methods.

          + +

          Functions or methods which are responding to events to complete a callback or errback should be named _cbMethodName or _ebMethodName, in order to distinguish them from normal methods.
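          For example (a sketch only; the deferred below is fired with a canned value purely for illustration):

from twisted.internet import defer

def _cbGotUser(user):
    print "found user:", user

def _ebGotUser(failure):
    failure.trap(KeyError)
    print "no such user"

d = defer.succeed("bob")
d.addCallbacks(_cbGotUser, _ebGotUser)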

          + +

          Attributes

          + +

          Attributes should be named similarly to functions and + methods. Attributes should be named descriptively; attribute + names like mode, type, and + buf are generally discouraged. Instead, use + displayMode, playerType, or + inputBuffer.

          + +

          Do not use Python's private attribute syntax; prefix + non-public attributes with a single leading underscore. Since + several classes have the same name in Twisted, and they are + distinguished by which package they come from, Python's + double-underscore name mangling will not work reliably in some + cases. Also, name-mangled private variables are more difficult + to address when unit testing or persisting a class.

          + +

          An attribute (or function, method or class) should be + considered private when one or more of the following conditions + are true:

          + +
          • The attribute represents intermediate state which is not always kept up-to-date.
          • Referring to the contents of the attribute or otherwise maintaining a reference to it may cause resources to leak.
          • Assigning to the attribute will break internal assumptions.
          • The attribute is part of a known-to-be-sub-optimal interface and will certainly be removed in a future release.
          + + +

          Database

          + +

          Database tables will be named with plural nouns.

          + +

          Database columns will be named with underscores between + words, all lower case, since most databases do not distinguish + between case.

          + +

          Any attribute, method argument, or method name that + corresponds directly to a column in the database will + be named exactly the same as that column, regardless of other + coding conventions surrounding that circumstance.

          + +

          All SQL keywords should be in upper case.
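          Taken together, the rules above look roughly like the following sketch; the database, table, and column names are invented for illustration:

from twisted.enterprise import adbapi

dbpool = adbapi.ConnectionPool("sqlite3", "example.db")

def getActivePlayers():
    # plural table name, lower_case column names, upper-case SQL keywords
    return dbpool.runQuery(
        "SELECT player_name, display_mode FROM players WHERE is_active = 1")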

          + +

          C Code

          + +

          Wherever possible, C code should be optional, and the + default python implementation should be maintained in tandem + with it. C code should be strict ANSI C, and + must build using GCC as well as Visual Studio + for Windows, and really shouldn't have any problems with other + compilers either. Don't do anything tricky.

          + +

          C code should only be used for efficiency, not for binding + to external libraries. If your particular code is not + frequently run, write it in Python. If you require the use of + an external library, develop a separate, external bindings + package and make your twisted code depend on it.

          + +

          Commit Messages

          + +

          The commit messages are being distributed in a myriad of ways. Because + of that, you need to observe a few simple rules when writing a commit + message.

          + +

          The first line of the message is used as both the subject of the commit email and the announcement on #twisted. Therefore, it should be short (aim for < 80 characters) and descriptive -- and must be able to stand alone (it is best if it is a complete sentence). The rest of the message should be separated with hard line breaks into short lines (< 70 characters). This is free-format, so you can do whatever you like here.

          + +

          Commit messages should be about what, not how: we can + get how from SVN diff. Explain reasons for commits, and what they + affect.

          + +

          Each commit should be a single logical change, which is internally + consistent. If you can't summarize your changes in one short line, this + is probably a sign that they should be broken into multiple checkins.

          + +

          Source Control

          + +

          Twisted currently uses Subversion for source control. All + development should occur using branches; when a task is + considered complete another Twisted developer may review it and if no + problems are found, it may be merged into trunk. The Twisted wiki has a start. + Branches must be used for major development. Branches + should be managed using Combinator (but + if you can manage them in some other way without anyone noticing, knock + yourself out).

          + +

          Certain features of Subversion should be avoided.

          + +
            +
          • + +

            Do not set the svn:ignore property on any file or directory. What you wish to ignore, others may wish to examine. What others may wish to ignore, you may wish to examine. svn:ignore will affect everyone who uses the repository, and so it is not the right mechanism to express personal preferences.

            + +

            If you wish to ignore certain files use the + global-ignores feature of + ~/.subversion/config , for example:

            + +
            +[miscellany]
            +global-ignores = dropin.cache *.pyc *.pyo *.o *.lo *.la #*# .*.rej *.rej .*~
            +    
            + +
          • +
          + +

          Fallback

          + +

          For conventions not covered in this document, the reference documents to fall back on are PEP 8 for Python code and PEP 7 for C code. For example, the section Whitespace in Expressions and Statements in PEP 8 describes what should be done in Twisted code.

          + +

          Recommendations

          + +

          These things aren't necessarily standardizable (in that code can't be easily checked for compliance), but they are a good idea to keep in mind while working on Twisted.

          + +

          If you're going to work on a fragment of the Twisted + codebase, please consider finding a way that you would use + such a fragment in daily life. Using a Twisted Web server on your + website encourages you to actively maintain and improve your code, + as the little everyday issues with using it become apparent.

          + +

          Twisted is a big codebase! If you're + refactoring something, please make sure to recursively grep for + the names of functions you're changing. You may be surprised to + learn where something is called. Especially if you are moving + or renaming a function, class, method, or module, make sure + that it won't instantly break other code.

          + +
          + +

          Index

          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/development/policy/doc-standard.html b/vendor/Twisted-10.0.0/doc/core/development/policy/doc-standard.html new file mode 100644 index 000000000000..f76629036245 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/policy/doc-standard.html @@ -0,0 +1,188 @@ + + +Twisted Documentation: HTML Documentation Standard for Twisted + + + + +

          HTML Documentation Standard for Twisted

          + +
          + + +

          Allowable Tags

          + +

          Please try to restrict your HTML usage to the following tags (all only for the original logical purpose, and not whatever visual effect you see): <html>, <title>, <head>, <body>, <h1>, <h2>, <h3>, <ol>, <ul>, <dl>, <li>, <dt>, <dd>, <p>, <code>, <img>, <blockquote>, <a>, <cite>, <div>, <span>, <strong>, <em>, <pre>, <q>, <table>, <tr>, <td> and <th>.

          + +

          Please avoid using the quote sign (") for quoting, and use the relevant html tags (<q></q>) -- it is impossible to distinguish right and left quotes with the quote sign, and some more sophisticated output methods work better with that distinction.

          + +

          Multi-line Code Snippets

          + +

          Multi-line code snippets should be delimited with a + <pre> tag, with a mandatory class attribute. The + conventionalized classes are python, python-interpreter, + and shell. For example:

          + +

          python

          +
          +    <p>
          +    For example, this is how one defines a Resource:
          +    </p>
          +
          +    <pre class="python">
          +from twisted.web import resource
          +
          +class MyResource(resource.Resource):
          +    def render_GET(self, request):
          +        return "Hello, world!"
          +    </pre>
          +
          + +

          For example, this is how one defines a Resource:

          +

from twisted.web import resource

class MyResource(resource.Resource):
    def render_GET(self, request):
        return "Hello, world!"
          + +

          Note that you should never have leading indentation inside a + <pre> block -- this makes it hard for readers to + copy/paste the code.

          + +

          python-interpreter

          +
          +    <pre class="python-interpreter">
          +    &gt;&gt;&gt; from twisted.web import resource
          +    &gt;&gt;&gt; class MyResource(resource.Resource):
          +    ...     def render_GET(self, request):
          +    ...         return "Hello, world!"
          +    ...
          +    &gt;&gt;&gt; MyResource().render_GET(None)
          +    "Hello, world!"
          +    </pre>
          +
          + +
          +>>> from twisted.web import resource
          +>>> class MyResource(resource.Resource):
          +...     def render_GET(self, request):
          +...         return "Hello, world!"
          +...
          +>>> MyResource().render_GET(None)
          +"Hello, world!"
          +
          + +

          shell

          +
          +    <pre class="shell">
          +    $ twistd web --path /var/www
          +    </pre>
          +
          + +
          +$ twistd web --path /var/www
          +
          + +

          Code inside paragraph text

          + +

          For single-line code-snippets and attribute, method, class, + and module names, use the <code> tag, with a class of + API or python. During processing, module or class-names + with class API will automatically be looked up in the API + reference and have a link placed around it referencing the + actual API documents for that module/classname. If you wish to + reference an API document, then make sure you at least have a + single module-name so that the processing code will be able to + figure out which module or class you're referring to.

          + +

          You may also use the base attribute in conjunction with a class of API to indicate the module that should be prepended to the module or classname. This is to help keep the documentation clearer and less cluttered by allowing links to API docs that don't need the module name.

          +
          +        <p>
          +    To add a <code class="API">twisted.web.widgets.Widget</code>
          +    instance to a <code class="API"
          +    base="twisted.web.widgets">Gadget</code> instance, do 
          +    <code class="python">myGadget.putWidget("widgetPath",
          +    MyWidget())</code>.  
          +        </p>
          +    
          +        <p> 
          +    (implementation note: the widgets are stored in the <code
          +    class="python">gadgetInstance.widgets</code> attribute,
          +    which is a
          +    list.)
          +        </p>
          +    
          +
          + +
          +

          + To add a twisted.web.widgets.Widget + instance to a Gadget + instance, do + myGadget.putWidget("widgetPath", MyWidget()). +

          + +

          + (implementation note: the widgets are stored in the gadgetInstance.widgets attribute, + which is a + list.) +

          + +
          + +

          Headers

          + +

          It goes without mentioning that you should use <hN> in + a sane way -- <h1> should only appear once in the + document, to specify the title. Sections of the document should + use <h2>, sub-headers <h3>, and so on.

          + +

          XHTML

          + +

          XHTML is mandatory. That means tags that don't have a + closing tag need a /; for example, <hr /> + . Also, tags which have optional closing tags in HTML + need to be closed in XHTML; for example, + <li>foo</li>

          + +

          Tag Case

          + +

          All tags will be done in lower-case. XHTML demands this, and + so do I. :-)

          + +

          Footnotes

          + +

          Footnotes are enclosed inside + <span class="footnote"></span>. They must not + contain any markup.

          + +

          Suggestions

          + +

          Use lore -o lint to check your documentation + is not broken. lore -o lint will never change + your HTML, but it will complain if it doesn't like it.

          + +

          Don't use tables for formatting. 'nuff said.

          + +

          __all__

          + +

          __all__ is a module-level list of strings, naming objects in the module that are public. Make sure publicly exported classes, functions and constants are listed here.
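          For example, a module's public interface might be declared like this (the names are invented for illustration):

__all__ = ["ConnectionPool", "connectToDatabase", "DEFAULT_TIMEOUT"]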

          + +
          + +

          Index

          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/development/policy/index.html b/vendor/Twisted-10.0.0/doc/core/development/policy/index.html new file mode 100644 index 000000000000..92bef0407a67 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/policy/index.html @@ -0,0 +1,33 @@ + + +Twisted Documentation: Twisted Development Policy + + + + +

          Twisted Development Policy

          +
            +
            + + + +

            +This series of documents is designed for people who wish to contribute to the +Twisted codebase. +

            + + + +
            + +

            Index

            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/development/policy/svn-dev.html b/vendor/Twisted-10.0.0/doc/core/development/policy/svn-dev.html new file mode 100644 index 000000000000..aa6a3c3cc63b --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/policy/svn-dev.html @@ -0,0 +1,227 @@ + + +Twisted Documentation: Working from Twisted's Subversion repository + + + + +

            Working from Twisted's Subversion repository

            + +
            + + +

            If you're going to be doing development on Twisted itself, or if you want +to take advantage of bleeding-edge features (or bug fixes) that are not yet +available in a numbered release, you'll probably want to check out a tree from +the Twisted Subversion repository. The Trunk is where all current development +takes place.

            + +

            This document lists some useful tips for working on this cutting +edge.

            + +

            Checkout

            + +

            Subversion tutorials can be found elsewhere, see in particular the Subversion homepage. The relevant +data you need to check out a copy of the Twisted tree is available on the development page +, and is as follows:

            + +
            +$ svn co svn://svn.twistedmatrix.com/svn/Twisted/trunk Twisted
            +
            + +

            Alternate tree names

            + +

            By using svn co svn://svn.twistedmatrix.com/svn/Twisted/trunk +otherdir, you can put the workspace tree in a directory other than +Twisted. I do this (with a name like Twisted-Subversion) to +remind myself that this tree comes from Subversion and not from a released +version (like Twisted-1.0.5). This practice can cause a few problems, +because there are a few places in the Twisted tree that need to know where +the tree starts, so they can add it to sys.path without +requiring the user manually set their PYTHONPATH. These functions walk the +current directory up to the root, looking for a directory named +Twisted (sometimes exactly that, sometimes with a +.startswith test). Generally these are test scripts or other +administrative tools which expect to be launched from somewhere inside the +tree (but not necessarily from the top).

            + +

            If you rename the tree to something other than Twisted, these +tools may wind up trying to use Twisted source files from /usr/lib/python2.5 +or elsewhere on the default sys.path. Normally this won't +matter, but it is good to be aware of the issue in case you run into +problems.

            + +

            twisted/test/process_twisted.py is one of these programs.

            + +

            Combinator

            + +

            In order to simplify the use of Subversion, we typically use +Divmod Combinator. +You may find it to be useful, too. In particular, because Twisted uses +branches for almost all feature development, if you plan to contribute to +Twisted you will probably find Combinator very useful. For more details, +see the Combinator website, as well as the + +UQDS page.

            + +

            Compiling C extensions

            + +

            +There are currently several C extension modules in Twisted: +twisted.protocols._c_urlarg, twisted.internet.cfsupport, +twisted.internet.iocpreactor._iocp, and twisted.python._epoll. These modules +are optional, but you'll have to compile them if you want to experience their +features, performance improvements, or bugs. There are two approaches. +

            + +

            The first is to do a regular distutils ./setup.py build, which +will create a directory under build/ to hold both the generated +.so files as well as a copy of the 600-odd .py files +that make up Twisted. If you do this, you will need to set your PYTHONPATH to +something like MyDir/Twisted/build/lib.linux-i686-2.5 in order to +run code against the Subversion twisted (as opposed to whatever's installed in +/usr/lib/python2.5 or wherever python usually looks). In +addition, you will need to re-run the build command every +time you change a .py file. The build/lib.foo +directory is a copy of the main tree, and that copy is only updated when you +re-run setup.py build. It is easy to forget this and then wonder +why your code changes aren't being expressed.

            + +

            The second technique is to build the C modules in place, and point your +PYTHONPATH at the top of the tree, like MyDir/Twisted. This way +you're using the .py files in place too, removing the confusion a forgotten +rebuild could cause with the separate build/ directory above. To build the C +modules in place, do ./setup.py build_ext -i. You only need to +re-run this command when you change the C files. Note that +setup.py is not Make, it does not always get the dependencies +right (.h files in particular), so if you are hacking on the +cReactor you may need to manually delete the .o files before +doing a rebuild. Also note that doing a setup.py clean will +remove the .o files but not the final .so files, +they must be deleted by hand.

            + + +

            Running tests

            + +

            To run the full unit-test suite, do:

            + +
            ./bin/trial twisted
            + +

            To run a single test file (like twisted/test/test_defer.py), +do one of:

            + +
            ./bin/trial twisted.test.test_defer
            + +

            or

            + +
            ./bin/trial twisted/test/test_defer.py
            + +

            To run any tests that are related to a code file, like +twisted/protocols/imap4.py, do:

            + +
            ./bin/trial --testmodule twisted/mail/imap4.py
            + +

            This depends upon the .py file having an appropriate +test-case-name tag that indicates which test cases provide coverage. +See the Test Standards document for +details about using test-case-name. In this example, the +twisted.mail.test.test_imap test will be run.

            + +

            Many tests create temporary files in /tmp or ./_trial_temp, but +everything in /tmp should be deleted when the test finishes. Sometimes these +cleanup calls are commented out by mistake, so if you see a stray +/tmp/@12345.1 directory, it is probably from test_dirdbm or test_popsicle. +Look for an rmtree that has been commented out and complain to +the last developer who touched that file.

            + +

            Building docs

            + +

            Twisted documentation (not including the automatically-generated API docs) +is in Lore Format. +These .xhtml files are translated into .html files by +the bin/lore/lore script, which can check the files for syntax problems +(hlint), process multiple files at once, insert the files into a template +before processing, and can also translate the files into LaTeX or PostScript +instead.

            + +

            To build the HTML form of the howto/ docs, do the following. Note that +the index file will be placed in doc/howto/index.html.

            + +
            +./bin/lore/lore -p --config template=doc/howto/template.tpl doc/howto/*.xhtml
            +
            + +

            To run hlint over a single Lore document, such as +doc/development/policy/svn-dev.xhtml, do the following. This is +useful because the HTML conversion may bail without a useful explanation if +it sees mismatched tags.

            + +
            +./bin/lore/lore -n --output lint doc/development/policy/svn-dev.xhtml
            +
            + +

            To convert it to HTML (including markup, interpolation of examples, +footnote processing, etc), do the following. The results will be placed in +doc/development/policy/svn-dev.html:

            + +
            +./bin/lore/lore -p --config template=doc/howto/template.tpl \
            +   doc/development/policy/svn-dev.xhtml
            +
            + +

            Note that hyperlinks to other documents may not be quite right unless you +include a -l argument to bin/lore/lore. Links in the +.xhtml file are to .xhtml targets: when the .xhtml is turned into .html, the +link targets are supposed to be turned into .html also. In addition to this, +Lore markup of the form <code class="API"> is supposed to +turn into a link to the corresponding API reference page. These links will +probably be wrong unless the correct base URL is provided to Lore.

            + +

            Committing and Post-commit Hooks

            + +

            Twisted uses a customized + +trac-post-commit-hook to enable ticket updates based on svn commit +logs. When making a branch for a ticket, the branch name should end +in -<ticket number>, for +example my-branch-9999. This will add a ticket comment containing a +changeset link and branch name. To make your commit message show up as a comment +on a Trac ticket, add a refs #<ticket number> line at the +bottom of your commit message. To automatically close a ticket on Trac +as Fixed and add a comment with the closing commit message, add +a Fixes: #<ticket number> line to your commit message. In +general, a commit message closing a ticket looks like this:

            + +
            +Merge my-branch-9999: A single-line summary.
            +
            +Author: jesstess
            +Reviewers: exarkun, glyph
            +Fixes: #9999
            +
            +My longer description of the changes made.
            +
            + +

            The Twisted Coding Standard +elaborates on commit messages and source control.

            + +

            Emacs

            + +

            A minor mode for development with Twisted using Emacs is available. See +emacs/twisted-dev.el for several utility functions which make +it easier to grep for methods, run test cases, etc.

            + +

            Building Debian packages

            + +

            Our support for building Debian packages has fallen into disrepair. We +would very much like to restore this functionality, but until we do so, if +you are interested in this, you are on your own. See +stdeb for one possible approach +to this.

            + +
            + +

            Index

            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/development/policy/test-standard.html b/vendor/Twisted-10.0.0/doc/core/development/policy/test-standard.html new file mode 100644 index 000000000000..546639dc5218 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/policy/test-standard.html @@ -0,0 +1,362 @@ + + +Twisted Documentation: Unit Tests in Twisted + + + + +

            Unit Tests in Twisted

            + +
            + + +

            Each unit test tests one bit of functionality in the + software. Unit tests are entirely automated and complete quickly. + Unit tests for the entire system are gathered into one test suite, + and may all be run in a single batch. The result of a unit test + is simple: either it passes, or it doesn't. All this means you + can test the entire system at any time without inconvenience, and + quickly see what passes and what fails.

            + +

            Unit Tests in the Twisted Philosophy

            + +

            The Twisted development team adheres to the practice of Extreme Programming (XP), and the usage of unit tests is a cornerstone XP practice. Unit tests are a tool to give you increased confidence. You changed an algorithm -- did you break something? Run the unit tests. If a test fails, you know where to look, because each test covers only a small amount of code, and you know it has something to do with the changes you just made. If all the tests pass, you're good to go, and you don't need to second-guess yourself or worry that you just accidentally broke someone else's program.

            + +

            What to Test, What Not to Test

            + +

            You don't have to write a test for every single + method you write, only production methods that could possibly break.

            +
            + +

            -- Kent Beck, Extreme Programming Explained, p. 58.

            + +

            Running the Tests

            + +

            How

            + +

            From the root of the Twisted source tree, run + Trial: +

            + +
            +$ bin/trial twisted
            +    
            + +

            You'll find that having something like this in your emacs init + files is quite handy:

            + +
            +(defun runtests () (interactive)
            +  (compile "python /somepath/Twisted/bin/trial /somepath/Twisted"))
            +
            +(global-set-key [(alt t)] 'runtests)
            +
            +

            When

            + +

            Always, always, always be sure all the + tests pass before committing any code. If someone else + checks out code at the start of a development session and finds + failing tests, they will not be happy and may decide to hunt + you down.

            + +

            Since this is a geographically dispersed team, the person who + can help you get your code working probably isn't in the room with + you. You may want to share your work in progress over the + network, but you want to leave the main Subversion tree in good working + order. So use a branch, + and merge your changes back in only after your problem is solved + and all the unit tests pass again.

            + +

            Adding a Test

            + +

            Please don't add new modules to Twisted without adding tests + for them too. Otherwise we could change something which breaks + your module and not find out until later, making it hard to know + exactly what the change that broke it was, or until after a + release, and nobody wants broken code in a release.

            + +

            Tests go into dedicated test packages such as + twisted/test/ or twisted/conch/test/, + and are named test_foo.py, where foo is the name + of the module or package being tested. Extensive documentation on using + the PyUnit framework for writing unit tests can be found in the + links section below. +

            + +

            One deviation from the standard PyUnit documentation: to ensure that any variations in test results are due to variations in the code or environment and not the test process itself, Twisted ships with its own, compatible, testing framework. That just means that when you import the unittest module, you will use from twisted.trial import unittest instead of the standard import unittest.
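            In other words, a test module starts like this (the class name is a placeholder):

from twisted.trial import unittest      # instead of: import unittest

class FooTestCase(unittest.TestCase):
    pass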

            + +

            As long as you have followed the module naming and placement + conventions, trial will be smart + enough to pick up any new tests you write.

            + +

            PyUnit provides a large number of assertion methods to be used when + writing tests. Many of these are redundant. For consistency, Twisted + unit tests should use the assert forms rather than the + fail forms. Also, use assertEquals, + assertNotEquals, and assertAlmostEquals rather + than assertEqual, assertNotEqual, and + assertAlmostEqual. assertTrue is also + preferred over assert_. You may notice this convention is + not followed everywhere in the Twisted codebase. If you are changing + some test code and notice the wrong method being used in nearby code, + feel free to adjust it.
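            A short sketch of the preferred spellings; the values being compared are placeholders for illustration:

from twisted.trial import unittest

class PreferredAssertionsTestCase(unittest.TestCase):
    def test_preferredForms(self):
        self.assertEquals(2 + 2, 4)            # rather than assertEqual
        self.assertNotEquals(2 + 2, 5)         # rather than assertNotEqual
        self.assertAlmostEquals(0.1 + 0.2, 0.3, places=7)
        self.assertTrue(isinstance([], list))  # rather than assert_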

            + +

            When you add a unit test, make sure all methods have docstrings + specifying at a high level the intent of the test. That is, a description + that users of the method would understand.

            + +

            Skipping tests, TODO items

            + +

            Trial, the Twisted unit test framework, has some extensions which are +designed to encourage developers to add new tests. One common situation is +that a test exercises some optional functionality: maybe it depends upon +certain external libraries being available, maybe it only works on certain +operating systems. The important common factor is that nobody considers +these limitations to be a bug.

            + +

            To make it easy to test as much as possible, some tests may be skipped in +certain situations. Individual test cases can raise the +SkipTest exception to indicate that they should be skipped, and +the remainder of the test is not run. In the summary (the very last thing +printed, at the bottom of the test output) the test is counted as a +skip instead of a success or fail. This should be used +inside a conditional which looks for the necessary prerequisites:

            + +

def test_sshClient(self):
    if not ssh_path:
        raise unittest.SkipTest("cannot find ssh, nothing to test")
    foo() # do actual test after the SkipTest
            + +

            You can also set the .skip attribute on the method, with a string to +indicate why the test is being skipped. This is convenient for temporarily +turning off a test case, but it can also be set conditionally (by +manipulating the class attributes after they've been defined):

            + +

def test_thing(self):
    dotest()
test_thing.skip = "disabled locally"
            + +

class MyTestCase(unittest.TestCase):
    def test_one(self):
        ...
    def test_thing(self):
        dotest()

if not haveThing:
    MyTestCase.test_thing.im_func.skip = "cannot test without Thing"
    # but test_one() will still run
            + +

            Finally, you can turn off an entire TestCase at once by setting the .skip +attribute on the class. If you organize your tests by the functionality they +depend upon, this is a convenient way to disable just the tests which cannot +be run.

            + +

class TCPTestCase(unittest.TestCase):
    ...
class SSLTestCase(unittest.TestCase):
    if not haveSSL:
        skip = "cannot test without SSL support"
        # but TCPTestCase will still run
    ...
            + +

            .todo and Testing New Functionality

            + +

            Two good practices which arise from the XP development process are +sometimes at odds with each other:

            + +
              +
            • Unit tests are a good thing. Good developers recoil in horror when + they see a failing unit test. They should drop everything until the test + has been fixed.
            • + +
            • Good developers write the unit tests first. Once tests are done, they + write implementation code until the unit tests pass. Then they stop.
            • +
            + +

            These two goals will sometimes conflict. The unit tests that are written +first, before any implementation has been done, are certain to fail. We want +developers to commit their code frequently, for reliability and to improve +coordination between multiple people working on the same problem together. +While the code is being written, other developers (those not involved in the +new feature) should not have to pay attention to failures in the new code. +We should not dilute our well-indoctrinated Failing Test Horror Syndrome by +crying wolf when an incomplete module has not yet started passing its unit +tests. To do so would either teach the module author to put off writing or +committing their unit tests until after all the functionality is +working, or it would teach the other developers to ignore failing test +cases. Both are bad things.

            + +

            .todo is intended to solve this problem. When a developer first +starts writing the unit tests for functionality that has not yet been +implemented, they can set the .todo attribute on the test +methods that are expected to fail. These methods will still be run, but +their failure will not be counted the same as normal failures: they will go +into an expected failures category. Developers should learn to treat +this category as a second-priority queue, behind actual test failures.
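            For example (the class, method, and message below are invented for illustration):

from twisted.trial import unittest

class FrobnicationTestCase(unittest.TestCase):
    def test_reversibleFrobnication(self):
        self.fail("not written yet")
    test_reversibleFrobnication.todo = "frobnication is not implemented yet"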

            + +

            As the developer implements the feature, the tests will eventually start +passing. This is surprising: after all those tests are marked as being +expected to fail. The .todo tests which nevertheless pass are put into a +unexpected success category. The developer should remove the .todo +tag from these tests. At that point, they become normal tests, and their +failure is once again cause for immediate action by the entire development +team.

            + +

            The life cycle of a test is thus:

            + +
              +
            1. Test is created, marked .todo. Test fails: expected + failure.
            2. + +
            3. Code is written, test starts to pass. unexpected success.
            4. + +
            5. .todo tag is removed. Test passes. success.
            6. + +
            7. Code is broken, test starts to fail. failure. Developers spring + into action.
            8. + +
            9. Code is fixed, test passes once more. success.
            10. +
            + +

            Any test which remains marked with .todo for too long should +be examined. Either it represents functionality which nobody is working on, +or the test is broken in some fashion and needs to be fixed. Generally, +.todo may be of use while you are developing a feature, but +by the time you are ready to commit anything, all the tests you have written +should be passing. In other words, you should rarely, if ever, feel the need +to add a test marked todo to trunk. When you do, consider whether a ticket +in the issue tracker would be more useful.

            + +

            Line Coverage Information

            + +

            Trial provides line coverage information, which is very useful to ensure old code has decent coverage. Passing the --coverage option to Trial will generate the coverage information in a file called coverage, which can be found in the _trial_temp folder. This option requires Python 2.3.3 or newer.

            + +

            Associating Test Cases With Source Files

            + +

            Please add a test-case-name tag to the source file that is +covered by your new test. This is a comment at the beginning of the file +which looks like one of the following:

            + +

# -*- test-case-name: twisted.test.test_defer -*-
            + +

            or

            + +

#!/usr/bin/env python
# -*- test-case-name: twisted.test.test_defer -*-
            + +

            This format is understood by emacs to mark File Variables. The +intention is to accept test-case-name anywhere emacs would on +the first or second line of the file (but not in the File +Variables: block that emacs accepts at the end of the file). If you +need to define other emacs file variables, you can either put them in the +File Variables: block or use a semicolon-separated list of +variable definitions:

            + +

# -*- test-case-name: twisted.test.test_defer; fill-column: 75; -*-
            + +

            If the code is exercised by multiple test cases, those may be marked by using a comma-separated list of tests, as follows (note: not all tools can handle this yet, but trial --testmodule does):

            + +

# -*- test-case-name: twisted.test.test_defer,twisted.test.test_tcp -*-
            + +

            The test-case-name tag will allow trial +--testmodule twisted/dir/myfile.py to determine which test cases need +to be run to exercise the code in myfile.py. Several tools (as +well as twisted-dev.el's F9 command) use this to automatically +run the right tests.

            + +
            + + + +

            See also Tips for writing tests for Twisted + code.

            + +
            + +

            Index

            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/development/policy/writing-standard.html b/vendor/Twisted-10.0.0/doc/core/development/policy/writing-standard.html new file mode 100644 index 000000000000..b07e533c4296 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/development/policy/writing-standard.html @@ -0,0 +1,313 @@ + + +Twisted Documentation: Twisted Writing Standard + + + + +

            Twisted Writing Standard

            + +
            + + +

            The Twisted writing standard describes the documentation writing + styles we prefer in our documentation. This standard applies particularly + to howtos and other descriptive documentation.

            + +

            This document should be read with the documentation standard, which describes + markup style for the documentation.

            + +

            This document is meant to help Twisted documentation authors produce + documentation that does not have the following problems:

            + +
              +
            • misleads users about what is good Twisted style;
            • +
            • misleads users into thinking that an advanced howto is an introduction + to writing their first Twisted server; and
            • +
            • misleads users about whether they fit the document's target audience: + for example, that they are able to use enterprise without knowing how to + write SQL queries.
            • +
            + +

            General style

            + +

            Documents should aim to be clear and concise, allowing the API documentation and the example code to tell as much of the story as they can. Demonstrations and, where necessary, supported arguments should always be preferred to simple statements ("here is how you would simplify this code with Deferreds" rather than "Deferreds make code simpler").

            + +

            Documents should be clearly delineated into sections and subsections. + Each of these sections, like the overall document, should have a single + clear purpose. This is most easily tested by trying to have meaningful + headings: a section which is headed by "More details" or + "Advanced stuff" is not purposeful enough. There should be + fairly obvious ways to split a document. The two most common are task + based sectioning and sectioning which follows module and class + separations.

            + +

            Documentation must use American English spelling, and where possible + avoid any local variants of either vocabulary or grammar. Grammatically + complex sentences should ideally be avoided: these make reading + unnecessarily difficult, particularly for non-native speakers.

            + +

            Evangelism and usage documents

            + +

            The Twisted documentation should maintain a reasonable distinction + between "evangelism" documentation, which compares the Twisted + design or Twisted best practice with other approaches and argues for the + Twisted approach, and "usage" documentation, which describes the + Twisted approach in detail without comparison to other possible + approaches.

            + +

            While both kinds of documentation are useful, they have different + audiences. The first kind of document, evangelical documents, is useful to + a reader who is researching and comparing approaches and seeking to + understand the Twisted approach or Twisted functionality in order to + decide whether it is useful to them. The second kind of document, usage + documents, are useful to a reader who has decided to use Twisted and + simply wants further information about available functions and + architectures they can use to accomplish their goal.

            + +

            Since they have distinct audiences, evangelism and detailed usage + documentation belongs in separate files. There should be links between + them in 'Further reading' or similar sections.

            + +

            Descriptions of features

            + +

            Descriptions of any feature added since release 2.0 of Twisted core + must have a note describing which release of which Twisted project they + were added in at the first mention in each document. If they are not yet + released, give them the number of the next minor release.

            + +

            For example, a substantial change might have a version number added in + the introduction:

            + +
            + This document describes the Application infrastructure for deploying + Twisted applications (added in Twisted 1.3). +
            + +

            The version does not need to be mentioned elsewhere in the document except for specific features which were added in subsequent releases, which should be mentioned separately.

            + +
            + The simplest way to create a .tac file, SuperTac (added + in Twisted Core 99.7)...
            + +

            In the case where the usage of a feature has substantially changed, the + number should be that of the release in which the current usage became + available. For example:

            + +
            This document describes the Application infrastructure for + deploying Twisted applications (updated[/substantially updated] in Twisted + 2.7).
            + +

            Linking

            + +

            The first occurrence of the name of any module, class or function should + always link to the API documents. Subsequent mentions may or may not link + at the author's discretion: discussions which are very closely bound to a + particular API should probably link in the first mention in the given + section.

            + +

            Links between howtos are encouraged. Overview documents and tutorials + should always link to reference documents and in depth documents. These + documents should link among themselves wherever it's needed: if you're + tempted to re-describe the functionality of another module, you should + certainly link instead.

            + +

            Introductions

            + +

            The introductory section of a Twisted howto should immediately follow + the top-level heading and precede any subheadings.

            + +

            The following items should be present in the introduction to Twisted + howtos: the introductory paragraph and the description of the target + audience.

            + +

            Introductory paragraph

            + +

            The introductory paragraph of a document should summarize what the document is designed to present. It should use both the proper names for the Twisted technologies and simple non-Twisted descriptions of the technologies. For example, in this paragraph both the name of the technology ("Conch") and a description ("SSH server") are used:

            + +
            This document describes setting up an SSH server to serve data from the file system using Conch, the Twisted SSH implementation.
            + +

            The introductory paragraph should be relatively short, but should, like + the above, somewhere define the document's objective: what the reader + should be able to do using instructions in the document.

            + +

            Description of target audience

            + +

            Subsequent paragraphs in the introduction should describe the target + audience of the document: who would want to read it, and what they should + know before they can expect to use your document. For example:

            + +
            +

            + The target audience of this document is a Twisted user who has a set of + filesystem like data objects that they would like to make available to + authenticated users over SFTP. +

            + +

            + Following the directions in this document will require that you are + familiar with managing authentication via the Twisted Cred system. +

            +
            + +

            Use your discretion about the extent to which you list assumed + knowledge. Very introductory documents that are going to be among a + reader's first exposure to Twisted will even need to specify that they + rely on knowledge of Python and of certain networking concepts (ports, + servers, clients, connections) but documents that are going to be sought + out by existing Twisted users for particular purposes only need to specify + other Twisted knowledge that is assumed.

            + +

            Any knowledge of technologies that wouldn't be considered "core + Python" and/or "simple networking" need to be explicitly + specified, no matter how obvious they seem to someone familiar with the + technology. For example, it needs to be stated that someone using + enterprise should know SQL and should know how to set up and populate + databases for testing purposes.

            + +

            Where possible, link to other documents that will fill in missing + knowledge for the reader. Linking to documents in the Twisted repository + is preferred but not essential.

            + +

            Goals of document

            + +

            The introduction should finish with a list of tasks that the user can + expect to see the document accomplish. These tasks should be concrete + rather than abstract, so rather than telling the user that they will + "understand Twisted Conch", you would list the specific tasks + that they will see the document do. For example:

            + +
            +

            + This document will demonstrate the following tasks using Twisted Conch: +

            + +
              +
            • creating an anonymous access read-only SFTP server using a filesystem + backend;
            • +
            • creating an anonymous access read-only SFTP server using a proxy + backend connecting to an HTTP server; and
            • +
            • creating an anonymous access read and write SFTP server using a filesystem backend.
            • +
            +
            + +

            In many cases this will essentially be a list of your code examples, + but it need not be. If large sections of your code are devoted to design + discussions, your goals might resemble the following:

            + +
            +

            + This document will discuss the following design aspects of writing Conch + servers: +

            + +
              +
            • authentication of users; and
            • +
            • choice of data backends.
            • +
            +
            + + +

            Example code

            + +

            Wherever possible, example code should be provided to illustrate a + certain technique or piece of functionality.

            + +

            Example code should try and meet as many of the following requirements + as possible:

            + +
            + +

            The requirement to have a complete working example will occasionally + impose upon authors the need to have a few dummy functions: in Twisted + documentation the most common example is where a function is needed to + generate a Deferred and fire it after some time has passed. An example + might be this, where deferLater is used to fire a callback + after a period of time:

            + +

from twisted.internet import task, reactor

def getDummyDeferred():
    """
    Dummy method which returns a deferred that will fire in 5 seconds with
    a result
    """
    return task.deferLater(reactor, 5, lambda: "RESULT")
            + +

            As in the above example, it is imperative to clearly mark that the + function is a dummy in as many ways as you can: using Dummy in + the function name, explaining that it is a dummy in the docstring, and + marking particular lines as being required to create an effect for the + purposes of demonstration. In most cases, this will save the reader from + mistaking this dummy method for an idiom they should use in their Twisted + code.

            + +

            Conclusions

            + +

            The conclusion of a howto should follow the very last section heading + in a file. This heading would usually be called "Conclusion".

            + +

            The conclusion of a howto should remind the reader of the tasks that + they have done while reading the document. For example:

            + +
            +

            + In this document, you have seen how to: +

            + +
              +
            1. set up an anonymous read-only SFTP server;
            2. +
            3. set up a SFTP server where users authenticate;
            4. +
            5. set up a SFTP server where users are restricted to some parts of the + filesystem based on authentication; and
            6. +
            7. set up a SFTP server where users have write access to some parts of + the filesystem based on authentication.
            8. +
            +
            + +

            If appropriate, the howto could follow this description with links to + other documents that might be of interest to the reader with their + newfound knowledge. However, these links should be limited to fairly + obvious extensions of at least one of the listed tasks.


            Index

            Version: 10.0.0
            \ No newline at end of file

            diff --git a/vendor/Twisted-10.0.0/doc/core/development/security.html b/vendor/Twisted-10.0.0/doc/core/development/security.html
            new file mode 100644
            index 000000000000..4f2a2e55ffc9
            --- /dev/null
            +++ b/vendor/Twisted-10.0.0/doc/core/development/security.html
            @@ -0,0 +1,43 @@

            Twisted Documentation: Security

            Security


            Index

            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/examples/ampclient.py b/vendor/Twisted-10.0.0/doc/core/examples/ampclient.py new file mode 100644 index 000000000000..53494486ed00 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/ampclient.py @@ -0,0 +1,26 @@ +from twisted.internet import reactor, defer +from twisted.internet.protocol import ClientCreator +from twisted.protocols import amp +from ampserver import Sum, Divide + + +def doMath(): + d1 = ClientCreator(reactor, amp.AMP).connectTCP( + '127.0.0.1', 1234).addCallback( + lambda p: p.callRemote(Sum, a=13, b=81)).addCallback( + lambda result: result['total']) + def trapZero(result): + result.trap(ZeroDivisionError) + print "Divided by zero: returning INF" + return 1e1000 + d2 = ClientCreator(reactor, amp.AMP).connectTCP( + '127.0.0.1', 1234).addCallback( + lambda p: p.callRemote(Divide, numerator=1234, + denominator=0)).addErrback(trapZero) + def done(result): + print 'Done with math:', result + defer.DeferredList([d1, d2]).addCallback(done) + +if __name__ == '__main__': + doMath() + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/ampserver.py b/vendor/Twisted-10.0.0/doc/core/examples/ampserver.py new file mode 100644 index 000000000000..7b5adf010762 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/ampserver.py @@ -0,0 +1,40 @@ +from twisted.protocols import amp + +class Sum(amp.Command): + arguments = [('a', amp.Integer()), + ('b', amp.Integer())] + response = [('total', amp.Integer())] + + +class Divide(amp.Command): + arguments = [('numerator', amp.Integer()), + ('denominator', amp.Integer())] + response = [('result', amp.Float())] + errors = {ZeroDivisionError: 'ZERO_DIVISION'} + + +class Math(amp.AMP): + def sum(self, a, b): + total = a + b + print 'Did a sum: %d + %d = %d' % (a, b, total) + return {'total': total} + Sum.responder(sum) + + def divide(self, numerator, denominator): + result = float(numerator) / denominator + print 'Divided: %d / %d = %f' % (numerator, denominator, result) + return {'result': result} + Divide.responder(divide) + + +def main(): + from twisted.internet import reactor + from twisted.internet.protocol import Factory + pf = Factory() + pf.protocol = Math + reactor.listenTCP(1234, pf) + print 'started' + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/bananabench.py b/vendor/Twisted-10.0.0/doc/core/examples/bananabench.py new file mode 100644 index 000000000000..9d13f386f905 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/bananabench.py @@ -0,0 +1,79 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +import sys +import time +try: + import cStringIO as StringIO +except ImportError: + import StringIO + +# Twisted Imports +from twisted.spread import banana +from twisted.internet import protocol + +iterationCount = 10000 + +class BananaBench: + r = range( iterationCount ) + def setUp(self, encClass): + self.io = StringIO.StringIO() + self.enc = encClass() + self.enc.makeConnection(protocol.FileWrapper(self.io)) + self.enc._selectDialect("none") + self.enc.expressionReceived = self.putResult + + def putResult(self, result): + self.result = result + + def tearDown(self): + self.enc.connectionLost() + del self.enc + + def testEncode(self, value): + starttime = time.time() + for i in self.r: + self.enc.sendEncoded(value) + self.io.truncate(0) + endtime = time.time() + print ' Encode took %s seconds' % (endtime - starttime) + return endtime - starttime + + def testDecode(self, value): + self.enc.sendEncoded(value) + encoded = self.io.getvalue() + starttime = time.time() + for i in self.r: + self.enc.dataReceived(encoded) + endtime = time.time() + print ' Decode took %s seconds' % (endtime - starttime) + return endtime - starttime + + def performTest(self, method, data, encClass): + self.setUp(encClass) + method(data) + self.tearDown() + + def runTests(self, testData): + print 'Test data is: %s' % testData + print ' Using Pure Python Banana:' + self.performTest(self.testEncode, testData, banana.Banana) + self.performTest(self.testDecode, testData, banana.Banana) + +bench = BananaBench() +print 'Doing %s iterations of each test.' % iterationCount +print '' +testData = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] +bench.runTests(testData) +testData = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0] +bench.runTests(testData) +testData = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] +bench.runTests(testData) +testData = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"] +bench.runTests(testData) +testData = [1l, 2l, 3l, 4l, 5l, 6l, 7l, 8l, 9l, 10l] +bench.runTests(testData) +testData = [1, 2, [3, 4], [30.5, 40.2], 5, ["six", "seven", ["eight", 9]], [10], []] +bench.runTests(testData) + diff --git a/vendor/Twisted-10.0.0/doc/core/examples/chatserver.py b/vendor/Twisted-10.0.0/doc/core/examples/chatserver.py new file mode 100644 index 000000000000..76c3cf8926d3 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/chatserver.py @@ -0,0 +1,37 @@ +"""The most basic chat protocol possible. + +run me with twistd -y chatserver.py, and then connect with multiple +telnet clients to port 1025 +""" + +from twisted.protocols import basic + + + +class MyChat(basic.LineReceiver): + def connectionMade(self): + print "Got new client!" + self.factory.clients.append(self) + + def connectionLost(self, reason): + print "Lost a client!" 
+ self.factory.clients.remove(self) + + def lineReceived(self, line): + print "received", repr(line) + for c in self.factory.clients: + c.message(line) + + def message(self, message): + self.transport.write(message + '\n') + + +from twisted.internet import protocol +from twisted.application import service, internet + +factory = protocol.ServerFactory() +factory.protocol = MyChat +factory.clients = [] + +application = service.Application("chatserver") +internet.TCPServer(1025, factory).setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/core/examples/courier.py b/vendor/Twisted-10.0.0/doc/core/examples/courier.py new file mode 100644 index 000000000000..319373ebd9d1 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/courier.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Example of a interfacing to Courier's mail filter interface. +""" + +LOGFILE = '/tmp/filter.log' + +# Setup log file +from twisted.python import log +log.startLogging(open(LOGFILE, 'a')) +import sys +sys.stderr = log.logfile + +# Twisted imports +from twisted.internet import reactor, stdio +from twisted.internet.protocol import Protocol, Factory +from twisted.protocols import basic + +FILTERS='/var/lib/courier/filters' +ALLFILTERS='/var/lib/courier/allfilters' +FILTERNAME='twistedfilter' + +import os, os.path +from syslog import syslog, openlog, LOG_MAIL +from rfc822 import Message + +def trace_dump(): + t,v,tb = sys.exc_info() + openlog(FILTERNAME, 0, LOG_MAIL) + syslog('Unhandled exception: %s - %s' % (v, t)) + while tb: + syslog('Trace: %s:%s %s' % (tb.tb_frame.f_code.co_filename,tb.tb_frame.f_code.co_name,tb.tb_lineno)) + tb = tb.tb_next + # just to be safe + del tb + +def safe_del(file): + try: + if os.path.isdir(file): + os.removedirs(file) + else: + os.remove(file) + except OSError: + pass + + +class DieWhenLost(Protocol): + def connectionLost(self, reason=None): + reactor.stop() + + +class MailProcessor(basic.LineReceiver): + """I process a mail message. + + Override filterMessage to do any filtering you want.""" + messageFilename = None + delimiter = '\n' + + def connectionMade(self): + log.msg('Connection from %r' % self.transport) + self.state = 'connected' + self.metaInfo = [] + + def lineReceived(self, line): + if self.state == 'connected': + self.messageFilename = line + self.state = 'gotMessageFilename' + if self.state == 'gotMessageFilename': + if line: + self.metaInfo.append(line) + else: + if not self.metaInfo: + self.transport.loseConnection() + return + self.filterMessage() + + def filterMessage(self): + """Override this. + + A trivial example is included. + """ + try: + m = Message(open(self.messageFilename)) + self.sendLine('200 Ok') + except: + trace_dump() + self.sendLine('435 %s processing error' % FILTERNAME) + + +def main(): + # Listen on the UNIX socket + f = Factory() + f.protocol = MailProcessor + safe_del('%s/%s' % (ALLFILTERS, FILTERNAME)) + reactor.listenUNIX('%s/%s' % (ALLFILTERS, FILTERNAME), f, 10) + + # Once started, close fd 3 to let Courier know we're ready + reactor.callLater(0, os.close, 3) + + # When stdin is closed, it's time to exit. + s = stdio.StandardIO(DieWhenLost()) + + # Go! 
+ reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/cred.py b/vendor/Twisted-10.0.0/doc/core/examples/cred.py new file mode 100644 index 000000000000..200231718677 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/cred.py @@ -0,0 +1,163 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + + +import sys +from zope.interface import implements, Interface + +from twisted.protocols import basic +from twisted.internet import protocol +from twisted.python import log + +from twisted.cred import error +from twisted.cred import portal +from twisted.cred import checkers +from twisted.cred import credentials + +class IProtocolUser(Interface): + def getPrivileges(): + """Return a list of privileges this user has.""" + + def logout(): + """Cleanup per-login resources allocated to this avatar""" + +class AnonymousUser: + implements(IProtocolUser) + + def getPrivileges(self): + return [1, 2, 3] + + def logout(self): + print "Cleaning up anonymous user resources" + +class RegularUser: + implements(IProtocolUser) + + def getPrivileges(self): + return [1, 2, 3, 5, 6] + + def logout(self): + print "Cleaning up regular user resources" + +class Administrator: + implements(IProtocolUser) + + def getPrivileges(self): + return range(50) + + def logout(self): + print "Cleaning up administrator resources" + +class Protocol(basic.LineReceiver): + user = None + portal = None + avatar = None + logout = None + + def connectionMade(self): + self.sendLine("Login with USER followed by PASS or ANON") + self.sendLine("Check privileges with PRIVS") + + def connectionLost(self, reason): + if self.logout: + self.logout() + self.avatar = None + self.logout = None + + def lineReceived(self, line): + f = getattr(self, 'cmd_' + line.upper().split()[0]) + if f: + try: + f(*line.split()[1:]) + except TypeError: + self.sendLine("Wrong number of arguments.") + except: + self.sendLine("Server error (probably your fault)") + + def cmd_ANON(self): + if self.portal: + self.portal.login(credentials.Anonymous(), None, IProtocolUser + ).addCallbacks(self._cbLogin, self._ebLogin + ) + else: + self.sendLine("DENIED") + + def cmd_USER(self, name): + self.user = name + self.sendLine("Alright. Now PASS?") + + def cmd_PASS(self, password): + if not self.user: + self.sendLine("USER required before PASS") + else: + if self.portal: + self.portal.login( + credentials.UsernamePassword(self.user, password), + None, + IProtocolUser + ).addCallbacks(self._cbLogin, self._ebLogin + ) + else: + self.sendLine("DENIED") + + def cmd_PRIVS(self): + self.sendLine("You have the following privileges: ") + self.sendLine(" ".join(map(str, self.avatar.getPrivileges()))) + + def _cbLogin(self, (interface, avatar, logout)): + assert interface is IProtocolUser + self.avatar = avatar + self.logout = logout + self.sendLine("Login successful. Available commands: PRIVS") + + def _ebLogin(self, failure): + failure.trap(error.UnauthorizedLogin) + self.sendLine("Login denied! 
Go away.") + +class ServerFactory(protocol.ServerFactory): + protocol = Protocol + + def __init__(self, portal): + self.portal = portal + + def buildProtocol(self, addr): + p = protocol.ServerFactory.buildProtocol(self, addr) + p.portal = self.portal + return p + +class Realm: + implements(portal.IRealm) + + def requestAvatar(self, avatarId, mind, *interfaces): + if IProtocolUser in interfaces: + if avatarId == checkers.ANONYMOUS: + av = AnonymousUser() + elif avatarId.isupper(): + # Capitalized usernames are administrators. + av = Administrator() + else: + av = RegularUser() + return IProtocolUser, av, av.logout + raise NotImplementedError("Only IProtocolUser interface is supported by this realm") + +def main(): + r = Realm() + p = portal.Portal(r) + c = checkers.InMemoryUsernamePasswordDatabaseDontUse() + c.addUser("auser", "thepass") + c.addUser("SECONDUSER", "secret") + p.registerChecker(c) + p.registerChecker(checkers.AllowAnonymousAccess()) + + f = ServerFactory(p) + + log.startLogging(sys.stdout) + + from twisted.internet import reactor + reactor.listenTCP(4738, f) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/dbcred.py b/vendor/Twisted-10.0.0/doc/core/examples/dbcred.py new file mode 100755 index 000000000000..2d9bfc30c022 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/dbcred.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Simple example of a db checker: define a L{ICredentialsChecker} implementation +that deals with a database backend to authenticate a user. +""" + +from twisted.cred import error +from twisted.cred.credentials import IUsernameHashedPassword, IUsernamePassword +from twisted.cred.checkers import ICredentialsChecker +from twisted.internet.defer import Deferred + +from zope.interface import implements + + +class DBCredentialsChecker(object): + """ + This class checks the credentials of incoming connections + against a user table in a database. + """ + implements(ICredentialsChecker) + + def __init__(self, runQuery, + query="SELECT username, password FROM user WHERE username = %s", + customCheckFunc=None, caseSensitivePasswords=True): + """ + @param runQuery: This will be called to get the info from the db. + Generally you'd want to create a + L{twisted.enterprice.adbapi.ConnectionPool} and pass it's runQuery + method here. Otherwise pass a function with the same prototype. + @type runQuery: C{callable} + + @type query: query used to authenticate user. + @param query: C{str} + + @param customCheckFunc: Use this if the passwords in the db are stored + as hashes. We'll just call this, so you can do the checking + yourself. It takes the following params: + (username, suppliedPass, dbPass) and must return a boolean. + @type customCheckFunc: C{callable} + + @param caseSensitivePasswords: If true requires that every letter in + C{credentials.password} is exactly the same case as the it's + counterpart letter in the database. + This is only relevant if C{customCheckFunc} is not used. 
+ @type caseSensitivePasswords: C{bool} + """ + self.runQuery = runQuery + self.caseSensitivePasswords = caseSensitivePasswords + self.customCheckFunc = customCheckFunc + # We can't support hashed password credentials if we only have a hash + # in the DB + if customCheckFunc: + self.credentialInterfaces = (IUsernamePassword,) + else: + self.credentialInterfaces = ( + IUsernamePassword, IUsernameHashedPassword,) + + self.sql = query + + def requestAvatarId(self, credentials): + """ + Authenticates the kiosk against the database. + """ + # Check that the credentials instance implements at least one of our + # interfaces + for interface in self.credentialInterfaces: + if interface.providedBy(credentials): + break + else: + raise error.UnhandledCredentials() + # Ask the database for the username and password + dbDeferred = self.runQuery(self.sql, (credentials.username,)) + # Setup our deferred result + deferred = Deferred() + dbDeferred.addCallbacks(self._cbAuthenticate, self._ebAuthenticate, + callbackArgs=(credentials, deferred), + errbackArgs=(credentials, deferred)) + return deferred + + def _cbAuthenticate(self, result, credentials, deferred): + """ + Checks to see if authentication was good. Called once the info has + been retrieved from the DB. + """ + if len(result) == 0: + # Username not found in db + deferred.errback(error.UnauthorizedLogin('Username unknown')) + else: + username, password = result[0] + if self.customCheckFunc: + # Let the owner do the checking + if self.customCheckFunc( + username, credentials.password, password): + deferred.callback(credentials.username) + else: + deferred.errback( + error.UnauthorizedLogin('Password mismatch')) + else: + # It's up to us or the credentials object to do the checking + # now + if IUsernameHashedPassword.providedBy(credentials): + # Let the hashed password checker do the checking + if credentials.checkPassword(password): + deferred.callback(credentials.username) + else: + deferred.errback( + error.UnauthorizedLogin('Password mismatch')) + elif IUsernamePassword.providedBy(credentials): + # Compare the passwords, deciging whether or not to use + # case sensitivity + if self.caseSensitivePasswords: + passOk = ( + password.lower() == credentials.password.lower()) + else: + passOk = password == credentials.password + # See if they match + if passOk: + deferred.callback(credentials.username) + else: + deferred.errback( + error.UnauthorizedLogin('Password mismatch')) + else: + # OK, we don't know how to check this + deferred.errback(error.UnhandledCredentials()) + + def _ebAuthenticate(self, message, credentials, deferred): + """ + The database lookup failed for some reason. + """ + deferred.errback(error.LoginFailed(message)) + + +def main(): + """ + Run a simple echo pb server to test the checker. It defines a custom query + for dealing with sqlite special quoting, but otherwise it's a + straightforward use of the object. + + You can test it running C{pbechoclient.py}. 
+ """ + import sys + from twisted.python import log + log.startLogging(sys.stdout) + import os + if os.path.isfile('testcred'): + os.remove('testcred') + from twisted.enterprise import adbapi + pool = adbapi.ConnectionPool('pysqlite2.dbapi2', 'testcred') + # Create the table that will be used + query1 = """CREATE TABLE user ( + username string, + password string + )""" + # Insert a test user + query2 = """INSERT INTO user VALUES ('guest', 'guest')""" + def cb(res): + pool.runQuery(query2) + pool.runQuery(query1).addCallback(cb) + + checker = DBCredentialsChecker(pool.runQuery, + query="SELECT username, password FROM user WHERE username = ?") + from twisted.cred.portal import Portal + + import pbecho + from twisted.spread import pb + portal = Portal(pbecho.SimpleRealm()) + portal.registerChecker(checker) + reactor.listenTCP(pb.portno, pb.PBServerFactory(portal)) + + +if __name__ == "__main__": + from twisted.internet import reactor + reactor.callWhenRunning(main) + reactor.run() + diff --git a/vendor/Twisted-10.0.0/doc/core/examples/echoclient.py b/vendor/Twisted-10.0.0/doc/core/examples/echoclient.py new file mode 100644 index 000000000000..c5ad2fe81c6f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/echoclient.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from twisted.internet.protocol import ClientFactory +from twisted.protocols.basic import LineReceiver +from twisted.internet import reactor +import sys + +class EchoClient(LineReceiver): + end="Bye-bye!" + def connectionMade(self): + self.sendLine("Hello, world!") + self.sendLine("What a fine day it is.") + self.sendLine(self.end) + + def lineReceived(self, line): + print "receive:", line + if line==self.end: + self.transport.loseConnection() + +class EchoClientFactory(ClientFactory): + protocol = EchoClient + + def clientConnectionFailed(self, connector, reason): + print 'connection failed:', reason.getErrorMessage() + reactor.stop() + + def clientConnectionLost(self, connector, reason): + print 'connection lost:', reason.getErrorMessage() + reactor.stop() + +def main(): + factory = EchoClientFactory() + reactor.connectTCP('localhost', 8000, factory) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/echoclient_ssl.py b/vendor/Twisted-10.0.0/doc/core/examples/echoclient_ssl.py new file mode 100755 index 000000000000..892116131275 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/echoclient_ssl.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from OpenSSL import SSL +import sys + +from twisted.internet.protocol import ClientFactory +from twisted.protocols.basic import LineReceiver +from twisted.internet import ssl, reactor + + +class EchoClient(LineReceiver): + end="Bye-bye!" 
+ def connectionMade(self): + self.sendLine("Hello, world!") + self.sendLine("What a fine day it is.") + self.sendLine(self.end) + + def connectionLost(self, reason): + print 'connection lost (protocol)' + + def lineReceived(self, line): + print "receive:", line + if line==self.end: + self.transport.loseConnection() + +class EchoClientFactory(ClientFactory): + protocol = EchoClient + + def clientConnectionFailed(self, connector, reason): + print 'connection failed:', reason.getErrorMessage() + reactor.stop() + + def clientConnectionLost(self, connector, reason): + print 'connection lost:', reason.getErrorMessage() + reactor.stop() + +def main(): + factory = EchoClientFactory() + reactor.connectSSL('localhost', 8000, factory, ssl.ClientContextFactory()) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/echoclient_udp.py b/vendor/Twisted-10.0.0/doc/core/examples/echoclient_udp.py new file mode 100644 index 000000000000..3233918105d7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/echoclient_udp.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from twisted.internet.protocol import DatagramProtocol +from twisted.internet import reactor + +class EchoClientDatagramProtocol(DatagramProtocol): + strings = [ + "Hello, world!", + "What a fine day it is.", + "Bye-bye!" + ] + + def startProtocol(self): + self.transport.connect('127.0.0.1', 8000) + self.sendDatagram() + + def sendDatagram(self): + if len(self.strings): + datagram = self.strings.pop(0) + self.transport.write(datagram) + else: + reactor.stop() + + def datagramReceived(self, datagram, host): + print 'Datagram received: ', repr(datagram) + self.sendDatagram() + +def main(): + protocol = EchoClientDatagramProtocol() + t = reactor.listenUDP(0, protocol) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/echoserv.py b/vendor/Twisted-10.0.0/doc/core/examples/echoserv.py new file mode 100644 index 000000000000..90b68918953d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/echoserv.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet.protocol import Protocol, Factory +from twisted.internet import reactor + +### Protocol Implementation + +# This is just about the simplest possible protocol +class Echo(Protocol): + def dataReceived(self, data): + """ + As soon as any data is received, write it back. + """ + self.transport.write(data) + + +def main(): + f = Factory() + f.protocol = Echo + reactor.listenTCP(8000, f) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/echoserv_ssl.py b/vendor/Twisted-10.0.0/doc/core/examples/echoserv_ssl.py new file mode 100644 index 000000000000..c05b5f1e53f2 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/echoserv_ssl.py @@ -0,0 +1,30 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from OpenSSL import SSL + +class ServerContextFactory: + + def getContext(self): + """Create an SSL context. 
+ + This is a sample implementation that loads a certificate from a file + called 'server.pem'.""" + ctx = SSL.Context(SSL.SSLv23_METHOD) + ctx.use_certificate_file('server.pem') + ctx.use_privatekey_file('server.pem') + return ctx + + +if __name__ == '__main__': + import echoserv, sys + from twisted.internet.protocol import Factory + from twisted.internet import ssl, reactor + from twisted.python import log + log.startLogging(sys.stdout) + factory = Factory() + factory.protocol = echoserv.Echo + reactor.listenSSL(8000, factory, ServerContextFactory()) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/echoserv_udp.py b/vendor/Twisted-10.0.0/doc/core/examples/echoserv_udp.py new file mode 100644 index 000000000000..9171d6693969 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/echoserv_udp.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet.protocol import DatagramProtocol +from twisted.internet import reactor + +# Here's a UDP version of the simplest possible protocol +class EchoUDP(DatagramProtocol): + def datagramReceived(self, datagram, address): + self.transport.write(datagram, address) + +def main(): + reactor.listenUDP(8000, EchoUDP()) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/filewatch.py b/vendor/Twisted-10.0.0/doc/core/examples/filewatch.py new file mode 100644 index 000000000000..87c36824cb31 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/filewatch.py @@ -0,0 +1,17 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# +from twisted.application import internet + +def watch(fp): + fp.seek(fp.tell()) + for line in fp.readlines(): + sys.stdout.write(line) + +import sys +from twisted.internet import reactor +s = internet.TimerService(0.1, watch, file(sys.argv[1])) +s.startService() +reactor.run() +s.stopService() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/ftpclient.py b/vendor/Twisted-10.0.0/doc/core/examples/ftpclient.py new file mode 100644 index 000000000000..c6de976097d6 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/ftpclient.py @@ -0,0 +1,113 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +An example of using the FTP client +""" + +# Twisted imports +from twisted.protocols.ftp import FTPClient, FTPFileListProtocol +from twisted.internet.protocol import Protocol, ClientCreator +from twisted.python import usage +from twisted.internet import reactor + +# Standard library imports +import string +import sys +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + + +class BufferingProtocol(Protocol): + """Simple utility class that holds all data written to it in a buffer.""" + def __init__(self): + self.buffer = StringIO() + + def dataReceived(self, data): + self.buffer.write(data) + +# Define some callbacks + +def success(response): + print 'Success! Got response:' + print '---' + if response is None: + print None + else: + print string.join(response, '\n') + print '---' + + +def fail(error): + print 'Failed. 
Error was:' + print error + +def showFiles(result, fileListProtocol): + print 'Processed file listing:' + for file in fileListProtocol.files: + print ' %s: %d bytes, %s' \ + % (file['filename'], file['size'], file['date']) + print 'Total: %d files' % (len(fileListProtocol.files)) + +def showBuffer(result, bufferProtocol): + print 'Got data:' + print bufferProtocol.buffer.getvalue() + + +class Options(usage.Options): + optParameters = [['host', 'h', 'localhost'], + ['port', 'p', 21], + ['username', 'u', 'anonymous'], + ['password', None, 'twisted@'], + ['passive', None, 0], + ['debug', 'd', 1], + ] + +def run(): + # Get config + config = Options() + config.parseOptions() + config.opts['port'] = int(config.opts['port']) + config.opts['passive'] = int(config.opts['passive']) + config.opts['debug'] = int(config.opts['debug']) + + # Create the client + FTPClient.debug = config.opts['debug'] + creator = ClientCreator(reactor, FTPClient, config.opts['username'], + config.opts['password'], passive=config.opts['passive']) + creator.connectTCP(config.opts['host'], config.opts['port']).addCallback(connectionMade).addErrback(connectionFailed) + reactor.run() + +def connectionFailed(f): + print "Connection Failed:", f + reactor.stop() + +def connectionMade(ftpClient): + # Get the current working directory + ftpClient.pwd().addCallbacks(success, fail) + + # Get a detailed listing of the current directory + fileList = FTPFileListProtocol() + d = ftpClient.list('.', fileList) + d.addCallbacks(showFiles, fail, callbackArgs=(fileList,)) + + # Change to the parent directory + ftpClient.cdup().addCallbacks(success, fail) + + # Create a buffer + proto = BufferingProtocol() + + # Get short listing of current directory, and quit when done + d = ftpClient.nlst('.', proto) + d.addCallbacks(showBuffer, fail, callbackArgs=(proto,)) + d.addCallback(lambda result: reactor.stop()) + + +# this only runs if the module was *not* imported +if __name__ == '__main__': + run() + diff --git a/vendor/Twisted-10.0.0/doc/core/examples/ftpserver.py b/vendor/Twisted-10.0.0/doc/core/examples/ftpserver.py new file mode 100644 index 000000000000..ec2d78fa8a06 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/ftpserver.py @@ -0,0 +1,55 @@ +# Copyright (c) 2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +An example FTP server with minimal user authentication. +""" + +from twisted.protocols.ftp import FTPFactory, FTPRealm +from twisted.cred.portal import Portal +from twisted.cred.checkers import AllowAnonymousAccess, FilePasswordDB +from twisted.internet import reactor + +# +# First, set up a portal (twisted.cred.portal.Portal). This will be used +# to authenticate user logins, including anonymous logins. +# +# Part of this will be to establish the "realm" of the server - the most +# important task in this case is to establish where anonymous users will +# have default access to. In a real world scenario this would typically +# point to something like '/pub' but for this example it is pointed at the +# current working directory. +# +# The other important part of the portal setup is to point it to a list of +# credential checkers. In this case, the first of these is used to grant +# access to anonymous users and is relatively simple; the second is a very +# primitive password checker. This example uses a plain text password file +# that has one username:password pair per line. 
This checker *does* provide +# a hashing interface, and one would normally want to use it instead of +# plain text storage for anything remotely resembling a 'live' network. In +# this case, the file "pass.dat" is used, and stored in the same directory +# as the server. BAD. +# +# Create a pass.dat file which looks like this: +# +# ===================== +# jeff:bozo +# grimmtooth:bozo2 +# ===================== +# +p = Portal(FTPRealm('./'), + [AllowAnonymousAccess(), FilePasswordDB("pass.dat")]) + +# +# Once the portal is set up, start up the FTPFactory and pass the portal to +# it on startup. FTPFactory will start up a twisted.protocols.ftp.FTP() +# handler for each incoming OPEN request. Business as usual in Twisted land. +# +f = FTPFactory(p) + +# +# You know this part. Point the reactor to port 21 coupled with the above factory, +# and start the event loop. +# +reactor.listenTCP(21, f) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/gpsfix.py b/vendor/Twisted-10.0.0/doc/core/examples/gpsfix.py new file mode 100644 index 000000000000..b1176a29f6bf --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/gpsfix.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +GPSTest is a simple example using the SerialPort transport and the NMEA 0183 +and Rockwell Zodiac GPS protocols to display fix data as it is received from +the device. +""" +from twisted.python import log, usage +import sys + +if sys.platform == 'win32': + from twisted.internet import win32eventreactor + win32eventreactor.install() + + +class GPSFixLogger: + def handle_fix(self, *args): + """ + handle_fix gets called whenever either rockwell.Zodiac or nmea.NMEAReceiver + receives and decodes fix data. Generally, GPS receivers will report a + fix at 1hz. Implementing only this method is sufficient for most purposes + unless tracking of ground speed, course, utc date, or detailed satellite + information is necessary. + + For example, plotting a map from MapQuest or a similar service only + requires longitude and latitude. + """ + log.msg('fix:\n' + + '\n'.join(map(lambda n: ' %s = %s' % tuple(n), zip(('utc', 'lon', 'lat', 'fix', 'sat', 'hdp', 'alt', 'geo', 'dgp'), map(repr, args))))) + +class GPSOptions(usage.Options): + optFlags = [ + ['zodiac', 'z', 'Use Rockwell Zodiac (DeLorme Earthmate) [default: NMEA 0183]'], + ] + optParameters = [ + ['outfile', 'o', None, 'Logfile [default: sys.stdout]'], + ['baudrate', 'b', None, 'Serial baudrate [default: 4800 for NMEA, 9600 for Zodiac]'], + ['port', 'p', '/dev/ttyS0', 'Serial Port device'], + ] + + +if __name__ == '__main__': + from twisted.internet import reactor + from twisted.internet.serialport import SerialPort + + o = GPSOptions() + try: + o.parseOptions() + except usage.UsageError, errortext: + print '%s: %s' % (sys.argv[0], errortext) + print '%s: Try --help for usage details.' 
% (sys.argv[0]) + raise SystemExit, 1 + + logFile = o.opts['outfile'] + if logFile is None: + logFile = sys.stdout + log.startLogging(logFile) + + if o.opts['zodiac']: + from twisted.protocols.gps.rockwell import Zodiac as GPSProtocolBase + baudrate = 9600 + else: + from twisted.protocols.gps.nmea import NMEAReceiver as GPSProtocolBase + baudrate = 4800 + class GPSTest(GPSProtocolBase, GPSFixLogger): + pass + + if o.opts['baudrate']: + baudrate = int(o.opts['baudrate']) + + + port = o.opts['port'] + log.msg('Attempting to open %s at %dbps as a %s device' % (port, baudrate, GPSProtocolBase.__name__)) + s = SerialPort(GPSTest(), o.opts['port'], reactor, baudrate=baudrate) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/index.html b/vendor/Twisted-10.0.0/doc/core/examples/index.html new file mode 100644 index 000000000000..94cb84a3150c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/index.html @@ -0,0 +1,127 @@ + + +Twisted Documentation: Twisted code examples + + + + +

            Twisted code examples


            Simple Echo server and client


            Chat


            Echo server & client variants


            AMP server & client variants


            Perspective Broker


            ROW (Twisted Enterprise)

            • row_example.py - using twisted.enterprise.row to load objects from a database and manipulate them.
            • row_schema.sql - sample statements to populate tables for row_example.py
            • row_util.py - definitions of row classes for row_example.py

            Cred

            • cred.py - Authenticate a user with an in-memory username/password database
            • dbcred.py - Using a database backend to authenticate a user

            GUI


            FTP examples

            • ftpclient.py - example of using the FTP client
            • ftpserver.py - create an FTP server which serves files for anonymous users from the working directory and serves files for authenticated users from /home (a minimal anonymous-only sketch follows this list).
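            To give a feel for the anonymous half of that setup, here is a minimal sketch distilled from ftpserver.py (it omits the password-based checker the full example adds, and listens on the arbitrary unprivileged port 2121 rather than 21):

            from twisted.protocols.ftp import FTPFactory, FTPRealm
            from twisted.cred.portal import Portal
            from twisted.cred.checkers import AllowAnonymousAccess
            from twisted.internet import reactor

            # Anonymous logins are rooted at the current working directory.
            portal = Portal(FTPRealm('./'), [AllowAnonymousAccess()])

            # 2121 is an unprivileged port chosen for this sketch; the shipped example uses 21.
            reactor.listenTCP(2121, FTPFactory(portal))
            reactor.run()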

            Logging


            Miscellaneous

            • shaper.py - example of rate-limiting your web server
            • stdiodemo.py - example using stdio, Deferreds, LineReceiver and twisted.web.client.
            • mouse.py - example using MouseMan protocol with the SerialPort transport
            • ptyserv.py - serve shells in pseudo-terminals over TCP
            • courier.py - example of interfacing to Courier's mail filter interface
            • longex.py - example of doing arbitrarily long calculations nicely in Twisted
            • longex2.py - using generators to do long calculations
            • stdin.py - reading a line at a time from standard input without blocking the reactor.
            • filewatch.py - write the content of a file to standard out one line at a time
            • shoutcast.py - example Shoutcast client
            • gpsfix.py - example using the SerialPort transport and GPS protocols to display fix data as it is received from the device
            • wxacceptance.py - acceptance tests for wxreactor
            • postfix.py - test application for PostfixTCPMapServer

            Index

            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/examples/longex.py b/vendor/Twisted-10.0.0/doc/core/examples/longex.py new file mode 100644 index 000000000000..6fc9a7fe054a --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/longex.py @@ -0,0 +1,66 @@ +"""Simple example of doing arbitarily long calculations nicely in Twisted. + +This is also a simple demonstration of twisted.protocols.basic.LineReceiver. +""" + +from twisted.protocols import basic +from twisted.internet import reactor +from twisted.internet.protocol import ServerFactory + +class LongMultiplicationProtocol(basic.LineReceiver): + """A protocol for doing long multiplications. + + It receives a list of numbers (seperated by whitespace) on a line, and + writes back the answer. The answer is calculated in chunks, so no one + calculation should block for long enough to matter. + """ + def connectionMade(self): + self.workQueue = [] + + def lineReceived(self, line): + try: + numbers = map(long, line.split()) + except ValueError: + self.sendLine('Error.') + return + + if len(numbers) <= 1: + self.sendLine('Error.') + return + + self.workQueue.append(numbers) + reactor.callLater(0, self.calcChunk) + + def calcChunk(self): + # Make sure there's some work left; when multiple lines are received + # while processing is going on, multiple calls to reactor.callLater() + # can happen between calls to calcChunk(). + if self.workQueue: + # Get the first bit of work off the queue + work = self.workQueue[0] + + # Do a chunk of work: [a, b, c, ...] -> [a*b, c, ...] + work[:2] = [work[0] * work[1]] + + # If this piece of work now has only one element, send it. + if len(work) == 1: + self.sendLine(str(work[0])) + del self.workQueue[0] + + # Schedule this function to do more work, if there's still work + # to be done. + if self.workQueue: + reactor.callLater(0, self.calcChunk) + + +class LongMultiplicationFactory(ServerFactory): + protocol = LongMultiplicationProtocol + + +if __name__ == '__main__': + from twisted.python import log + import sys + log.startLogging(sys.stdout) + reactor.listenTCP(1234, LongMultiplicationFactory()) + reactor.run() + diff --git a/vendor/Twisted-10.0.0/doc/core/examples/longex2.py b/vendor/Twisted-10.0.0/doc/core/examples/longex2.py new file mode 100644 index 000000000000..87589888e204 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/longex2.py @@ -0,0 +1,101 @@ +"""Example of doing arbitarily long calculations nicely in Twisted. + +This is also a simple demonstration of twisted.protocols.basic.LineReceiver. +This example uses generators to do the calculation. It also tries to be +a good example in division of responsibilities: +- The protocol handles the wire layer, reading in lists of numbers + and writing out the result. +- The factory decides on policy, and has relatively little knowledge + of the details of the protocol. Other protocols can use the same + factory class by intantiating and setting .protocol +- The factory does little job itself: it is mostly a policy maker. + The 'smarts' are in free-standing functions which are written + for flexibility. + +The goal is for minimal dependencies: +- You can use runIterator to run any iterator inside the Twisted + main loop. +- You can use multiply whenever you need some way of multiplying + numbers such that the multiplications will happen asynchronously, + but it is your responsibility to schedule the multiplications. 
+- You can use the protocol with other factories to implement other + functions that apply to arbitrary lists of longs. +- You can use the factory with other protocols for support of legacy + protocols. In fact, the factory does not even have to be used as + a protocol factory. Here are easy ways to support the operation + over XML-RPC and PB. + +class Multiply(xmlrpc.XMLRPC): + def __init__(self): self.factory = Multiplication() + def xmlrpc_multiply(self, *numbers): + return self.factory.calc(map(long, numbers)) + +class Multiply(pb.Referencable): + def __init__(self): self.factory = Multiplication() + def remote_multiply(self, *numbers): + return self.factory.calc(map(long, numbers)) + +Note: +Multiplying zero numbers is a perfectly sensible operation, and the +result is 1. In that, this example departs from doc/examples/longex.py, +which errors out when trying to do this. +""" +from __future__ import generators +from twisted.protocols import basic +from twisted.internet import defer, protocol + +def runIterator(reactor, iterator): + try: + iterator.next() + except StopIteration: + pass + else: + reactor.callLater(0, runIterator, reactor, iterator) + +def multiply(numbers): + d = defer.Deferred() + def _(): + acc = 1 + while numbers: + acc *= numbers.pop() + yield None + d.callback(acc) + return d, _() + +class Numbers(basic.LineReceiver): + """Protocol for reading lists of numbers and manipulating them. + + It receives a list of numbers (seperated by whitespace) on a line, and + writes back the answer. The exact algorithm to use depends on the + factory. It should return an str-able Deferred. + """ + def lineReceived(self, line): + try: + numbers = map(long, line.split()) + except ValueError: + self.sendLine('Error.') + return + deferred = self.factory.calc(numbers) + deferred.addCallback(str) + deferred.addCallback(self.sendLine) + +class Multiplication(protocol.ServerFactory): + """Factory for multiplying numbers. + + It provides a function which calculates the multiplication + of a list of numbers. The function destroys its input. + Note that instances of this factory can use other formats + for transmitting the number lists, as long as they set + correct protoocl values. + """ + protocol = Numbers + def calc(self, numbers): + deferred, iterator = multiply(numbers) + from twisted.internet import reactor + runIterator(reactor, iterator) + return deferred + +if __name__ == '__main__': + from twisted.internet import reactor + reactor.listenTCP(1234, Multiplication()) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/mouse.py b/vendor/Twisted-10.0.0/doc/core/examples/mouse.py new file mode 100755 index 000000000000..ee5b3927d219 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/mouse.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Example using MouseMan protocol with the SerialPort transport. +""" + +# TODO set tty modes, etc. 
+# This works for me: + +# speed 1200 baud; rows 0; columns 0; line = 0; +# intr = ^C; quit = ^\; erase = ^?; kill = ^U; eof = ^D; +# eol = ; eol2 = ; start = ^Q; stop = ^S; susp = ^Z; +# rprnt = ^R; werase = ^W; lnext = ^V; flush = ^O; min = 1; time = 0; +# -parenb -parodd cs7 hupcl -cstopb cread clocal -crtscts ignbrk +# -brkint ignpar -parmrk -inpck -istrip -inlcr -igncr -icrnl -ixon +# -ixoff -iuclc -ixany -imaxbel -opost -olcuc -ocrnl -onlcr -onocr +# -onlret -ofill -ofdel nl0 cr0 tab0 bs0 vt0 ff0 -isig -icanon -iexten +# -echo -echoe -echok -echonl -noflsh -xcase -tostop -echoprt -echoctl +# -echoke + +import sys +from twisted.python import usage, log +from twisted.protocols.mice import mouseman + +if sys.platform == 'win32': + # win32 serial does not work yet! + raise NotImplementedError, "The SerialPort transport does not currently support Win32" + from twisted.internet import win32eventreactor + win32eventreactor.install() + +class Options(usage.Options): + optParameters = [ + ['port', 'p', '/dev/mouse', 'Device for serial mouse'], + ['baudrate', 'b', '1200', 'Baudrate for serial mouse'], + ['outfile', 'o', None, 'Logfile [default: sys.stdout]'], + ] + +class McFooMouse(mouseman.MouseMan): + def down_left(self): + log.msg("LEFT") + + def up_left(self): + log.msg("left") + + def down_middle(self): + log.msg("MIDDLE") + + def up_middle(self): + log.msg("middle") + + def down_right(self): + log.msg("RIGHT") + + def up_right(self): + log.msg("right") + + def move(self, x, y): + log.msg("(%d,%d)" % (x, y)) + +if __name__ == '__main__': + from twisted.internet import reactor + from twisted.internet.serialport import SerialPort + o = Options() + try: + o.parseOptions() + except usage.UsageError, errortext: + print "%s: %s" % (sys.argv[0], errortext) + print "%s: Try --help for usage details." 
% (sys.argv[0]) + raise SystemExit, 1 + + logFile = sys.stdout + if o.opts['outfile']: + logFile = o.opts['outfile'] + log.startLogging(logFile) + + SerialPort(McFooMouse(), o.opts['port'], reactor, baudrate=int(o.opts['baudrate'])) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pb_exceptions.py b/vendor/Twisted-10.0.0/doc/core/examples/pb_exceptions.py new file mode 100644 index 000000000000..00753a4816a3 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pb_exceptions.py @@ -0,0 +1,36 @@ + +from twisted.python import util +from twisted.spread import pb +from twisted.cred import portal, checkers, credentials + +class Avatar(pb.Avatar): + def perspective_exception(self, x): + return x / 0 + +class Realm: + def requestAvatar(self, interface, mind, *interfaces): + if pb.IPerspective in interfaces: + return pb.IPerspective, Avatar(), lambda: None + +def cbLogin(avatar): + avatar.callRemote("exception", 10).addCallback(str).addCallback(util.println) + +def ebLogin(failure): + print failure + +def main(): + c = checkers.InMemoryUsernamePasswordDatabaseDontUse(user="pass") + p = portal.Portal(Realm(), [c]) + server = pb.PBServerFactory(p) + server.unsafeTracebacks = True + client = pb.PBClientFactory() + login = client.login(credentials.UsernamePassword("user", "pass")) + login.addCallback(cbLogin).addErrback(ebLogin).addBoth(lambda: reactor.stop()) + + from twisted.internet import reactor + p = reactor.listenTCP(0, server) + c = reactor.connectTCP('127.0.0.1', p.getHost().port, client) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbbenchclient.py b/vendor/Twisted-10.0.0/doc/core/examples/pbbenchclient.py new file mode 100644 index 000000000000..9cd2b317b902 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbbenchclient.py @@ -0,0 +1,42 @@ + +from twisted.spread import pb +from twisted.internet import defer, reactor +from twisted.cred.credentials import UsernamePassword +import time + +class PBBenchClient: + hostname = 'localhost' + portno = pb.portno + calledThisSecond = 0 + + def callLoop(self, ignored): + d1 = self.persp.callRemote("simple") + d2 = self.persp.callRemote("complexTypes") + defer.DeferredList([d1, d2]).addCallback(self.callLoop) + self.calledThisSecond += 1 + thisSecond = int(time.time()) + if thisSecond != self.lastSecond: + if thisSecond - self.lastSecond > 1: + print "WARNING it took more than one second" + print 'cps:', self.calledThisSecond + self.calledThisSecond = 0 + self.lastSecond = thisSecond + + def _cbPerspective(self, persp): + self.persp = persp + self.lastSecond = int(time.time()) + self.callLoop(None) + + def runTest(self): + factory = pb.PBClientFactory() + reactor.connectTCP(self.hostname, self.portno, factory) + factory.login(UsernamePassword("benchmark", "benchmark")).addCallback(self._cbPerspective) + + +def main(): + PBBenchClient().runTest() + from twisted.internet import reactor + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbbenchserver.py b/vendor/Twisted-10.0.0/doc/core/examples/pbbenchserver.py new file mode 100644 index 000000000000..c38726b805bf --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbbenchserver.py @@ -0,0 +1,54 @@ +# Copyright (c) 2001-2006 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +"""Server for PB benchmark.""" + +from zope.interface import implements + +from twisted.spread import pb +from twisted.internet import reactor +from twisted.cred.portal import IRealm + +class PBBenchPerspective(pb.Avatar): + callsPerSec = 0 + def __init__(self): + pass + + def perspective_simple(self): + self.callsPerSec = self.callsPerSec + 1 + return None + + def printCallsPerSec(self): + print '(s) cps:', self.callsPerSec + self.callsPerSec = 0 + reactor.callLater(1, self.printCallsPerSec) + + def perspective_complexTypes(self): + return ['a', 1, 1l, 1.0, [], ()] + + +class SimpleRealm: + implements(IRealm) + + def requestAvatar(self, avatarId, mind, *interfaces): + if pb.IPerspective in interfaces: + p = PBBenchPerspective() + p.printCallsPerSec() + return pb.IPerspective, p, lambda : None + else: + raise NotImplementedError("no interface") + + +def main(): + from twisted.cred.portal import Portal + from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse + portal = Portal(SimpleRealm()) + checker = InMemoryUsernamePasswordDatabaseDontUse() + checker.addUser("benchmark", "benchmark") + portal.registerChecker(checker) + reactor.listenTCP(8787, pb.PBServerFactory(portal)) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbecho.py b/vendor/Twisted-10.0.0/doc/core/examples/pbecho.py new file mode 100644 index 000000000000..f10428da2225 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbecho.py @@ -0,0 +1,51 @@ +# Copyright (c) 2001-2006 Twisted Matrix Laboratories. +# See LICENSE for details. + +if __name__ == '__main__': + # Avoid using any names defined in the "__main__" module. + from pbecho import main + raise SystemExit(main()) + +from zope.interface import implements + +from twisted.spread import pb +from twisted.cred.portal import IRealm + +class DefinedError(pb.Error): + pass + + +class SimplePerspective(pb.Avatar): + + def perspective_echo(self, text): + print 'echoing',text + return text + + def perspective_error(self): + raise DefinedError("exception!") + + def logout(self): + print self, "logged out" + + +class SimpleRealm: + implements(IRealm) + + def requestAvatar(self, avatarId, mind, *interfaces): + if pb.IPerspective in interfaces: + avatar = SimplePerspective() + return pb.IPerspective, avatar, avatar.logout + else: + raise NotImplementedError("no interface") + + +def main(): + from twisted.internet import reactor + from twisted.cred.portal import Portal + from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse + portal = Portal(SimpleRealm()) + checker = InMemoryUsernamePasswordDatabaseDontUse() + checker.addUser("guest", "guest") + portal.registerChecker(checker) + reactor.listenTCP(pb.portno, pb.PBServerFactory(portal)) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbechoclient.py b/vendor/Twisted-10.0.0/doc/core/examples/pbechoclient.py new file mode 100644 index 000000000000..5d5eff62488d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbechoclient.py @@ -0,0 +1,32 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +from twisted.internet import reactor +from twisted.spread import pb +from twisted.cred.credentials import UsernamePassword + +from pbecho import DefinedError + +def success(message): + print "Message received:",message + # reactor.stop() + +def failure(error): + t = error.trap(DefinedError) + print "error received:", t + reactor.stop() + +def connected(perspective): + perspective.callRemote('echo', "hello world").addCallbacks(success, failure) + perspective.callRemote('error').addCallbacks(success, failure) + print "connected." + + +factory = pb.PBClientFactory() +reactor.connectTCP("localhost", pb.portno, factory) +factory.login( + UsernamePassword("guest", "guest")).addCallbacks(connected, failure) + +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbgtk2.py b/vendor/Twisted-10.0.0/doc/core/examples/pbgtk2.py new file mode 100644 index 000000000000..8c4590e73752 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbgtk2.py @@ -0,0 +1,122 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from __future__ import nested_scopes + +from twisted.internet import gtk2reactor +gtk2reactor.install() + +import gtk +from gtk import glade +from twisted import copyright +from twisted.internet import reactor, defer +from twisted.python import failure, log, util +from twisted.spread import pb +from twisted.cred.credentials import UsernamePassword +from twisted.internet import error as netError + + +class LoginDialog: + def __init__(self, deferred): + self.deferredResult = deferred + + gladefile = util.sibpath(__file__, "pbgtk2login.glade") + self.glade = glade.XML(gladefile) + + self.glade.signal_autoconnect(self) + + self.setWidgetsFromGladefile() + self._loginDialog.show() + + def setWidgetsFromGladefile(self): + widgets = ("hostEntry", "portEntry", "userNameEntry", "passwordEntry", + "statusBar", "loginDialog") + gw = self.glade.get_widget + for widgetName in widgets: + setattr(self, "_" + widgetName, gw(widgetName)) + + self._statusContext = self._statusBar.get_context_id("Login dialog.") + + def on_loginDialog_response(self, widget, response): + handlers = {gtk.RESPONSE_NONE: self.windowClosed, + gtk.RESPONSE_DELETE_EVENT: self.windowClosed, + gtk.RESPONSE_OK: self.doLogin, + gtk.RESPONSE_CANCEL: self.cancelled} + handlers.get(response)() + + def on_loginDialog_close(self, widget, userdata=None): + self.windowClosed() + + def cancelled(self): + if not self.deferredResult.called: + self.deferredResult.errback() + self._loginDialog.destroy() + + def windowClosed(self, reason=None): + if not self.deferredResult.called: + self.deferredResult.errback() + + def doLogin(self): + host = self._hostEntry.get_text() + port = int(self._portEntry.get_text()) + userName = self._userNameEntry.get_text() + password = self._passwordEntry.get_text() + + client_factory = pb.PBClientFactory() + reactor.connectTCP(host, port, client_factory) + creds = UsernamePassword(userName, password) + client_factory.login(creds).addCallbacks(self._cbGotPerspective, self._ebFailedLogin) + + self.statusMsg("Contacting server...") + + def _cbGotPerspective(self, perspective): + self.statusMsg("Connected to server.") + self.deferredResult.callback(perspective) + self._loginDialog.destroy() + + def _ebFailedLogin(self, reason): + if isinstance(reason, failure.Failure): + text = str(reason.value) + else: + text = str(reason) + + self.statusMsg(text) + msg = gtk.MessageDialog(self._loginDialog, + gtk.DIALOG_DESTROY_WITH_PARENT, + gtk.MESSAGE_ERROR, + 
gtk.BUTTONS_CLOSE, + text) + msg.show_all() + msg.connect("response", lambda *a: msg.destroy()) + + def statusMsg(self, text): + self._statusBar.push(self._statusContext, text) + + +class EchoClient: + def __init__(self, echoer): + self.echoer = echoer + w = gtk.Window(gtk.WINDOW_TOPLEVEL) + vb = gtk.VBox(); b = gtk.Button("Echo:") + self.entry = gtk.Entry(); self.outry = gtk.Entry() + w.add(vb) + map(vb.add, [b, self.entry, self.outry]) + b.connect('clicked', self.clicked) + w.connect('destroy', self.stop) + w.show_all() + + def clicked(self, b): + txt = self.entry.get_text() + self.entry.set_text("") + self.echoer.callRemote('echo',txt).addCallback(self.outry.set_text) + + def stop(self, b): + reactor.stop() + +d = defer.Deferred() +LoginDialog(d) +d.addCallbacks(EchoClient, + lambda _: reactor.stop()) + +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbgtk2login.glade b/vendor/Twisted-10.0.0/doc/core/examples/pbgtk2login.glade new file mode 100644 index 000000000000..6b5eb01e6d44 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbgtk2login.glade @@ -0,0 +1,330 @@ + + + + + + + Login + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + True + True + True + + + + + + True + False + 0 + + + + True + GTK_BUTTONBOX_END + + + + True + True + True + gtk-cancel + True + GTK_RELIEF_NORMAL + -6 + + + + + + True + True + True + True + GTK_RELIEF_NORMAL + -5 + + + + True + 0.5 + 0.5 + 0 + 0 + + + + True + False + 2 + + + + True + gtk-ok + 4 + 0.5 + 0.5 + 0 + 0 + + + 0 + False + False + + + + + + True + _Login + True + False + GTK_JUSTIFY_LEFT + False + False + 0.5 + 0.5 + 0 + 0 + + + 0 + False + False + + + + + + + + + + + 0 + False + True + GTK_PACK_END + + + + + + True + False + + + 0 + False + False + GTK_PACK_END + + + + + + True + 3 + 2 + False + 0 + 0 + + + + True + _Host: + True + False + GTK_JUSTIFY_LEFT + False + False + 0.9 + 0.5 + 0 + 0 + hostEntry + + + + + + + 0 + 1 + 0 + 1 + fill + + + + + + + True + False + 0 + + + + True + The name of a host to connect to. + True + True + True + True + 0 + localhost + True + * + True + + + + + + 0 + True + True + + + + + + True + The number of a port to connect on. + True + True + True + 0 + 8787 + True + * + True + 5 + + + 0 + False + True + + + + + 1 + 2 + 0 + 1 + fill + + + + + + True + _Name: + True + False + GTK_JUSTIFY_LEFT + False + False + 0.9 + 0.5 + 0 + 0 + userNameEntry + + + 0 + 1 + 1 + 2 + fill + + + + + + + True + An identity to log in as. + True + True + True + 0 + guest + True + * + True + + + 1 + 2 + 1 + 2 + + + + + + + True + The Identity's log-in password. + True + True + False + 0 + guest + True + * + True + + + 1 + 2 + 2 + 3 + + + + + + + True + _Password: + True + False + GTK_JUSTIFY_LEFT + False + False + 0.9 + 0.5 + 0 + 0 + passwordEntry + + + 0 + 1 + 2 + 3 + fill + + + + + + 0 + False + False + + + + + + + diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbinterop.py b/vendor/Twisted-10.0.0/doc/core/examples/pbinterop.py new file mode 100644 index 000000000000..de59632f3bd7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbinterop.py @@ -0,0 +1,71 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +"""PB interop server.""" + +from twisted.spread import pb, jelly, flavors +from twisted.internet import reactor + + +class Interop(pb.Root): + """Test object for PB interop tests.""" + + def __init__(self): + self.o = pb.Referenceable() + + def remote_int(self): + return 1 + + def remote_string(self): + return "string" + + def remote_unicode(self): + return u"string" + + def remote_float(self): + return 1.5 + + def remote_list(self): + return [1, 2, 3] + + def remote_recursive(self): + l = [] + l.append(l) + return l + + def remote_dict(self): + return {1 : 2} + + def remote_reference(self): + return self.o + + def remote_local(self, obj): + d = obj.callRemote("hello") + d.addCallback(self._local_success) + + def _local_success(self, result): + if result != "hello, world": + raise ValueError, "%r != %r" % (result, "hello, world") + + def remote_receive(self, obj): + expected = [1, 1.5, "hi", u"hi", {1 : 2}] + if obj != expected: + raise ValueError, "%r != %r" % (obj, expected) + + def remote_self(self, obj): + if obj != self: + raise ValueError, "%r != %r" % (obj, self) + + def remote_copy(self, x): + o = flavors.Copyable() + o.x = x + return o + + +if __name__ == '__main__': + reactor.listenTCP(8789, pb.PBServerFactory(Interop())) + reactor.run() + + + diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbsimple.py b/vendor/Twisted-10.0.0/doc/core/examples/pbsimple.py new file mode 100644 index 000000000000..7c3d9f442c3f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbsimple.py @@ -0,0 +1,16 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from twisted.spread import pb +from twisted.internet import reactor + +class Echoer(pb.Root): + def remote_echo(self, st): + print 'echoing:', st + return st + +if __name__ == '__main__': + reactor.listenTCP(8789, pb.PBServerFactory(Echoer())) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pbsimpleclient.py b/vendor/Twisted-10.0.0/doc/core/examples/pbsimpleclient.py new file mode 100644 index 000000000000..91c1be6a7871 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pbsimpleclient.py @@ -0,0 +1,18 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from twisted.spread import pb +from twisted.internet import reactor +from twisted.python import util + +factory = pb.PBClientFactory() +reactor.connectTCP("localhost", 8789, factory) +d = factory.getRootObject() +d.addCallback(lambda object: object.callRemote("echo", "hello network")) +d.addCallback(lambda echo: 'server echoed: '+echo) +d.addErrback(lambda reason: 'error: '+str(reason.value)) +d.addCallback(util.println) +d.addCallback(lambda _: reactor.stop()) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/postfix.py b/vendor/Twisted-10.0.0/doc/core/examples/postfix.py new file mode 100644 index 000000000000..edb77e4a6bdd --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/postfix.py @@ -0,0 +1,29 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Test app for PostfixTCPMapServer. + +Call with parameters KEY1=VAL1 KEY2=VAL2 ... 
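+
+For example, "python postfix.py foo=bar baz=quux" serves those pairs on
+127.0.0.1:4242; with the Postfix tools installed, a lookup along the lines of
+"postmap -q foo tcp:127.0.0.1:4242" should print back "bar".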
+""" + +import sys + +from twisted.internet import reactor +from twisted.protocols import postfix +from twisted.python import log + +log.startLogging(sys.stdout) + +d = {} +for arg in sys.argv[1:]: + try: + k,v = arg.split('=', 1) + except ValueError: + k = arg + v = '' + d[k] = v + +f = postfix.PostfixTCPMapDictServerFactory(d) +port = reactor.listenTCP(4242, f, interface='127.0.0.1') +reactor.run() \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/examples/ptyserv.py b/vendor/Twisted-10.0.0/doc/core/examples/ptyserv.py new file mode 100644 index 000000000000..becbfd230ce3 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/ptyserv.py @@ -0,0 +1,32 @@ +from twisted.internet import reactor, protocol + +class FakeTelnet(protocol.Protocol): + commandToRun = ['/bin/sh'] # could have args too + dirToRunIn = '/tmp' + def connectionMade(self): + print 'connection made' + self.propro = ProcessProtocol(self) + reactor.spawnProcess(self.propro, self.commandToRun[0], self.commandToRun, {}, + self.dirToRunIn, usePTY=1) + def dataReceived(self, data): + self.propro.transport.write(data) + def conectionLost(self): + print 'connection lost' + self.propro.tranport.loseConnection() + +class ProcessProtocol(protocol.ProcessProtocol): + + def __init__(self, pr): + self.pr = pr + + def outReceived(self, data): + self.pr.transport.write(data) + + def processEnded(self, reason): + print 'protocol conection lost' + self.pr.transport.loseConnection() + +f = protocol.Factory() +f.protocol = FakeTelnet +reactor.listenTCP(5823, f) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pyui_bg.png b/vendor/Twisted-10.0.0/doc/core/examples/pyui_bg.png new file mode 100644 index 0000000000000000000000000000000000000000..08d45ec0a68ee61b66203ed38e0ee69b358f50b9 GIT binary patch literal 29913 zcmcG0cUTkI7pRKDDs2@35rMUVf`C#)i3#gU5oroa5d*78Z$V0c5F(;t18jhFm1YRN zgqT18QIIYWY9{{0~aLu41~9pQgkVP?)@=Wcmk5AzKU`s=c9py#H`;hul#|K;F` z`AhHkalOAz=ouTFGCpzYFYV(-$B$Pnws^yA5@D7$W)l6|#pJi?{@rye3kGe+oOcP` zv`KQ;@89NrR_q!K-0FSS=Io|TmC2H8*SEpf`!8BwHruo*ebc6m^sbFfo4huqcWr_j zUhrj?{>FxW*M@%j#-hX|e$%F~t(K-|FGr4XS@So5&q)#f1&4SMgCciK+8ICgpJ%s3 zZvJ?+u?IWmsIJuH0302SjXv=I_s^csn`Hz)c<=xe@EkctW~QdlDprFpJaFJ`r-+uj zTDo&*-=}q?tw`|T!Q4L^IR&xtrltlHN5Sv5rkBe$GyAe1ymLNBZb9t4L^Kbyz#e#O zoXC^`O--+4RH%$UeDKJ?8$p%Znc|~>4bwQF`6oqgr(wX}-|Y(%^<1{C=}Qzw;{a!P z+iyuna4>*TZbi<6#c_1Tl3+OxX#u|l6makVM_r4c{6=;J_0hM0q-z>GYdMA7hEdnq z3lHD9LLtGA$BmsXn6j!~Hyn-9LTl{|?-YlUzG{aNO&K~`Oy;w?F9o|UwJc(SJ^v#_ zR)1`ej+R7g?lkOHfE2!#hLxH!3_SqEtN#d@iy^vp zYr@o&7uwWN6>8;Xu+y+h45EI~j_NaIoWK~-WI0xU9avSSEDtaId8wrdKe(X&c1Sx} zSUhZF+7Q~@@ch!gF^tPhM)r+Rf=vD%anms14u{eIN@@agiZKQFpHq{iLgr1 z)>IVwq2bPdEZWbM!O?Wr<>H4vDUP51lWX`xU#IpzCA44af>HU)^qwcf)~1rff7YLG z`=)4RT60BBz<3>x*m<~EBG#{qrTg@OPg(vtF(iV1=D>m3ADiDfjxLWqcyPyFdE^wo zZ&zOjaob|nYQRIENya9!EuLap6XdsXwFq_Xz*VDKLbrpO?oREDy^SETH|>FfO`^My zAz^LmUq~*;bHvo&8agnCP59g8`koW#UH{_mlt0j#yW(bO0yF+~4WlqcbOEuVb4%u;8SBZ8)d`?X~;@KHP z?BfDdkaz#toFg`VTt_SXINP@7yx(G~Rm}fd3qw}8|Ft*@*3sjA6tOeUj?{)}shGOiv73N9t2^@yF{w!3AlB0vl~5eD}B4@-@rN zUA3Cs^51Nvl>)zpeAx^1iT){=dGw<&;>pP$r{-?L46(K1kE;s3o}3Il<%`jEAijTF zLzK_sF4|qai^%P?b_%FC;_dgrM^{)a4r$8o)47m#_4ru#dc_sWaxd8Jc&q-1i(j*k zk+20ur|i?ckorHgC#CU@@Uq$LWBUT}CxU8|K#Z<^-^$y|#Lao4MLSn7q1MFZY~teW zOBsylzUu;svX9w~H@s@7%Vxgz(`Id>xs=_I6UL-`_3ou4Mg*6?RuU^=S!Hrd8(hN` zdxyU|(eZSwtUHHL6nk(x3_bzI7*xCG?$6swsjw=Mh0fLKKV%ew%WgJ&Vll~*T3*7%kPD@4dRI}iIXtYmIEE`T4%_y>Fu;= z1qQ}&xCIw1GqPf{JYH)FB1n> zfn`e$q|vbkyck)(g602n=pD1S|wI8oY*?2H#`<b%7hit1B|!0J}*9#vuMQ$iu#U{1?wUupnS4x 
zuj;-0y(z+~@^`dv?aZn5N$0LGr(laP&dQ?<@`&&c#aNA=yTe4WFukl;Rv#B5pA-Mxv}RE)!h#1+}vWZzFqyJeFUcQk}aS( zh(AZ`Trn%osrRMdj`s43 zOW*VHhOEFO)K=^{)*xvrGrI7y!}(Y*E9|M{TMpC>5G%TGOs-WN{Z|9a)i9?F0W zxV&w$XBHGAYFs#-@b&U%sjXk=so+U(wR?zMT5h50k)=b=Eq2bTofFzqnfvIa+;R&G zk3w4X^%(nJhdb8~Ega}UbqVf;)twftQObY3r$y)3KRKALfC1xb;)OTkep})tPRIql z+JQJ?!qMKppIdI8RnZk1 zQ<+j{kAY56yd+w5s-DdurQMjZz079(+jqHl>$k@Q1A}=l{SLJM1yw8b(I0^)9bPwy zcJLD6iLxg@%NIuF*7OJr!|KxSuhvHU&Xlr5>b%V)I$C7?+I$^-$v?a5a- zM=zXu9)_3=O&trmC#*}XSG;*D^TeN%v94JDs&jtM>->Y3b<0SvQm%SEfqB%+PFzn( z%z>$a$(OX$P>4sJO6rjmMt}BAgT`1(QW-xSm6g9YOd9-WJUFW{ncGaUTU!6I zt6V5_dTH~p=9SP|?Z>I}hI5_sv7%qu0r2vSWLYEKs&*LQwDQ`=4y zpB|npn-cw4GSQns{a%o!RgbK# z5rgsjO65fOAnL|<(LSvIxRLum-6h9sv0AlJoBWaj9i~)Uglm;WwJbSk_yk}RMc526 zTP~!H<>=S8w$`?ZJ~ZRlL1EXF!i=U>nA(LYIyL(Keu;?#Q((C;(V2?fRHpW@#8{u`!^i%ZOf|9Mqhvc9nm#ztMmGlI>zUjk3+AK>ODuZj zEp6<2GNTH&fWj}12HL%)vHb946`+sRkk(5jsYF;x3wKeJb6rAM5Gp7ET5zxm#b?lS z$VE*o2WZqm_g`8g=!8*Q<0CeP31oXYl69&3a%vO0n_2{#*arUif<%Qtr|Py+T?M@+ zbL&S92@S$CxBU{(_9QS-__atvhK$R;rU7iMNU$371?Ayd>@ss$w0qmh(69Fbu(1(# zcZB`*q|J-K7V>N}>n!#l5Zb@)s5lOD07edyBgPnbr>NK{xd(1!<*PfUjWrI%;n3q7 zS?hY z$~y7e!FFFlQa@|+uMp;>$lFg!d1xP0$H>QhUw3+z-cpu;%K}^Jc9Sx^jXSh+Y#A^; zB$>f_3R%L`+O_w%S2w}b@01tTlYUeGe4*F)_#>+aih?&O_ei6loj~S1{ZH_4RO7!0 z>V_oPT6#wqsyY!|-jqN%7Bo{~gGn}_+@iWysQ}F^N~0W+JZw_SaSGp96M9k}_h|2d zX!Y)SDy#|n(2EnVH1|z>!j2cSWhmN<|R}C1~pP`v9e59wXnJXOCa~Lkp36=5ETuDsIUkH3YC{g zQ*KwG!y(DT_yMdPY*M8OV;@gyKNf~wpC})V6~po^MV6BF`ipbxT859%(Jn4falLzW zgBZALoUnT{Ji-g5H(m_2HXPw;O_%i}rLbWeimbq^D$sM7Ok@ml4z6Afg{MZ_@QKP6 zXf~S8WTQ%GtC6T-AR=tz+0|C@IAJft&Akb~TB18uhRP`4jv<&s-x()UWdTBw0zwPq zgpQv77k3PtTTdNpU5yh?Fnok#v752{xJ8EA@BRRu1eieFuQ*jk=L&wg&rTs*4up)CzHTnVqD2SirqGlvzwep z(=mLSLV7^EydN`M*sQ`b+5Jmq@NdU5RnYH26FQ?cVkel!U0XA=-k&7mC13qay&}Bt zC;qSro&E=Om8!f^bmOZ(?L@hHRZuO)&g0og1O{HXZwd|1u-3v?bazsOmpOLty!zj5 z2F=2U*=6_f|2WtY%IkA!$X^52!cSt=>vtl=qTcQS|Vm&<0W&o1!8cI_3gLARFC{98xuWKO$Qy`ZONc_X~EL{w+fL_>7mch>pZW?9c zSgre7P<71_KI2_ygSVZo18O@V4dacti~cTkuH-!7C1Pw>!(;9@48bo+xOfvCBVYR> zTnwAs#A++MLBS3qgAOpP$smb47kg11Fe59ruzX+R2prUDSIEuE=HCUk^*E?+*JN20 z%c9JWvb-1+RqMA!2tSM8n$cdkQVVQd$V_H&p(4q=B3O40<-8ZTM;3nh=0ih(DjUZJ zy6&de=j?4RYp8D$I=f(o$K1@ZZ7$$y+Qzyx;QI3p7=O4K9Y~Wvf5H$JE<^qG33hif znsXXn3;#WeaUy|tojd`xv(ORp_oEljjZ`XzsU6!4`t@qft0v&A#-|hh*1q22=ElaC zxI^Eb;aPsGWk{y+v`f-@oXtL*_J!gWe zBaI}1whs}*X5|lsyk?(A!ud_qhKj47)Z>KbWk1E!G8R5iwL*gPX;Rq8y6r59M{Q^u zLyBFZxx;uk#blahHix1#`bKE_*6F;L2T60_+LWIX;-k@(HnPzt)M0SwO4=t;%Y&?W zRfB&n4a{x)@KavE77MHD3C_+Uy~Dho@U&L^l*Bk`MW$giNUltt(7ri)bh``@A0%Tufx1nvVURtHRzwF zPTjMK9xGo&kq%-3PBbZyd~ll3d!xTG)G+DWIjF@(KImd;+4${PO7engTmDjjc*Ew} zME*G_9y!Rhd#^LT*@LOiMz!IxQX&SF_~ML{L$WMLaG89@kQOG}gdDPZu{kjsZ`w+~=Z+q-B?Z*2=c76((3|r{p*x)Pm0Crg|Y!rzOq>)3gbA8v(Qtl*X zcy$uRuiG@yfY-|Rsz>I0!Be}4z5y4tSkpJ#1YFx- zS37`UGrpvmKHes8&m4J;qO`TIOM%Sn@ss|{YN@$R;2bJ5qjj~m8TlL*1}>Jl4CHwK zErFSBNnc?Yr-|f@W1739pHWs=fVj7?BZTk`ak%gOMJW=QVDja3z_@1%& z%`wV2m0GR_g8D^{2EJ`Z(!dfPRhu-p@O@F$4?zjW#tby!GqANKO7;uRbX$srRMyqL ze3c0UOe33?F(gF{FL7J)hDK&#P3C74M`^z1WRvH%Gg;snx?TQIWegkj>}nst1!`s} zCN@IUj`}f2yx%^O1Sm&bu^t!LRDTdXi`4OsQu~QcB%utGKaolhlv%&!!t}|UDfjDR zoe9|5(pAP8$MQ0eol{bp>6IWPP)$@h>4-Y0Ut$_O^LsaryGN&h2y!N55B%np@46o#{~cqd7cjm^zT&b;a)+F184?s&uDkj6ZOT5mu; z>=m(NUyXI|<+2(KL!ih=0@?)|*|>+IR^V8Do(9PR+%P9eE$hUagJccs1{I4q990m^ z&Y(>6XY)#e^1Z5j*!0}J-I^-E2PQe~BfJDUAY7_$C4(UpQunceKAUTD*rHyR`>=vr zpy&Gtaq9+UA@W0*Q>IH{UMA%oHbt$5S|oO#YVE6g#`Vby_^EY-5z8V- zs7YMuLch0wUdehf{&nR1vMuuyx|UH|gb9qNg;GGBeY^{?8uz}0YbPKKcLCW&v!4db z3vru3KOO(@2qULfQ3mK$mosiJDG9c$5Uj40dRE%0Y z*k-HpXb2JUNp$NSy+O3LR{>)#I&-~fjN&?17Ex(tVYCORL$qakV{;j6;-mEApr5R3 
zI2^}m%q`BTd?y~38|P*NGi^y2p;!E0Tr~pR4{*}idHATbLaX1s9^fP{z3k#G$HzVJGQ)!LQuHHnn)oCROudoJH$s8sfWXCcN zvt%QkHXS91LH1VMZ>z~2lRB?fC5yq(V$2?Nz41Ynw zsZTJQTwml~BczgC0{ZUv=koL^y*FoY`><1KiZ{!v^|>3_eHLRcboWxc{{XtRifF6+ zZBb{ixT*CB)yBet*5rH^1KoWixeHT%B6PnCvK}FZrCNoQ>$dasZj zjXVAN+w_TV5EK&Z7?z!(`bU}Ud3L0!>SeM(3~(q{rud6tbB{$O407;%z~_bY3vmQxm7TMpIRd$aeqvsOJV}Vy1u>b392Ssoo$QAwiGS7#naAii=0qNi|m~X z4u&=s(r+UIBQ*JKIB8H^J~DDbZO>-kpLL}NeV@?abvcf9NDe?2$k!jruEnZvQ;-4j zl%K6mcZ*|5&rRxiJx}f%8s92w`hadhOW*uf7^wE4;lsuS>x+T}=$BJ|UXsG;jSov~ zRbVY%_y>5H9+kTv-oU2~FMT#|BW1--HX%=86w`z+lyX>d;Z+^;X)=I^^6J{!081P@ zaLa_ovrW2Nmx5q@`)So?0ltwQl>zDBhFB{v((~qj(hBcL2ZVwJxD75N1Q`5Bp=UcJ z0b9(EX51OA{mgl+OTk{M)B7}V7pSMSVC03M(9-ro4bqQYtlJhe2j&_s(87}KT-;CL zyMHs<4PU@AkA|n}6I!sE+&2Xz$OPqb| z_p%(v{k3mcR7>BqAO$SH<_hmeyif^jjJ+~+TI*s*0+wH|`AK?l2zlYLa6k&=Sdu2f zna!+*C%N?`iJ|2&`#%#|vBL#g9#}+qiWh zgdSsWGinjbNqmaYsH*-Q-P+0(7a@wiks^V<_9 zqON@_-cfvY_s%7+JHlJ&JP{pyKU;E9_gpeh?8VbBc@Lyc^jPC;;Lwue0r>Wu6a!3B zP4gctp0GWS+YCg78QJpT*xiuQ^t$o6m(h=cy#j@U<@30dl@P+%cQjHtoMtTygc;9Z z8gt%ro#=(jhN(@WhY8rcGPq{qX!Rgc+G=My6D2ScE3Xg-?c8G&Wy~gnMr1&j=|0g7 z8FFsy;1|hb@j|&?u+E`h26S7Ri#?4Sv!nYhej98S&A;GHzPN~*4@D4iX-=tXT_aX$ zXC#%zFF`WRDkDSxJEUM?(lAe_Zov(zBU9G;%QP z+GxRaA6~tHTTUn}naYxHUzG4_YOuQpFF3#J#mVbF5q#!NH8|tf(1=*@k_k(#%W2zB5C^3- z>F`)m4}*$ss({!Q?5guh!8xcpsEK}0m{PY1+?kG|#p6UBx+=XOjdhs?Hmd+0k76U7 zWKXFK|2zK$kC0>9r~=K%Ld5}ZD0E^_&bH0a**J$)w;UPJNakx;9CwyeD$ z85y~eB>_53NKz8M#R(V4Ja$gKx)l`JBrIyecgJ8RCbRX1J?B+rV>b3s%F_q(gbNBh zj#5#}*fEyaxQRIEKa(JIWxExbTP1*Mp-rM6xXlpdOsw0C!-(74AS(%8BP|#~NjtU) zt9hxe0QY@uT}uKeZ&bp_N&)j0qEs;C%1m;G_!Z%V7F+>}+>fR&~wN zd#Q%Ouz5CXvRTwa^ELK9EryLIE%Yz_3`!I>U_85E z#6QOJFGX$z{Ut!jx)Sc=wSp!uvhp;&KmX`-Xu2n^#2-QF9fei4uP&4XoB}#*AS;U;{Iq&A)iLgsZ5)b89Bhrr7SJ}_Z5)Bsj9+QOH>k4L z8E)8?WMJh>gCCoI?bt~2)#=#2h+YdUMphPZdXT_RG+vMaw}Mu8xtaV=qAG5ZiDbJS zwM_!#wg|GRoY)q@tK2XrwKdH+oY1+rA(|808G+&LrWhp`jFo^O8DWYqwIT0#gS7{X6%_!pIyJ8A%B=$Bf1;kC0RO2J!aI6ujJk2R^3+@QW zwA0|Y%fCEhrXnEcBjj9Io4WM+;{X^u`?r z&*YyH2jxAKy0q@m^0)~Ui8EBv>}vatFWOzRCFPix&T}Ig*aTL^OoEu6(IAIySBrz)TX0wg37JoHm=lpOL-IiV+5wW zzf+Qu_UQ0;Nx+PVF6~O;4_tTBd^{%Rr1GBl|A6;Fxo2Eijqo!Kd zyPFcw-Z)s!?b}W0I7D@z3T61wr0lytz`aAw4c0K-eUrC`Lvo+OLbie_H-4Sd{4wxU zbELW9{D|9;`zD3ED4nWf%?%gW|6N&ZW`*Z|zKu(1YOs|eT-=7KtAUjk2g7gtitk9G z5f|CxCo#GM@sqNehTu?DJdRn>a6u~8IVuDT_rQeb^9DE_eQ1!CAkcz$<$Dzk=bzxO zwPUb)q9iWcZ9k@br-PcaI*{$zXpW!^{{d)y{9=*z50np!de(%*OEtXSO7S*T0j%zE z-wS^##YJCpv|MYp*6C<&@L}h?ZxaI!A2o^S6}*xGuZTf4Cl<#{sk23_PZ1dGAIul6 zC65Qr&~#S;8Bpgsu60j%)bnJofI!{lID{OJA;ui|l)r#G66QV>VLpBs)2nbU=S{-j z&`()#^SN>qi7GwS19rb8Ef zwTZS3JiSGb72b+QE_XGAvUB#Uj#Vr0=Trk}(Npd#u680ZXzPh7-^Xyw><=tIKREf! 
z3!VcT`@}~>{Y>y#iZXLuyLK@-o8kRCqtb?P`iwnSfyUFd#jqh=*oW7QK8^{3zXf>z zLT}I4oMzQtZG~#a1*W{K21hXo}qcs{;LI@bUd;}#y94y|$x$vI*AfD?^ z*kzAgP%~HklD7qG)`hKw1#pzsUp1k3PNTKURhBAL;9ZHWsy2Wwzd-R84G`I?`zS&v zqz5NYxJ9wtnLINDeU)2+@zllQZx{08aD^YOJ z_4YmA-ws9w5n{!$t8x^aAgcY>efv*Yj)ypvFjrS>BpV!A-Vbu}ikcsdtgmJ5!Ny)R z|C1u*$2_2|e)Tm-kgg>7nS4Mu{84_U&T0#`E=M-RN$rfZcm00V3N<9zyGey9A4hZQ zO`wo^vs+hxhJ4^khddK)#q!nvr5(XwIZ6QH=6zZkM_v_JX+q0Ugg>P~?GFMW^7jR< zrp{X`vX(PlHh!<1@Q}7@owgm|GTjZt>+RhyQ!CYq+aemcY#m9U8z&AaD2;-`t{gRN zK-xBNDDg|r-PvBwd-PG<7Us%Fv?4{>=qJ$gps06IU+FM+1onP{nEI5u&q2D8LEbZF zaWvmv_V__s?ZwG5w|iK`VI5hN&jv zXnL4B&d*HPzxb<~m$&Zg39Psk8(`{o0Eo-mp~aeIwvG&-ZJhAmB9nSzslSPJ0KjWXQZ9bp1k;WhJ66NkkhS|aQar-#Dp zT{`v(JJtxpdz#5z+Nj8h-oFlqT*M$mVv7v85}kEZ|6m`5gq=cn(N1C-`?6q{tE8VY z9F6fFmyZ+j)dS&R=C;r4aMHE;C9!TQxQliQ(+HIOCi`OyjWOA)$9JrM9Hls8JkXt) zsscF3FEiF1^B0L2@1c}9;kPo_Uw>HqK=}CyQE?jGMb*PJ4jmqCMqW8v)u{6rQC%ht zly3w1kKZEqVEKM)4_y>t9~XUC1<>c8YL5*{t;=?C+6wB&;SxRsCVxaX93Xs%h+?D0 z@ng-%bCng1@(HwPi}kzQ)h7HYN!T|!J4w!*bZZqzkTw1Qdi5{i#jrCd@^?Frb%mel z)(SxS(rz=msODpV5?HkHm^MZ&Lx*3y_DHFBCneZh)0m=hUPHKm2ekUuJDzB42kL&j zRRKKb|J`_IwD>U4yY)((ux-s*0_%B$Sw8LdL{N$=K?yy@AlVh-`a-AvujdT1`;ZEA z{+2%z6uzWa;;iY1PAmqmjOA>_2H2%U&lKEa_84Uz@_dEKVIHL#2Ff~!7Qhq20c3!K zbhaAE2_MT^Tp3+oKgh2!)a?9fp#ge7W}Sw@*p=+(|zbV`8Lh8U5z4uz!4vB-pQ;LB!K{mp5DR_#>GY9&8$3y{EhB?*ly3Pae<_UG&tiqez%(87f&NJ1mre1 z9Ot$;i0~SGhT&1Z=Dxy{LOpPe`CN`)#oB{yKP&|ZCI_}*hi`#*#ys4C6{pEI>Ix&r z_w~lj2ec`!EQ?<`Ac}n&{UhV$Ejh-%c-*7ZR%9O6!wCO93_jxT%X}?R1oVe?LYKzn z`h#E>Q{N9rh@w$(Lvo*rZc?Gt ztka)j-w_kRfN+NX(|0{dJcGwLhER`iabp(+q1a&H=hLF>tj3r6>U(r>U{c(zFu z)=qgO`!P$Uwq_}DF*#bizow9@23LMvbd(nHG%$AVT|eGE95kSw<0xG;GPk_zSM_5A zlv)+uW}U`h{ca>P^k2gzsU-NBO4))6$#YrILE_7^m@E#4KQR-X<_i z(dhzG!o>??HZq~7QZm_^Gt|&32V^fZ57bsY*C|hpmZ|DuKd)-c34Gk^FEaodWPbs( z1`ZIP$%o_LGeVzwiYwNN27`+4JHRLS@u-038P1pL$W}-Rr5RZuaKf>?uCrr3y5fvXc?#_BK|OxC47z5uvmSrnk=e2=uFTh~y+da&^d`i|#9J#| zC?)AsYdiGDVHO{?uZNDS(DPSf0t@lZYDp5&@gVo*m}7VZt}sW8#klf9G(3Wcv7BBS zvtG08!o>XR@g=d=zO%R&)5i$?OPP9yys;NAEcY=+6O=b2naA$X&N@LCxW#9t3*Zj? zW-g4W-k}XTmEBuyGa1W|)%`@HIQw1YE0t$Q%a|T?dM+@T4&IOtlvpgiVYkQ*8d>xs z+Soi_87I#Etsn9!#ON!_`@{1!-vVM2tK|KleP)LG@+VmEO?L6vx}S-(j#%of#aP=k zFSF9$tB&!uu>JZnBlJ(Ce?cJj^$6jEW|tCSdxfuDXMwt*WrXSOG2>+yF{q{7=O?Ne zKi`vPmFQb`?BTnbh@t8sNk|wvsdv-GBzk4Qn@(77e8YNI&9(Q)(DXQ9&&!5Nu8>*l zMg900DPS0kY+z)}bYwgL9?zcg9t<>UWBK;<_YSL0vV;03V~K{{FU5AU$J+!5v8j{3 z^RxcF7>to`a0?@~u(SCOOu4U+v~)MJ+vVQM)WJP%C!H3Rm?pE=P49hMPOly#GuK7+ zvGI=A=}oN4as>IoVvDjr4KtY9>)qqW3=;OLuOwbgS-FEPB9pDTWdsM;1uxT-Z`Eor zt1C((vIj;^O|ppPxWfBJe;LYrO6`s9Se-(hzE<|a#gqQarBsxk=f*O%UjquDkws@i}H^;gAsckED}5{^@69`jc0Xk3xp-ddKGF;;Gnp zZ0!!SwBFbrhS7!>)OyV8lw`LeCDZAt07VG~aQri-J?X20iN%gAGg)uq%9O5@ewhxa z81D1}Ly3N$)*IQwz-~lTSF7*owO`%_w#v7P9>IB)BqQCGA1*!4V$7l(mf3WA44J-O zaQ%CbvnMbc<>X({7#XGb#khPIjPItvfA}( zb;k9Z;n2x%Ii=dI&8&(al;qO1vD?etR@B?J`h|4JE6S$UvfWz}$R;HVKgSZc?{R6! zlAJT};NXH&hc9Nnj3GC%-iuRmOJ7BANI2=v{wH)udB%tVS)p9n;5AvmT_e^vBee;? 
ze4rWWb@4^-(8+0bkfR)c9kOm%(I=&aZbr_KUU0pri2CZq>TO_;;Rn$m&;4!KcEI)J zkl-st4ZleYROf$axXd2UE0N3o-6s;0R(lNOCUWnIFE+pc^`aI@1C-XBS%GC%QXN_rJH091$zNPm6wrtkY$QuS%t@ z(rGV)--RYCkR>kf!1Bcr!R}P)u=LU#%ETAetf(gE*xnt0r}iV-DjAXLTj*-1Abmv~ z%byZ2aj%oVCIeY-oOGUTep~s7T{b@UfbU-y-#MzY;%$%RoN+dA+y)FPE7!!9MtrYK zTP@SIAAYatwrFB*zX{9F*;?veu5vfR7cW7=Oc;g6H2T)iGN#NS3gI8~S*4qa9kHxNSH|wm8j|lX)b}96-ksWl4qeX8s z@FzD7U8j-FT8}vl$SyvkN@ASK|NeESv?^dkC#W*2^NSQXnalVH9~*9e9=z&5{2C4T ziKV@M*e3YZI~gD1luTgQmiizF1LYLHy6wcG1MUc~l0s^m@Q$kx{hP{=AYYVB%BnxX zG&3=kdq9g8AR{CO>}Jm6eY9#hi{I>i9(;bCLeiM<46hLRBr!C|&YeAO>sP1Ca&Se1 z&4Rqs_PHZ|+6df=oM^uA4`9%NS7kI3t7pN{cP;bu)+L}}73v4ZE6%koi-CP7(xMg~ z!Uxvuq5Fr8Gt~lQlipijGY1C6GT-Wt4Z9r(u-g@yPPt1V%^I4`l64zQ**OPi^g`e; zJwC$2IY%}Qm-MQ){&(<@7=9g#O5mQp@wdZIe93EUb?|T%Ik&invnCBVe7x)B^E3iu zS92(!CzhRaz}(<$3wa^LJ@9M@{-l?NO_%ST=Y2hD|C|P70!(%=2R>$b9oe~H;$ebK z6JyG@2n_0(#}EGl>Q@5u`53N?9WLozd>$gno!`p!RQxD%+03jS^onSw z-ctzQ42U+ce^If*g4Lot7q=j z^b+~u-V#i}d?*gG8uC1)pMlIfSGzN4t$0LD)nt0qbYlKp&6vV8- zW<_vA;Vk7R%8S7Fn6~9|UlWX-{@!j6u)Y2=<(zQ1Dn?%(Kze*{r*kMIUM_Y_?ao%{ zwhprYXKoA?eE?AU-s`*ZFDLod6Z6@M?5vJ>#Io@8y!(-5lGKa7)7LcW`q@2y5;!<< zICW*HI@Vr!jH3ThO}MTC(6EVuTIi1Yg*%9IYs7u9)*eoQ?$NqFZ*^ZR6HJ~bzh9-l zv$_GFX|^qXWvX+V>-9gncl>~pWNu4eY{$7%s%;2|wc2Cwl-LdXxZ|~a%FtsTVct*b zI$$|NB{GMepCj0GJAuAgz9#yBCMS>v-kv>HgC(+p9?6c#+ z3zl%eG3--B#GNue!D74DHN45sc2!2>R&XW`@#%1L_GX=(6jzDG-NPH%yH8GWl+U`0 zVVSUm&pjCEOZHKn{sb}mPUos*t-IXaqenp*s(!247Kny--XBo(r6;6rHd{$&7iD6x zMf|$)y^}X+hCHdzlqQjznjk?UVn@(L?70c089n@&!-gr}Rw+ofZL{=r&?iL8H4RMT zKmsfMbvFL5yryiyYM8a3wm<(mc2oA&PbjMXaLN{F0Y9b^pmFxi@2lw7!f!Jer(6U6 zu|KghR7ZAf!%<*R!UiqblKo!C@gG3HAuXO>B2)Gv|1Th&>*x6a?;rV+H(RkGhV8iY z2lynS;e#P7rc`97zvUI3u@Q&xY*)dm({46rp>%e01?M*;z?T}jfGpV_-yY^xQs5L| z_ffdSIS%D_m_aSsm)%cfJ8gju)#nDtgxXP?vQdh2bDpEszfeAiP`f)_Atr7X{BQkH zXCIBbl>4qh6uG7JZ4}pWfv%)pHIPe`cHEW%xBB3D-Q7`2j0d zWy3ka?dYdDA0shE3^R5a-F$D))t*1cuE2&@(6a-3E%;gGD|P*6j#x|)Fv?EiBB?Sn zGrGxb$mcNRs|yUVp??1MlCI7>8V|BDk0Q_q}U)n-5Jb=TdE&0!KR)Ty$>6w+_(#ciRB(^k$)O@VI^WQS}J z&W;39yU0y~IxOPncuRm&u4LYJDbUj?$4fInm;m*}BeW+rq<^?Y&iYmr zdoSLGdzrMw5VDHsTlC5}`{NOP!l3Cg`lhNf&QXcUGP?EKzM!eHSm8~#K@V~4+hp_* zTGaXwHJeOZ-SMzh4C^OHkrPP+M4KKsz>@5vgDKKcBFrUmFVkFrL0+1Vt`jh*XcWyA zBTi_UZ37vHEP%nZlQHvbTLR47Po>z6_Q>e&rZ_xawg#FMD;^j)LhwnV$T#_lHLiSqeb+|~o zal)iS?$V1aOs~{<<4ovGYm*5||B8+w6vizh>ZmRR)S0YtDY)N8SP=bY$I?a+~EY zz3;+oH2P888!v}xy!{EkdXnvLhCR@cGnxQuO~!W7R&PCgF!V_@Gnf-AQ%?6faJAkn zq|qP#4#6dG=%a|vovkO8XFN#lk*c@Y57-32pFOyc)mHR{<3C16g))1i>>^Db_&ghG zVI>JJjSoV%A1HL$He|h8=||1bR#pG(yQ4%++ok`1I{WfKD7*0ON@%gPA%s@F_O~Qy zn(_7~icq$(jj4p}Bg>R&MpCJSHiS@FhwR&!F*Br+Y>CM-E3%tsL}p?dzSH~re*b)b ze*NV+_gS9j%;`DLx$o<`?il7$v+7>!7YA+*{vEzzF}`rud{u$<;7qm-zU}XXD#^rM zb92E|8BNim=Hd(wvJxk6jv;UuD7@DFtIbX9ya_+d!8hk3Kfq@fgtjdD&Drdc>@7Y2 zntVV7!MIKb_J+a36zhODkVdq&_vN&La$3uY`UnZLgZ zQLLVe9|47KzJD&qL=*f!Tw}>l*EuFaUOUJGh+K*hABmYLUy~{VyeNDYOaJF9 zNnEnk>gl$hVKDNDgPA=T-;yM;=-HREy8n+=$osv$pwOX4Ec5q+Sx z%`)*T;T>g}wMMZRbA(=CyU1$h_+6kEjbH9AcSa`kqqNw%aqOi~^vC27M zNHEjY)uDVB=P+a*IHLzkhRz1si^FBu0>vYRr?88lKJqY^BwuTICPGoZqQ+Af+hsf?9%61oJ1-dAw{X2}&M<|k_*h)I zFM$@8r#>@orXKJCxUCfLSp|HFmE?5*@qmbrB+3xM?^xe``%w-iicxS%?Ee4+m8Nvz zB0B7SI-Dc*#`2~YcJZpKiRs!eh6Xybp^lPr0EozmeC22OVU^(Iml`I+Du8_6Dv?vr z8uD)UhmmGB-3+3iFb{Y)5sAzxX1bn9f7Nmg96_p%)Y&UMai7~vR?)g#5{|^xyXb_4 z)~eKg9Yh#g1~emE*iavXQ|c_u)DxU8eO_b#=&Adh4Xvd6TeR-s^Cw%CDyVZ_J8bmC z@Bb#QrGr;nP;mU-c%_K1ZCfq0TuZ1$sVrenn1dL<_1D9eplArFvCNDnu3lSL*~+-A z)!}k>d1TvAtCe-9XfK(WO&L1A?DRb>(p7c=^^mN7Hb^T=*Myq}V1@(K^bNlRM?WZG zUFGYVucxdFwbC3aoUrEq+Ao+sL^0XU%$?w^1=NHv*8IhjjxVD&K z#FfQ%#*`CJpEmttitN@AE6JbmrwjLjqfR?AeC^AlIxeD`I`rJ`CgC7*Ef#}re7Yd0 
zIa5hGSRlE*VQZZ|?feCHbJio>V-hdijuMVy*7bdQTyU*gP;d2w zluUj3HA;(}98)^j{_bw<`_xT&)lEvCo!@a$Ad4nq)ZM-fhGDA+u`GIQ`O3wsNx$ep}% z3m139&*91t8~N83(eXNu=@7FTm59~xu%~;TZG;xLYuF@S!g607W3_r&CN5_!h1W}l z6vvNA3WxAVnyJxM;b+Y!4yo@bz!?k~CFSJKco==L$6^G}<}Owx-}oZq^A5#tFc8>< z2Y#SLZ|E*u&(b>x&-m`BS$pyjssEW|{-+s}@FV%0a}PMs<-uRFPHEQubBKh@gO|#c zny0T7DHH7vE-%3u)hYN~ThqEz1 z6Rl3+&oiBGcH=^0R+je~9{q8rm7lyzgemiwySO-+9V)belhDzfkL&s2>K{3Q54T&x zcwyr0uW(}%QLV$2v_Vs6M|pZn&@YYVl(fWXZa@7B%gZe1!pEf~^d2kx6DuF*J*O|JPQaq$Zr(5#U!JaXu;X^VjX)-kLZmAQa-*Zle3ygg z)LBsFdfd%0bIIjtBB;WC^k0?6@34khZgQgzDL2Ywe-ItB%Dk$b1oQ6HFzQ=h(rGi zxBEO*eIqNl(LQD^i3`v5%q=9~w)P)G{Z|Dp$VLAHx2syJ#=YLo6cNrIWl;-SM9-@_ zvX@I6(7gPrj=Gyq^lxab@~6YL$zS}Hmot+crp;*0&#oPAzd$$OmQXxX2UCrn?q58T zevbG1@k3w9(izbsWvm)J*H`EAS{#;9pTc^ z8nqPEuCkG7Jl+N)ox)W$f#Y2T+-ID;x=1yLzIf?lL~g(jmhnuW^$|^9lMW8nzsRwa z_Pv*IoL$#fPpr;MZ{d|8T$UZW!Ch>tD>S@? zh5g1Vl@6b`&3IhSE4U+v?3a1AH8US!JtW6MDf?qV;RV{CLOLuTVKt=iUtv}S+>Joe zQ)#~LR6+ISK^0cA%=I%S>qI@c*9e^MM{T;Ee2gdpM3#=vAyGK9AQ%XddAi2G4!H5`Mw-|majv?{ zy{GEsI`&95u@aRxu%_&DWKY5}rzt1jGzNtxieK@A7{&STPux?MiGM9elIR7b_ zacu+l)CRx(Q*Q(7LKn)8x>lSC`g%EE6=(UhYn{j=;k~wj^P5%b=QCf=rUi4qqJsZU zu%I1KyYBncjvOmSvKcbfXTG}^#QJGZ0ZwV{nd#BoFNft%7@uurS4v8tfH=4P^ZJZl zIZzM_OR&hzSBvs}nnZ~g`*9cxU3;H+H{8og&uf8(fUwE*LN2LF((QyPY-y5w+e=JN z9$CKj>#X0?*LgXy;?mO&T3V-5B!1F^C6upN z8_->1m$0ndB#H&SUGksZMTK3XlkzXROU4?%4HkFMTemH;w3i3F+)Df~ z7Y!NMu8K9yEHAw=MwgTC3Ou!F^k`4Hu*)4DCk0k;q$M$!f?C{cXuPUv(-)jMiHZzE za?PIgRk%6-dOFiVZ`o$zaWEQ5Z4Nn-7{HhdhF%<8Y>J$8%t$jwKJ%@XuiQ{r$x zBHv_9%SSHxJ9#Ukgf{4oC=9wF+7X3UW2D|qlYCVH&@v>E=Zeg+to3<%82#%q_}Jg; z{Bml#x2WzBqV@%!N3%Q-zbv5O>940f%wA?3UfxBD!_fm276InPkvovv_q=BU;CPwN z5k=j>s~~zFQkeRP<`khAVzh-M;$Mg;vI<*GSw0$bPF@vXI;qL>+Kmx-Fw(Q%OB_cs z%ht^@U5ZVgy)90_U}Kx8q0Uyznozd`_rP^L?A&~+gEXK_looHttBZ193L0*eg-&eb zID>E@lj0ot3KbS3{E6Dre~VwTpa3eqt&-v=iaikZo@@GF@DsKJ9t5YPN=hSEocvaB z{g}IBmC-B=+3w^?KU^(=$C-P zk6vU?P_I*HcLWK8D+4zFm8WVsu#M)RIqf%|iU$zCgM*9?gL6-|$MFsH5FPw5WII3bEtFzjBi`T_vsD3r;Wt<2f%?%x){+04@2(L z*i$-UQt#di@b&1W3($Q^#>CFRQFIu^1XL0-vd5AIUba=JfXsb0i3FDtc3=#alW4yS0dMv*%<= z>r(r<;tNNu6R!*!as;P?&42e2N3r$>JNYixv*`-??qfvZB?omklAdg3#*|pR0OFbA zLhVOU#U}>rNHE9S>@u*eEH;oW1KwXP8CtdJ)5Q9n>zgaxNxw4HXEip!>TYMqW3Lv9P}OkBZ*j<0QvFrI zb%r-KQonn`p;TU4782U5hm-=OVRgOzP59_1P1I9LIYY@o=IS;kNA_=E`k78CK$F?| zw@KUzRH>YQ2EaOxjyRKjV2ua;#X$^XtpD!rRtHwfxP4%$a%~%pR26HCCa2Onp+>=s=wO-*2H8!R43HiG`bCEk5RnG$M>`1a95J@Bfa95daZK$n56LovA{g2x-U+$&jc?ii4}jlrs>%?d(Y37kDej0hMiip z>dsF$tYm=^4!o08a??6bP~H;d?Q@2jQ>pBMBh^|Ir|BCj&)n%8JVF=|e!Ww8fyHQn zXQ;hPqO$w-bN|@=34f%`K=iWRtIKnE>b9vh--pNvaedmR=%u^(f@Uv&abnvjjgs;# z?Rpir^pBW8y*R~tE>2lDm)|`HecNXMF*n~Bb*QJkh%Tc#P~6_stEXIsyp?rJfI_ab zHZ#XFIEMWToO*>e@_u(YkmJJ!MYp0xQZB_YJt*c{tt4_Zfmk>MugxLiC?|Es5tmTO_3N!;meS`wYe1ew$LVWA4buf%fdEP2~5}MDUd@D!Wz`t%KN_Vni z;78Bq&#Snm|v9w99Z=cA`&4n{|7Et`$(Yb zI3lR9YBUKdfPWpKy{5FvZ#k<*I~rh7oq7psanYslc_D-Q4ngP$Y=cy^Wv;SS=he>+ z9ryvue4zcsk9K;;o=~2YPjpwW&DF6vB)YKKc@4Jqe&XlN;(HLHez(f2Y+P>C5g#32 z^9Z5?c!#?95CT>oQuCm-iuTyLCJid0ido=^Y>7dku@ogUuY}x!Bj~gD%&!rj zf;$7sImz2brfN+CtQS(Q{MwhM+cbeAqGrzH+gxREM0l5p*VWwgjmZsF^T;r*3QZQ(3ymHCLc`Y`A zu1Z=psDDrpnEUOBoBv1iAR>re3hF=minl_LY3}WK7mr+8lIK|ZYEiyJ30Qic-oZBw zYhqx!SFhF_nQs5&{6>0%yROMl3-N(I6L4vrtWRAx)$ftHqo0y7o8D)w40OA;igK0j z@@na|cb*`3J1?VTFKt#Nsa!d&N zy#}$BSBk$tUvfIYlphdJ(!^CCqBmz6}Cwd#!BJX6J80G?A^pl@wF< zcWl4VLQ>|K5}>$CrI^GIB~G2=f;;Pf^baOSmu+~=;JLf+O|0O;dJ(roCCol!?EW{9e-=j(sPw!*CCM3Ik zCjDvXGGJ6obhBO_ysW@7pDz3Ov0#C5p5CGlk8%*)>vSNl720+NBt2CKRbX;Q%L+ag z$9A5lf6$M`P4(4$Za>~=78DatQB!NNR0iXPuZQv-M~e+-=5ABa`*2_s;3yv<;!{#1 zuU8M3xqPf!$p6#14?9_GlkaP)TU+t2s>_gsJ6g44TJ$2%j%@Tmyii3e%wlpnqE;Z)H)&2_c 
zP)PlBWm>cqWRNn%6X*i#`Vq()tGjqhA(qGJoXH4;$mt_TMMK6SgU8lW?YoUU{LhEBQe{ap*wSXm-E@ zg|umonNrA5>;SS_XVLREC1!N{^beB>bmt1Y3+?F7ZkAXX%UIB}or$f7*FtJx`_wh< z#pTzB8M9n|NA{nN1RPc zuzV-vo2|#@SWbKume|sHo!k(7nkA;Y)|P2A=Dm}DumuIb5g)z*GE7v+AM1bEwqPhf zoE?-?muii-v;ngEn!S@2?u618o&pDFHsF}FgO|>p#I9UM|AP*(p;Dh@LETBKrz#e} zcubEy(njw;F34ODT~Q_h3Wawrc2)1MdAb$P&JtAcFg<#so_XRwQ!Sq7=gx30pBSwKSQ?>HWm5&ayVK!V*xWULiE zMAgCj=Yo@Vlf(ttsQz_O*CB|gJiCB%%u!$$?}j|o*Fo9D_`d2QisQe)9nE;aPqz%k ztx>8ox`*>)w}jYjWas>H6ZvWkW(UpJf$#hKecscKvv{3X;2cx@+3GghSCJ*GKf&NF*x8>#q|P1I0S(h`@TB_m0n{nli$}^TfXA$dJzloo>j*~RX3IKIryiEUm%N>#jAZArYh=mwZ}&rL z%--Ry7V+YK(*rZb@Gn#!r7)GjkjwrwY>l=c16Q@AbM-dut=$oK(Ya)_#Zfh}q{=!~%vDU9u^V>jp};694J8w`H9%y~7QdJ85E zMu?iQ18Pck;nd7C9li;;m<;g+8SKr_3m-2rvtTCt{u8!~zW*M|G=L=j5ZF)YhH7Nd zUsDyqT`%2*dU@kF4t{g9zx-W%fgM}wcV#gtSYM4K`fKel@fDe+N1I4Tn9~tYM=k_@YMzPj(7*yhesmjGLVvr05fb-GW*Z7WH;fDfZgS| z*7crtL}4pFe5&sRfNv zLa_7vrOYOFv(S)aRQUVXZ!22#T2@XI)$zlmIBH{1viA0HjnTAEkFtTsjjWKY_) z$wFaoC|a`yVQ7nA4NCW;6?t}*K3XJiHa<8M{LhQgoi@0_txe2wT(H1en`5lV-ms5~ zz0nz~luxukG}5kfJ6i`1iM1#&pema}so1n4*dr;!4<0kIk0|_2ipj38XQ6qaM2!NrL7-;kSj3+h=5$@3ajv{B*#-(xpl~~QyPpT%`T%ew*4ZZVi42u) zP|x!Hzmd5s)3J7aZ?5RckRts z`m(NclpNs7PaGZ~I045AAX}zMiF|(;IAhE^w?^b5+ndCb#YE9nQ%vYLDc}^YIm7G) zVhc{WvgCbY*R5sU^o)1l$v36Ega3oAef3~-tp;lp^uTxraHl4(8a6+}k0_Zm#lglD z0dA-f17zvy-PJLLzrp^6{%gJPS%0sCBJ`XZK!VMbSyrEGCW$8reql{nRY(Oan$7V2 zddBR&EcP=B&O+TlR-Zog=h=0zN@3VORH<_3mB8Ohg66Dwmu*-hHp8{&$~6!eo(Ib1 zUdPS3t)FDDpVdH&YpRcAuDJU7r&t>5j1DTyu6}j`gVZ~nHvbX-dk`PN=xeW5Cjn*6 zSZWk6MCs38uc_Bpx}kksv^4hfw|P>D{~Jmy7XKmw zcQ(c01X-~>Bl<=ZczC$=G-r({Exj+g!048U`L2+w52i0S4{{;R9LcEC;B-H1mSjUy zDw|oEi>~fiRmN*a*zY_xkCfe;JPu9L&{6Da0Yha?-(irRVT53X zH95tDwvxC3r`FV-i=bzakl%n-29L0VgchuY;3W^l@@&SYSc(mg&>%}PEuA!VMgko^ z#fL4DO#&iz6HDx}O*BI;Q6k5}$O4rI;zsCCsk$um1T=b0vY*M0E^MNP@iYfDSi|`G zKGFn72P5!2%`%!PbD7kH=4c*XoM(a^r24YG5Hb>}z8Td zsibsCofHo+vTp2FQ=KWpOX9zvrRvq7Z2fgSISL+Q*LMlg0JlcY~wmaqp$- z)kUyW1U?rj8ql<*+z)1uQb6H^i|9?(uU zQYJA?S6_Fu)s9j7QjU9QE&9Hcff4{cj!$h??@tw7MotIe`ZB}RNaZdjtm+xs=J~9^ z_UK9*X98AplIHqB=I7Ku|11i-f1jp5OH2K68Cmihx+M-!QvXu_@%{Fz*YEu3)H{W} zF$O-zktGASXeOsF=H06g&Y>JOi7Ek|G~sI>%1~ zbMU`h@}$W*%0J$aw3F+PBf(pe;$WIc4Q1> z3n?3aM&G7H@3UMdVnjRj0Y!JOE!JUHzwRg1SyH#0iWLWx-lJ^3?N_dP?a~GTv*%lp z%H_W>=+ZHasqEZur4~bZj)or1m`C$EAZxPymQ=aQjo{q^=O0;XMT@#-MgHH_qicW7 zv|!_fQI=G>7SLY`0QC}-nTTcghoF2x=weCT{HGjvtv{mAhT|WHdX3?iwt`9Qyri_* zbm4f_<~vxXuAn^`+qq=b5?Ekc6>~hh-^cuaPT10?XFVWQ`-r3Ug z1%HK1&#G1Hu#N;P%L5tiDxav%1&VZt4<-}T`OC?@qFWOi z2}KWa^LsF8z}%mQXGi6fu$sZ!R*$5ug?shq*4eF8HkI~UX}i~V1!zr-XPd#&PLdJQ zmKBGnCb`{BxEV7WaB@5H+F9_1XqSdUQb?cq-D8-QQzqE&j$o2b%OVX#|1^}(hrJx? 
zby8wKNsbjqJ8O8Fu(#HUQB&T#O^9Yg6xj4qjsmM6=xi1*Dpavo+^w!yiO{hyMky$f zD7)S;f8kk-xELJ_x?-rMUaiqisD#jC4^ z5FXE^xh|S<6j&;6M|a2gI}e$2<3`byWC0FIM3`j)QKlhdf%`a z=67bLP+!Z+(*%^H(gOPF6d!6W34f!>5~EZKkb33U`0 zI=X1S^}eyM2`VH-tm+5tIuT1M<>JP{6FF-ZVG$&Y#otRZ5Ypk4dit_gyk?$-YV8f& zJl%p=Da;$Y%geA;O8-_O&ieX~B%SOSumO@9ou4PSZG{7o^`sC-I2 zA?RGtWh|&{uj6iHB`A-ieejpfEsU{0)DNn@s*qT69*5@B#+7d@jop~rI@!i~7^vTq zT!Gi^MsaNLod)x3^hf5hI=54|3S8fCPl~CHLU%l>wCYaBOcT|)a~|>AJDn5H?+*fq z@7f=`{*ZUC$nT7pEy^uWXFZtoZVNNN8RJ{SZH3bTyQIdoEv%jpC3**^{<(5*3o|7| zVO!voKUba)cIU+8XxC)&yzH=xtgTPNoiX%MO0x&N@uCSABr_DuBvaTh_{#ABI=DBE zvy5mn*@NEmtUOO<7Z@uPh`9Z*3;h&=r}y3N<|)isbe6;cxUE-z*l#7qz3wT)<5qhg z79E5L{bBP5829Y|Z^0G_CNNQgZbaHgB!P^CYQnxHbGrkPvVg-dR8+`WZO{GQ3-aGx z_||n^qf>$}+5;@bPO2=~>x!#JYk^_>d67LeUdIpIl(LZ&got^o!U75Z9C(n(^Z=NZ zQa1oxTGhM>2nHrLx5TBQJwM=+*;}@v3?J~jRv#&mw)b2}LwPhtu z2bYqyw)WgrzBMy6Djo=@FM|w*QPYb3JA08=1>wo}i_rXGwY$rTbEEXvo$$STb=h~A zO(13E0g!DnVCSDKlya;G_tj)ThDi-6khL-i5JxVwHi2xG-mgFo%0qvWQvRtH`KQ`( pnaTgR8pxRW|M&0elNGHm8i_FU)}T_5Jao<3({>hR|6aWPe*n#8R8s%| literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/examples/pyuidemo.py b/vendor/Twisted-10.0.0/doc/core/examples/pyuidemo.py new file mode 100755 index 000000000000..68ec79d15133 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/pyuidemo.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +import pyui +from twisted.internet import reactor, pyuisupport + +def onButton(self): + print "got a button" + +def onQuit(self): + reactor.stop() + +def main(): + pyuisupport.install(args=(640, 480), kw={'renderer': '2d'}) + + w = pyui.widgets.Frame(50, 50, 400, 400, "clipme") + b = pyui.widgets.Button("A button is here", onButton) + q = pyui.widgets.Button("Quit!", onQuit) + + w.addChild(b) + w.addChild(q) + w.pack() + + w.setBackImage("pyui_bg.png") + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/rotatinglog.py b/vendor/Twisted-10.0.0/doc/core/examples/rotatinglog.py new file mode 100644 index 000000000000..baacd445253f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/rotatinglog.py @@ -0,0 +1,26 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +An example of using the rotating log. +""" + +from twisted.python import log +from twisted.python import logfile + +# rotate every 100 bytes +f = logfile.LogFile("test.log", "/tmp", rotateLength=100) + +# setup logging to use our new logfile +log.startLogging(f) + +# print a few message +for i in range(10): + log.msg("this is a test of the logfile: %s" % i) + +# rotate the logfile manually +f.rotate() + +log.msg("goodbye") diff --git a/vendor/Twisted-10.0.0/doc/core/examples/row_example.py b/vendor/Twisted-10.0.0/doc/core/examples/row_example.py new file mode 100644 index 000000000000..357ba2775444 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/row_example.py @@ -0,0 +1,105 @@ +import random + +from twisted.internet import reactor + +from twisted.enterprise import adbapi, row, reflector, sqlreflector + +from row_util import * + +""" This example show using twisted.enterpise.row to load objects from +a database and manipulate them. +""" + +manager = None + +def gotRooms(rooms): + print "got Rooms.", rooms + if not rooms: + print "no rooms found!" 
+ reactor.stop() + + for room in rooms: + print "room ", room + for child in room.furniture: + print "furn ", child + if hasattr(child, "childRows"): + for inner in child.childRows: + print "inner ", inner + + room.moveTo( int(random.random() * 100) , int(random.random() * 100) ) + manager.updateRow(room).addCallback(onUpdate) + +def gotFurniture(furniture): + for f in furniture: + print f + reactor.stop() + +def onUpdate(data): + print "updated row." + # create a new room + global newRoom + newRoom = RoomRow() + newRoom.assignKeyAttr("roomId", kf.getNextKey()) + newRoom.town_id = 20 + newRoom.name = 'newRoom1' + newRoom.owner = 'fred' + newRoom.posx = 100 + newRoom.posy = 100 + newRoom.width = 15 + newRoom.height = 20 + + #insert row into database + manager.insertRow(newRoom).addCallback(onInsert) + +def onInsert(data): + global newRoom + print "row inserted" + print newRoom.roomId + manager.deleteRow(newRoom).addCallback(onDelete) + +def onDelete(data): + print "row deleted." + return manager.loadObjectsFrom("furniture", whereClause=[("furnId",reflector.EQUAL,53)], forceChildren=1 ).addCallback(onSelected) + +def onSelected(furn): + for f in furn: + print "\ngot Furn:", f + if hasattr(f, "childRows"): + for l in f.childRows: + print " ", l + reactor.stop() + +def gotRooms2(rooms): + print "got more rooms", rooms + reactor.stop() + +def tick(): + reactor.callLater(0.5, tick) + +newRoom = None + + +# use this line for postgresql test +dbpool = adbapi.ConnectionPool("pyPgSQL.PgSQL", database="test") + +# use this line for SQLite test +#dbpool = adbapi.ConnectionPool("sqlite", db="test") + +# use this line for Interbase / Firebird +#dbpool = adbapi.ConnectionPool("kinterbasdb", dsn="localhost:/test.gdb",user="SYSDBA",password="masterkey") + +# use this for MySQL +#dbpool = adbapi.ConnectionPool("MySQLdb", db="test", passwd="pass") + + +def kickOffTests(ignoredResult=0): + global manager + manager = sqlreflector.SQLReflector(dbpool, [RoomRow, FurnitureRow, RugRow, LampRow]) + manager.loadObjectsFrom("testrooms", forceChildren=1).addCallback(gotRooms) + +kf = KeyFactory(100000, 50000) + +# make sure we can be shut down on windows. 
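+# (rescheduling the no-op tick() twice a second keeps the event loop waking up,
+# so a Ctrl-C is noticed promptly even while the reactor is otherwise idle)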
+reactor.callLater(0.5, tick) +reactor.callLater(0.4, kickOffTests) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/row_schema.sql b/vendor/Twisted-10.0.0/doc/core/examples/row_schema.sql new file mode 100644 index 000000000000..a545b5eb8bd6 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/row_schema.sql @@ -0,0 +1,65 @@ +DROP TABLE testrooms; +DROP TABLE furniture; +DROP TABLE rugs; +DROP TABLE lamps; + +CREATE TABLE testrooms +( + roomId int PRIMARY KEY, + town_id int, + name varchar(64), + owner varchar(64), + posx int, + posy int, + width int, + height int +); + +CREATE TABLE furniture +( + furnId int PRIMARY KEY, + roomId int, + name varchar(64), + posx int, + posy int +); + +CREATE TABLE rugs +( + rugId int PRIMARY KEY, + roomId int, + name varchar(64) +); + +CREATE TABLE lamps +( + lampId int PRIMARY KEY, + furnId int, + furnName varchar(64), + lampName varchar(64) +); + + +INSERT INTO testrooms VALUES (10, 100, 'testroom1', 'someguy', 10, 10, 20, 20); +INSERT INTO testrooms VALUES (11, 100, 'testroom2', 'someguy', 30, 10, 20, 20); +INSERT INTO testrooms VALUES (12, 100, 'testroom3', 'someguy', 50, 10, 20, 20); + +INSERT INTO furniture VALUES (50, 10, 'chair1', 10, 10); +INSERT INTO furniture VALUES (51, 10, 'chair2', 14, 10); +INSERT INTO furniture VALUES (52, 12, 'chair3', 14, 10); +INSERT INTO furniture VALUES (53, 12, 'chair4', 10, 12); +INSERT INTO furniture VALUES (54, 12, 'chair5', 18, 13); +INSERT INTO furniture VALUES (55, 12, 'couch', 22, 3); + +INSERT INTO rugs VALUES (81, 10, 'a big rug'); +INSERT INTO rugs VALUES (82, 10, 'a blue rug'); +INSERT INTO rugs VALUES (83, 11, 'a red rug'); +INSERT INTO rugs VALUES (84, 11, 'a green rug'); +INSERT INTO rugs VALUES (85, 12, 'a dirty rug'); + +INSERT INTO lamps VALUES (21, 50, 'chair1', 'a big lamp1'); +INSERT INTO lamps VALUES (22, 50, 'chair1', 'a big lamp2'); +INSERT INTO lamps VALUES (23, 53, 'chair4', 'a big lamp3'); +INSERT INTO lamps VALUES (24, 53, 'chair4', 'a big lamp4'); +INSERT INTO lamps VALUES (25, 53, 'chair4', 'a big lamp5'); +INSERT INTO lamps VALUES (26, 54, 'couch', 'a big lamp6'); diff --git a/vendor/Twisted-10.0.0/doc/core/examples/row_util.py b/vendor/Twisted-10.0.0/doc/core/examples/row_util.py new file mode 100644 index 000000000000..f674604a10f3 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/row_util.py @@ -0,0 +1,103 @@ +from twisted.enterprise import row + + +################################################## +########## Definitions of Row Classes ############ +################################################## + +class KeyFactory: + """This is a lame, but simple way to generate keys. 
+ For real code, use the database instead.""" + def __init__(self, minimum, pool): + self.min = minimum + self.pool = minimum + pool + self.current = self.min + + def getNextKey(self): + next = self.current + 1 + self.current = next + if self.current >= self.pool: + raise ValueError("Key factory key pool exceeded.") + return next + +def myRowFactory(rowClass, data, kw): + newRow = rowClass() + newRow.__dict__.update(kw) + return newRow + +class RoomRow(row.RowObject): + rowColumns = [ + ("roomId", "int"), + ("town_id", "int"), + ("name", "varchar"), + ("owner", "varchar"), + ("posx", "int"), + ("posy", "int"), + ("width", "int"), + ("height", "int") + ] + rowKeyColumns = [("roomId","int")] + rowTableName = "testrooms" + rowFactoryMethod = [myRowFactory] + + def __init__(self): + self.furniture = [] + + def addStuff(self, stuff): + self.furniture.append(stuff) + + def moveTo(self, x, y): + self.posx = x + self.posy = y + + def __repr__(self): + return "" % (self.roomId, self.name, self.owner, self.posx, self.posy) + +class FurnitureRow(row.RowObject): + rowColumns = [ + ("furnId", "int"), + ("roomId", "int"), + ("name", "varchar"), + ("posx", "int"), + ("posy", "int") + ] + rowKeyColumns = [("furnId","int")] + rowTableName = "furniture" + rowForeignKeys = [("testrooms", [("roomId","int")], [("roomId","int")], "addStuff", 1) ] + + def __repr__(self): + return "Furniture #%s: room #%s (%s) (%s,%s)" % (self.furnId, self.roomId, self.name, self.posx, self.posy) + +class RugRow(row.RowObject): + rowColumns = [ + ("rugId", "int"), + ("roomId", "int"), + ("name", "varchar") + ] + rowKeyColumns = [("rugId","int")] + rowTableName = "rugs" + rowFactoryMethod = [myRowFactory] + rowForeignKeys = [( "testrooms", [("roomId","int")],[("roomId","int")], "addStuff", 1) ] + + def __repr__(self): + return "Rug %#s: room #%s, (%s)" % (self.rugId, self.roomId, self.name) + +class LampRow(row.RowObject): + rowColumns = [ + ("lampId", "int"), + ("furnId", "int"), + ("furnName", "varchar"), + ("lampName", "varchar") + ] + rowKeyColumns = [("lampId","int")] + rowTableName = "lamps" + rowForeignKeys = [("furniture", + [("furnId","int"),("furnName", "varchar")], # child table columns (this table) + [("furnId","int"),("name", "varchar")], # parent table columns (the other table) + None, + 1) + ] + # NOTE: this has no containerMethod so children will be added to "childRows" + + def __repr__(self): + return "Lamp #%s" % self.lampId diff --git a/vendor/Twisted-10.0.0/doc/core/examples/server.pem b/vendor/Twisted-10.0.0/doc/core/examples/server.pem new file mode 100644 index 000000000000..80ef9dcf3be9 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/server.pem @@ -0,0 +1,36 @@ +-----BEGIN CERTIFICATE----- +MIIDBjCCAm+gAwIBAgIBATANBgkqhkiG9w0BAQQFADB7MQswCQYDVQQGEwJTRzER +MA8GA1UEChMITTJDcnlwdG8xFDASBgNVBAsTC00yQ3J5cHRvIENBMSQwIgYDVQQD +ExtNMkNyeXB0byBDZXJ0aWZpY2F0ZSBNYXN0ZXIxHTAbBgkqhkiG9w0BCQEWDm5n +cHNAcG9zdDEuY29tMB4XDTAwMDkxMDA5NTEzMFoXDTAyMDkxMDA5NTEzMFowUzEL +MAkGA1UEBhMCU0cxETAPBgNVBAoTCE0yQ3J5cHRvMRIwEAYDVQQDEwlsb2NhbGhv +c3QxHTAbBgkqhkiG9w0BCQEWDm5ncHNAcG9zdDEuY29tMFwwDQYJKoZIhvcNAQEB +BQADSwAwSAJBAKy+e3dulvXzV7zoTZWc5TzgApr8DmeQHTYC8ydfzH7EECe4R1Xh +5kwIzOuuFfn178FBiS84gngaNcrFi0Z5fAkCAwEAAaOCAQQwggEAMAkGA1UdEwQC +MAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJhdGVkIENlcnRpZmljYXRl +MB0GA1UdDgQWBBTPhIKSvnsmYsBVNWjj0m3M2z0qVTCBpQYDVR0jBIGdMIGagBT7 +hyNp65w6kxXlxb8pUU/+7Sg4AaF/pH0wezELMAkGA1UEBhMCU0cxETAPBgNVBAoT +CE0yQ3J5cHRvMRQwEgYDVQQLEwtNMkNyeXB0byBDQTEkMCIGA1UEAxMbTTJDcnlw 
+dG8gQ2VydGlmaWNhdGUgTWFzdGVyMR0wGwYJKoZIhvcNAQkBFg5uZ3BzQHBvc3Qx +LmNvbYIBADANBgkqhkiG9w0BAQQFAAOBgQA7/CqT6PoHycTdhEStWNZde7M/2Yc6 +BoJuVwnW8YxGO8Sn6UJ4FeffZNcYZddSDKosw8LtPOeWoK3JINjAk5jiPQ2cww++ +7QGG/g5NDjxFZNDJP1dGiLAxPW6JXwov4v0FmdzfLOZ01jDcgQQZqEpYlgpuI5JE +WUQ9Ho4EzbYCOQ== +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIBPAIBAAJBAKy+e3dulvXzV7zoTZWc5TzgApr8DmeQHTYC8ydfzH7EECe4R1Xh +5kwIzOuuFfn178FBiS84gngaNcrFi0Z5fAkCAwEAAQJBAIqm/bz4NA1H++Vx5Ewx +OcKp3w19QSaZAwlGRtsUxrP7436QjnREM3Bm8ygU11BjkPVmtrKm6AayQfCHqJoT +ZIECIQDW0BoMoL0HOYM/mrTLhaykYAVqgIeJsPjvkEhTFXWBuQIhAM3deFAvWNu4 +nklUQ37XsCT2c9tmNt1LAT+slG2JOTTRAiAuXDtC/m3NYVwyHfFm+zKHRzHkClk2 +HjubeEgjpj32AQIhAJqMGTaZVOwevTXvvHwNEH+vRWsAYU/gbx+OQB+7VOcBAiEA +oolb6NMg/R3enNPvS1O4UU1H8wpaF77L4yiSWlE0p4w= +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE REQUEST----- +MIIBDTCBuAIBADBTMQswCQYDVQQGEwJTRzERMA8GA1UEChMITTJDcnlwdG8xEjAQ +BgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3DQEJARYObmdwc0Bwb3N0MS5jb20w +XDANBgkqhkiG9w0BAQEFAANLADBIAkEArL57d26W9fNXvOhNlZzlPOACmvwOZ5Ad +NgLzJ1/MfsQQJ7hHVeHmTAjM664V+fXvwUGJLziCeBo1ysWLRnl8CQIDAQABoAAw +DQYJKoZIhvcNAQEEBQADQQA7uqbrNTjVWpF6By5ZNPvhZ4YdFgkeXFVWi5ao/TaP +Vq4BG021fJ9nlHRtr4rotpgHDX1rr+iWeHKsx4+5DRSy +-----END CERTIFICATE REQUEST----- diff --git a/vendor/Twisted-10.0.0/doc/core/examples/shaper.py b/vendor/Twisted-10.0.0/doc/core/examples/shaper.py new file mode 100644 index 000000000000..573d67c23071 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/shaper.py @@ -0,0 +1,52 @@ +# -*- Python -*- + +"""Example of rate-limiting your web server. + +Caveat emptor: While the transfer rates imposed by this mechanism will +look accurate with wget's rate-meter, don't forget to examine your network +interface's traffic statistics as well. The current implementation tends +to create lots of small packets in some conditions, and each packet carries +with it some bytes of overhead. Check to make sure this overhead is not +costing you more bandwidth than you are saving by limiting the rate! +""" + +from twisted.protocols import htb +# for picklability +import shaper + +serverFilter = htb.HierarchicalBucketFilter() +serverBucket = htb.Bucket() + +# Cap total server traffic at 20 kB/s +serverBucket.maxburst = 20000 +serverBucket.rate = 20000 + +serverFilter.buckets[None] = serverBucket + +# Web service is also limited per-host: +class WebClientBucket(htb.Bucket): + # Your first 10k is free + maxburst = 10000 + # One kB/s thereafter. + rate = 1000 + +webFilter = htb.FilterByHost(serverFilter) +webFilter.bucketFactory = shaper.WebClientBucket + +servertype = "web" # "chargen" + +if servertype == "web": + from twisted.web import server, static + site = server.Site(static.File("/var/www")) + site.protocol = htb.ShapedProtocolFactory(site.protocol, webFilter) +elif servertype == "chargen": + from twisted.protocols import wire + from twisted.internet import protocol + + site = protocol.ServerFactory() + site.protocol = htb.ShapedProtocolFactory(wire.Chargen, webFilter) + #site.protocol = wire.Chargen + +from twisted.internet import reactor +reactor.listenTCP(8000, site) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/shoutcast.py b/vendor/Twisted-10.0.0/doc/core/examples/shoutcast.py new file mode 100644 index 000000000000..2280580188c6 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/shoutcast.py @@ -0,0 +1,26 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Example Shoutcast client. 
Run with: + +python shoutcast.py localhost 8080 +""" + +import sys + +from twisted.internet import protocol, reactor +from twisted.protocols.shoutcast import ShoutcastClient + +class Test(ShoutcastClient): + def gotMetaData(self, data): + print "meta:", data + + def gotMP3Data(self, data): + pass + +host = sys.argv[1] +port = int(sys.argv[2]) + +protocol.ClientCreator(reactor, Test).connectTCP(host, port) +reactor.run() \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/examples/simple.tac b/vendor/Twisted-10.0.0/doc/core/examples/simple.tac new file mode 100644 index 000000000000..02b3f81e4c46 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/simple.tac @@ -0,0 +1,39 @@ +# You can run this .tac file directly with: +# twistd -ny simple.tac + +from twisted.application import service, internet +from twisted.protocols import wire +from twisted.internet import protocol +from twisted.python import util + +application = service.Application('test') +s = service.IServiceCollection(application) +factory = protocol.ServerFactory() +factory.protocol = wire.Echo +internet.TCPServer(8080, factory).setServiceParent(s) + +internet.TCPServer(8081, factory).setServiceParent(s) +internet.TimerService(5, util.println, "--MARK--").setServiceParent(s) + +class Foo(protocol.Protocol): + def connectionMade(self): + self.transport.write('lalala\n') + def dataReceived(self, data): + print `data` + +factory = protocol.ClientFactory() +factory.protocol = Foo +internet.TCPClient('localhost', 8081, factory).setServiceParent(s) + +class FooService(service.Service): + def startService(self): + service.Service.startService(self) + print 'lala, starting' + def stopService(self): + service.Service.stopService(self) + print 'lala, stopping' + print self.parent.getServiceNamed(self.name) is self + +foo = FooService() +foo.setName('foo') +foo.setServiceParent(s) diff --git a/vendor/Twisted-10.0.0/doc/core/examples/simpleclient.py b/vendor/Twisted-10.0.0/doc/core/examples/simpleclient.py new file mode 100644 index 000000000000..04907f39a9b4 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/simpleclient.py @@ -0,0 +1,49 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +An example client. Run simpleserv.py first before running this. +""" + +from twisted.internet import reactor, protocol + + +# a client protocol + +class EchoClient(protocol.Protocol): + """Once connected, send a message, then print the result.""" + + def connectionMade(self): + self.transport.write("hello, world!") + + def dataReceived(self, data): + "As soon as any data is received, write it back." + print "Server said:", data + self.transport.loseConnection() + + def connectionLost(self, reason): + print "connection lost" + +class EchoFactory(protocol.ClientFactory): + protocol = EchoClient + + def clientConnectionFailed(self, connector, reason): + print "Connection failed - goodbye!" + reactor.stop() + + def clientConnectionLost(self, connector, reason): + print "Connection lost - goodbye!" 
+ reactor.stop() + + +# this connects the protocol to a server runing on port 8000 +def main(): + f = EchoFactory() + reactor.connectTCP("localhost", 8000, f) + reactor.run() + +# this only runs if the module was *not* imported +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/simpleserv.py b/vendor/Twisted-10.0.0/doc/core/examples/simpleserv.py new file mode 100644 index 000000000000..938db119a1ae --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/simpleserv.py @@ -0,0 +1,26 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from twisted.internet import reactor, protocol + + +class Echo(protocol.Protocol): + """This is just about the simplest possible protocol""" + + def dataReceived(self, data): + "As soon as any data is received, write it back." + self.transport.write(data) + + +def main(): + """This runs the protocol on port 8000""" + factory = protocol.ServerFactory() + factory.protocol = Echo + reactor.listenTCP(8000,factory) + reactor.run() + +# this only runs if the module was *not* imported +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/stdin.py b/vendor/Twisted-10.0.0/doc/core/examples/stdin.py new file mode 100644 index 000000000000..28dc7fdd2836 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/stdin.py @@ -0,0 +1,30 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +An example of reading a line at a time from standard input +without blocking the reactor. +""" + +from twisted.internet import stdio +from twisted.protocols import basic + +class Echo(basic.LineReceiver): + from os import linesep as delimiter + + def connectionMade(self): + self.transport.write('>>> ') + + def lineReceived(self, line): + self.sendLine('Echo: ' + line) + self.transport.write('>>> ') + +def main(): + stdio.StandardIO(Echo()) + from twisted.internet import reactor + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/stdiodemo.py b/vendor/Twisted-10.0.0/doc/core/examples/stdiodemo.py new file mode 100644 index 000000000000..004fa451b969 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/stdiodemo.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Example using stdio, Deferreds, LineReceiver and twisted.web.client. + +Note that the WebCheckerCommandProtocol protocol could easily be used in e.g. +a telnet server instead; see the comments for details. + +Based on an example by Abe Fettig. +""" + +from twisted.internet import stdio, reactor +from twisted.protocols import basic +from twisted.web import client + +class WebCheckerCommandProtocol(basic.LineReceiver): + delimiter = '\n' # unix terminal style newlines. remove this line + # for use with Telnet + + def connectionMade(self): + self.sendLine("Web checker console. Type 'help' for help.") + + def lineReceived(self, line): + # Ignore blank lines + if not line: return + + # Parse the command + commandParts = line.split() + command = commandParts[0].lower() + args = commandParts[1:] + + # Dispatch the command to the appropriate method. Note that all you + # need to do to implement a new command is add another do_* method. 
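+        # (an unrecognized command has no matching do_* attribute, so the
+        # getattr below raises AttributeError and the error branch reports it)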
+ try: + method = getattr(self, 'do_' + command) + except AttributeError, e: + self.sendLine('Error: no such command.') + else: + try: + method(*args) + except Exception, e: + self.sendLine('Error: ' + str(e)) + + def do_help(self, command=None): + """help [command]: List commands, or show help on the given command""" + if command: + self.sendLine(getattr(self, 'do_' + command).__doc__) + else: + commands = [cmd[3:] for cmd in dir(self) if cmd.startswith('do_')] + self.sendLine("Valid commands: " +" ".join(commands)) + + def do_quit(self): + """quit: Quit this session""" + self.sendLine('Goodbye.') + self.transport.loseConnection() + + def do_check(self, url): + """check : Attempt to download the given web page""" + client.getPage(url).addCallback( + self.__checkSuccess).addErrback( + self.__checkFailure) + + def __checkSuccess(self, pageData): + self.sendLine("Success: got %i bytes." % len(pageData)) + + def __checkFailure(self, failure): + self.sendLine("Failure: " + failure.getErrorMessage()) + + def connectionLost(self, reason): + # stop the reactor, only because this is meant to be run in Stdio. + reactor.stop() + +if __name__ == "__main__": + stdio.StandardIO(WebCheckerCommandProtocol()) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/testlogging.py b/vendor/Twisted-10.0.0/doc/core/examples/testlogging.py new file mode 100644 index 000000000000..7a8c93ddb903 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/testlogging.py @@ -0,0 +1,41 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +"""Test logging. + +Message should only be printed second time around. +""" + +from twisted.python import log +from twisted.internet import reactor + +import sys, warnings + +def test(i): + print "printed", i + log.msg("message %s" % i) + warnings.warn("warning %s" % i) + try: + raise RuntimeError, "error %s" % i + except: + log.err() + +def startlog(): + log.startLogging(sys.stdout) + +def end(): + reactor.stop() + +# pre-reactor run +test(1) + +# after reactor run +reactor.callLater(0.1, test, 2) +reactor.callLater(0.2, startlog) + +# after startLogging +reactor.callLater(0.3, test, 3) +reactor.callLater(0.4, end) + +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/classes.nib b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/classes.nib new file mode 100644 index 000000000000..71cb4598734d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/classes.nib @@ -0,0 +1,13 @@ +{ + IBClasses = ( + {CLASS = FirstResponder; LANGUAGE = ObjC; SUPERCLASS = NSObject; }, + { + ACTIONS = {doTwistzillaFetch = id; }; + CLASS = MyAppDelegate; + LANGUAGE = ObjC; + OUTLETS = {messageTextField = id; progressIndicator = id; resultTextField = id; }; + SUPERCLASS = NSObject; + } + ); + IBVersion = 1; +} \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/info.nib b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/info.nib new file mode 100644 index 000000000000..4f99a2de8efc --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/info.nib @@ -0,0 +1,24 @@ + + + + + IBEditorPositions + + 29 + 127 344 318 44 0 0 1600 1002 + + IBFramework 
Version + 291.0 + IBLockedObjects + + 204 + + IBOpenObjects + + 21 + 29 + + IBSystem Version + 6L60 + + diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/keyedobjects.nib b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/English.lproj/MainMenu.nib/keyedobjects.nib new file mode 100644 index 0000000000000000000000000000000000000000..e5caaf0fb2c7f7c50062f77a5697a18c5c15f865 GIT binary patch literal 14896 zcmb7q30zcF`~P{)IqV>pZCD3p*kN!kpyHOf;6@6HAnxJ_BcjOQ%;1`Oj+NPBrD>UE zCN61emS$<1TUlyZnwC~(Wm#F4?PX>CpL>TbMN?ux>}bz zH}@0)LJ1?B_z+*>$3GsCYI9aites|e4o}4&SFNLdc&cOkBzu+HRi5g0G*l{~C1nHb zGwszqn+JvySn$NwF!+V|lK_%Qa!4-eNV<@lNKZ0?j3lGTXfl@A$rLh;Oec4eh2#OU zm@FlalU3vivX-nT8_82-GkJ!*Kwc!f$pLZ@N4!dokfY=c@+NtUyhlDD=g3FoJo%3N z00bz6K_Vo93CxfTDUb?jkPaD;30cq;_fSKTexiAlIgF9d$EQTfUAUp(*;c+EA4y$1utcM-&JiGwAVK3~1 z18@*thF9P<_zb>+-zlL$Ddp6M`cZ!xL_=s8jid$|O=D>SHPIBBO4Dct&8BUsm8vw4 zwx^wFS9%leM!VBqv=1$!x6pxfFfFIU=?FTKj;58=M#s}?T07(>9x{|J`YyPoJb4>1Mi>K0}|S z+v)Rk7k!cLrZ3U`^cDIVJxq_#*Xf(|1bv&Hr0>u(^nLmPJxf2L=jo^PGkSr3Nx!0B z(@XR_`UCxu{zQMJSLpBbPkNR9%?P86u{4&+vRF22%dAXg`K&$bz&f$ctPAVPy0Px8 z7b|4FS${T=6|)jHh?TM-tc+FQ)i5@kjbvl+Y%Hr}Ha4Ew*#uU@CbG$RZ!)W6^=vA0 zvqm-zsTH%CY!;h?$GL1ClWab_jV-{dJJ~{ZH(SK+WB0Qq>_N7aJ=Os`Rn`* zevH4#-{Qyl3H~-e$xrcj_`CcxKf~YS@AD7%St#Hi4o$6ZtgHJ-2_=Cfhy;@m(uRZ* zg@loC5Im86k$k};;FtgoZqU2Lnj)!3aC zsa170muo~xS&4mydr)n4rJ|ISRn)p_$Jg11)z()#rn@Ri%7!=`?$IS>_}o=tt828o z^o&%fO|>`QLAr&G`g+V+n7&+iRbrdc;@KciI>RL98tEaVoobUjM6|0>D4G*zO?Nma z;cCMZF3gRSw%V z1!&K@k()_(RF~D^(VpnJn)DF(Zwi(*lwTVN} zu9ZrdcS4zaW}Us*=9;XV#zUl+t!gqV5~q|^Iqmj(ZOSzcy86)!ngIH_2oW>F8)gt9 z0>v~jT?8m?6e=1;kO;0+VrI{7*RDg3sa^Yab8<|x3p%yWF}2TcKWEO!^66MYXVuo# z*-*nCzFsAcDc6XfLd{f?al}T(V;ocwbZD%dnsX1ayBZwz)pjQ;w5)ObG#u<2*6eY@ zE!qf|N97YpjVM}2CX!k*iA*MSB2<`!MdXUQ(>m>X;y~&RWGZo@zFnweH)$lr7$t@F zx;hkVnyy?_XP`VOV!7oL{_VHHcey=vKTh8 z)@|>VM-)sDTJ$d?>zeTAA0j=8B5fcmg;6Ao(1`wn)F!f~S1$1o*g~Gx-A~hc-&%WJ zwWi*+WDD6!>PZ2L+*8k%+Fj3tXUVo^Bk9A}lV{0x@*LSgp2y*79`or>(B57vdx8q5 zU32hvab1p70JR}sYK46o!3$KH*~!Y?Q%?c7+Gg<&UMf8_}w1z5(dF- zWG~rA_KS3pDzd^k{6$2{@SfGxPP@z1rKJnJOkN>}l6&T&rPGvz8}0ZSD(x^TEmLHS zuJX9&KlJuGSyPm2Eztftk{c&$8b5}UvoTFJPrl~In%tEbk;lmiRJskL@+3y(DWvc& zIZe)xVszwU+YG&Ipu#neFScRIo`D){sP(v~8pdf#j}b)w6FFhIfx>zM>Ao-8ddzZ` ze28QQBOz^O6FEx?^a$=ypvA2o!g&R#$AUR?JQF@9pPwN_Ug&b8!Y zk2OEg|c;CQTmJIEAlnDM83h}w|ITIIi@ifwU|azS73^D)YW0b zP*Ud77lA{xB!}g)c}Rcn5HVTgiF{F|goi=6m?UbvY3}{m74kdzgZxRZVl4he{>D33k!fM0)1_yfs%zP>+w1H#Hn)~*uy|qaxfX9)c-U&|?M}3M*+iSuUfmlr2kH)8pr(9g zgWV;Di;6J+aM4Q)6NO@+=-?6SW(eN|5fBMcV1Q_dfmn!xct`-FC=eY*C(&7S5naVi zqMNu`bQe7;mFyDx@N(0T+Ny~r%`Z)T9nL8>w`p3QHP_T&!~8SeVRKfS+{m<2iPJ!a z-PFIlc;J6N3`bdYJTX|GqKR3lL^r3l{~R9cp*6U7-~W7;s0&qI)VHr`TKoSdNJ^o@ zX&-z|Jk;nkhg0Kd%AHZryP$7D-;SD>V`{Fgvup6j)l#Tf&zj5o_Wn=S%}X3yR9OC> z5A*bied?=h4X(yIG+ni6u*2m>Z>??6b(a6nIeJJzHdxS-UeINCufaPxq+lp&%#P3m z>uD~=VtXAgw9C`517&~0%2gW zhtw^ikLZ6Ak)lSYJM8Mr6u|(Z=|R8r49kV$Cdkz=DV9Erz(Gym z>nVLRpcIBc8I+S1FqAwA6<9(vt=A#I!yO5)R0z|tbttNL*_|4I){_CU@GN|&C=Lty zL@T)6U^t9`kuV0IStld97v)Vb3Py{;Vn`E=g-TH(2Hlwdv3P(D^M7fj67OnetEu=O z<^0A;STvv-CzT=6X*o%k%d^NDn5dP6TD=I{oHLuC#=HJWfY26K>2-(e8Kem&VJ)6r zp!ryy-ICjJ&WPEFO>;~g+Ix~eI-6r1{xo&ng8--7=o;!#igU2$-+=R~W*I8z1S@$d7P)6Zz4rj|Ffi21j9CZG#pZ zYhi(BaT;E&guAo~26w|fS`CAH$z)gr_rZO5ydNGQlQB1Ya1Nf7+nhCaw??Ybsn=vN zLD<89BzG4G8xX8x92c}hV65vY&l|_1E2|yede+6WtfjCF$#`$BgQZ@a^!|U5>0w=} zl7qf}>tC_14qNTh}bLLbkhS{_8 z@j-`9x)s}_S=zPt7JXO+PiVD9ugo4Tt@3E;AAJKfkz!awhQeAMlQ~7bK!A!1@6bsD z5&1exjWjj{AEJLeNoK+ZttEhs@Dyw!b73=VA@ksA*ouw80#XOhz_U;Q+sHoHPUgdN 
zdSXK)&}V96?KB%EZ}dI{BR%Wfc4s|iTrD+ez+NARDyVfJI>Ab6n~6@G>tH9m=mExK&?T=w@C@GrFKNg^7j7Nw@fzD3R-JW&T~_}nx%r*)?Ur^3>{?tjNA@9jRbOv&*KD8C zpcSwtIHZM%)~-*0!&{YjI08rEb$A1g!JF_F9ETI|Hk^c0@D98Sr{N5|2k*lNa27s< zbMO&-44=Sx_*Bdi3&h>x0kKRxDxMJQ#ZzLdcuwpTd&SG*us9~(7N^CB;=K4=d@U}E zpTsZXH}SicKrX=N@CAGc7cqehw2ilGO=y{(8N?mpb}<_|k7*=|ut#%A z2$B#f!5|@GxQ6DYAf79o(jx0oCWP*^+ML3P_NvKKY|hCo2l$Hlx`ng>`Xb8QXd&Gl zaQ83}MfI|4q5&SOVdbT`=MaG%`UHn_ipJRHZge7VPo|-5XsC{ZN@~Yzk%Pb+586hT z!?^|@Dx?6z#`Dl~_nJ*;IE@f@i91_00F8oQ$pW|{7GiMq()OWDJ?rXBZilI_6InF_ z0~&*iwqfP2udx@^>rc8|=MhKa#r@)*)_E9dB27Xbi^RR+K5gC~n*OPoCQFEy5I1}< zGIm?7*3t$q!}N*Zk@-)$`+nW`_g{2zXX#6vxKCPg>`nD)1>*8 zm)ijJmn!Wp?tx*V#WtvMik5J-b{Q_vVl7^22{F<^7OU3OYEecDLC4V{x-K?g%I!HGG4nqdc*$2)Dn^GBx7l<*#qg`I#^y(#G1yjbue*V# z9eGX=Pm4{hteQ@wwYcg@a3vh~wTaj)wv6guTWvQDa!XGS+NcEr?a?Ge?v7kH^@F6sm>5Dh#jp{oz<+f=ZBY}_i3Sx z@;26Mr11O(d9TOwTslv@BzCnOA()=JH)%y>AE6%h|>i|7SUq)K*i}%C_*X?|WzA7PGLe{9V z2E&EJj~>*M$Y%()ZX|EAxlAJ*jV8~QN21$NQ&Xn{ z)!O<-N25z;rq96jw&3_7Jtw{rU$oBgV=POb;DWvs7l#(sIb8PA>0_EJy?)t(*5~vK z@vXSjI<1Qo;WpCxrb63xa@&Vh>xb)Efqp~3B@^jo@uT=be5dIPUq8|B#rI>1XJWK_ zH>(h#;BJZw3!&B->ifAKgz_`}MejNhOblwoS2Efrf){A~tO1Q{^#eKmMlTbi_<6V{ zgti6N^4TBcDoXJyx&rP#HrBbX@o72eFER-ST|tD`@Xu|^Ys26R2FRyQ{DA3sK(90~8DYQ2|$HG`Ri=c~8 zm&J4ki^L5@eAU9D=)F-BCDAWvJ7!{LdW=qB$t;DX!mpT@uS&pogiHdLfUg6CC4@?dk`OH+U4lh|Dj{D& zI|+y&21pnvAwfc#gd7RoB_K4yirQJiAPIdWV72cgL6I<2La$aGgr(C&mVplPm-riZ zW2cwaw?5QDhcJXo5Bg<^l z?53W&cxB}pkkZ^6J05n^YTC}zqg`efX$HkVjp&rR4#7KxO*Y;AI z^C+wE3Nd0622K!*X8sr+@hlJnD28Uy`2S73Hyg!9d#!1dkT^=yr^y33y*$Qvd5l|4 zdm{Ki>HmRAwNvp-Fsx6vE}jE(LxUz>TM5={C6YNAHj_6?=r*h`e!`%+*o6NrO97kCW_S&iDn@4wGX8eM##@rUkLjR2v zdP(~K8Q6NHp)zl0cX&2EoBUvoM47|egHu^~1CDCaZ@X35yp#L63 zGZ~R~EDNSXyi)Dis@9YjonUXXlk60GhrP>Avoq{H9PvK;fSqL@vUBVs_Ay?6!p^f# z*=Otm`<#7&*I%-W>?`&)yTrbcV3ROjLY0JS33dq+B-BWlD4|xuBngux)Jd2kpJhn8&e zo7r&xQf(Lg`b&Y9^xIz>RGx=_E$4ez$U{rpwY=|@nTL4f`*5H8UG(4>ILAzIoCU!tI*&!7*JFgiS^BWn1YmUZX8yP{{vPQ8 zA7Y^kgOBMsc$>vzT*P5=On@US5H7-L4cfz}7)6P2lEuQC*i1w)Gkk)vqtFQ$D?2bM zPh(D-&r;wFOU2w)fV=J=5M%SQcml458+js6;wEn9$vlOp@-&{#Gk7M?;@RB7b9h^B zi$O4uM_qlBj%s@VSI9Bz!61qJ*y`d@bRUgl{B#E8(()?<9OL;RgvnO87~U${yub zr9>H`3{zfHPAI#S+myGI2IVE?4ds629wkP3M(Ls$m8X@rl?-LHvPoH}Jf!SamMGg4 zhjNSZvht{MpVD91syv}=P>Pi@rL(eM*`mCv>{MP+<|u=dS;`J2N;#lxR%(&KWJWxA1bfIPmcHD=fvmuCH}jQzfXjZ*{7{fXP*H+ z#Xf_5hWM2GRQQbW8SOLH$L3SzGr_0UXR=Sd&s3jTJ_~#n`rPgFpwCL5RX$sMp7%N6 zbJXXo&!;|@e17)%%QwU~+&A9W>f6z`mv3+1e!fM%vwZLLUF^Hcccbri-~GO)d_VX7 z(GUDW{6hW0{0x3EesO*Yeu;i2zhu8uzjVJ$ze2wfzhQpk{Oo?y{1*B>==Z4KO21Wp ztNotx+wAv(-!8x1elPj$^E=>o&hLWXw|>9*bAMldfB!)LApaEq{{Ex=C-}SlrT-%T z)&5)jcly8RzsG;C|I7Y|{NMCH?tjAny#Gc2@BFX${}Dj6ufzkw0^$P90qFr<0}2C5 z1I7eQ3aAUH4`>K*2Dk%e2P_VFJYapm#(+%$&jsuXcr)O5z}o?511<(!3ivtDFHi|I z2Brt*2X+q}7+4WFCa^Yee&Fqa3j$XL?hV`@crfslz$1aL2c8PN5cpN#rND24SddSU zUr=O_Imi;!BdAx<(4c8SGU&db1iu}8Hu&@4%fVNI{|LSs z{C5Zq;UT^u{vm-O5g~?{4}>;_ZV7!MbXVx!(EXtYLr;fZ2>nf=3Rip;e~g1@C00pREJ}Oye7jPw zOvU)P3q5-|#=v@vqUSJ1K30BI{!p$ee}~bq(6H#R*s#PfQ&@6XMp#yuC9FqSuds1p zjbYQn?g?8OwmNKm*w(OD!(I#$$Ku7*d3o5Qoh+l6-s?-<@Wyk~ex_?Ym@ za9em)_|))O;funThHnUeHvIYUec?yLPlo>%0TF=_Q4!G*u@MOo=7^Mtt`YqsN+U)` z)I>ZGu_EHJh*c4*Bi2V8h1_H8m1d&8dexK7s@x9~w#t(@vkDn4hH(tcgk6#nNF8;~*m*QWJKNbIO z{F(SG@qZ-n1Y<&WLQaA;VNgP8LRrGlgz*XXgoz1t3H1rn6K+koE8(7mMF~$QJd?03 zVQ0dN2`?ucO*ozKUcv_nejpHVRyj48%6V}`N6@fPDi<1ph0<9Oo~<1FK? 
z#<|8-#?{6(#tp`&jN6U7jISCG8;=^lHhyEgZ2Zaii}CM7pTzLQ$V5Y8$HdNwT@$+{ z-jZ0HI5=^1;@HHR#F>c;5*H@kowzA+OXAkVy@~r1k0hQ+ypZ@s;>D!Eq~N5iq+61P zC5=cLm2_*;+$52-AZcOJLrH6r)+IfebU5i~(ut(^lD0{xAiY5=?2P zn@okKa#Mw=($rvbnr=5OGd*nDWZGleYdUEAsXRbF-HM`Bz&9lsN%))$|d4c&Z z^S$Q#%}dP7%*)M>njbf>Hm@~5X@1JQ#r%wUyZL$ZF7qDqKJ!8IA@gDLMf2C@Z_MAD ze@u={j!uqC&Pui|7N=S;5 z5|a{_QkYVlGB~9yr8=c1Wm3wN6nDyPDfgxA7UPJ1WquXIS~=>h5C>GA3L>7&wbPv4UMO#1fpo#_YCkEXwq{!99m^glCv zGyF5MGIBE1jBXj-Gsb09WlYGJmmxEnGS+2m$k>~)KjUI%cxF^)Or|+AKeIz-r_8RI zy)%n5OESl0Ix=TuF3Nm9^X1G-nU^zv$owOVXN6|PXQgIkW#wn>%Q~E$m)#|MP4@ci zjoF*Cw`On4-jTgCdw2HU>;u`aWWSbuH2YZg@$8e??`FT3eKz}}?DN?dvcJs!I{Vw~ z@3Vi({x$pe?5h@HVHRIYfF;-xY6-VQSz;{lmPCu$l4{AYWLw%=axLvG-7MWL{VfA5 z<(3LdrNw5cwlr8~S{7LDw=A}-u&lRiv+T7TwVbrPV>xH}*z%3#7t3!szB%DJ@j0fP zwmAhkopSo*l;n)cnV93wnUix@&XSx*bJn)4ZR@mlw05y}v-YqSTKigytOKostV66r zt;4OOtYfV|c4+F*5Ar&(uOZ?(>|&bQuSU1+_>dY^T%b*c4X>k8{i>l4;B z*7eqn*3H(f)@{}u)}7Yf*1gsP)>o{rS&v$eS&v&!THm$4XFY5E$a>y-!TP23YwNex z@2x*sf3^N@y{ZzGslIA}8mxw@;cAo`qsFU=s##4{Gt_Lgt(vR0Qw!A2>P>2QwU^pQ z?XM0{OVm=eTpgy4R7b00)p6>0wOXB^PE;qUb!xrZpgL8zI!&FS&Qfnx=c+=TuimaM zP#3CqtM{t+sSl`2)TQb}>T-33`k4B-`h?n~u2t8o8`P)N&Fa(YGwL?=IrVvUr~0D0 zN8PLLR}ZSMsIRJr)uZYg>YM6u^=Mhix&FC3S_m=+$iqn43 literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/README.txt b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/README.txt new file mode 100644 index 000000000000..96010e23df26 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/README.txt @@ -0,0 +1,6 @@ +Requires PyObjC 1.3.1 (svn r1589 or later) + +To run the demo: + +python setup.py py2app +open dist/Twistzilla.app diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/Twistzilla.py b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/Twistzilla.py new file mode 100644 index 000000000000..9a7806c1e1e2 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/Twistzilla.py @@ -0,0 +1,79 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +# import needed classes/functions from Cocoa +from Foundation import * +from AppKit import * + +# import Nib loading functionality from AppKit +from PyObjCTools import NibClassBuilder, AppHelper + +from twisted.internet import _threadedselect +_threadedselect.install() + +from twisted.internet import reactor, protocol +from twisted.web import http +from twisted.python import log +import sys, urlparse + +# create ObjC classes as defined in MainMenu.nib +NibClassBuilder.extractClasses("MainMenu") +class TwistzillaClient(http.HTTPClient): + def __init__(self, delegate, urls): + self.urls = urls + self.delegate = delegate + + def connectionMade(self): + self.sendCommand('GET', str(self.urls[2])) + self.sendHeader('Host', '%s:%d' % (self.urls[0], self.urls[1])) + self.sendHeader('User-Agent', 'CocoaTwistzilla') + self.endHeaders() + + def handleResponse(self, data): + self.delegate.gotResponse_(data) + +class MyAppDelegate(NibClassBuilder.AutoBaseClass): + def gotResponse_(self, html): + s = self.resultTextField.textStorage() + s.replaceCharactersInRange_withString_((0, s.length()), html) + self.progressIndicator.stopAnimation_(self) + + def doTwistzillaFetch_(self, sender): + s = self.resultTextField.textStorage() + s.deleteCharactersInRange_((0, s.length())) + self.progressIndicator.startAnimation_(self) + u = urlparse.urlparse(self.messageTextField.stringValue()) + pos = u[1].find(':') + if pos == -1: + host, port = u[1], 80 + else: + host, port = u[1][:pos], int(u[1][pos+1:]) + if u[2] == '': + fname = '/' + else: + fname = u[2] + host = host.encode('utf8') + fname = fname.encode('utf8') + protocol.ClientCreator(reactor, TwistzillaClient, self, (host, port, fname)).connectTCP(host, port).addErrback(lambda f:self.gotResponse_(f.getBriefTraceback())) + + def applicationDidFinishLaunching_(self, aNotification): + """ + Invoked by NSApplication once the app is done launching and + immediately before the first pass through the main event + loop. + """ + self.messageTextField.setStringValue_("http://www.twistedmatrix.com/") + reactor.interleave(AppHelper.callAfter) + + def applicationShouldTerminate_(self, sender): + if reactor.running: + reactor.addSystemEventTrigger( + 'after', 'shutdown', AppHelper.stopEventLoop) + reactor.stop() + return False + return True + +if __name__ == '__main__': + log.startLogging(sys.stdout) + AppHelper.runEventLoop() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/setup.py b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/setup.py new file mode 100644 index 000000000000..f3afe8a39727 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/setup.py @@ -0,0 +1,14 @@ +""" +Script for building the example. + +Usage: + python setup.py py2app +""" + +from distutils.core import setup +import py2app + +setup( + app = ['Twistzilla.py'], + data_files = ["English.lproj"], +) diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/README b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/README new file mode 100644 index 000000000000..5d3feabe30a9 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/README @@ -0,0 +1,15 @@ +The examples in this directory import a private module from the +twisted.internet package. 
The _threadedselect module provides an object +which is similar to a Twisted reactor in many ways, but which is not +actually intended to be used in the same way as a Twisted reactor (it has a +method named interleave which is intended to be the main entrypoint). This +functionality should be considered highly experimental and the API subject +to change at any time. + +Possibly the best way to make use of this functionality is to use it to +implement an object which actually presents the Twisted reactor interface +(specifically, an object with a run method). That object can then be used +by application-code in the usual way. + +Another course of action is to avoid _threadedselect entirely until the +issues surrounding it have been resolved. diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/blockingdemo.py b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/blockingdemo.py new file mode 100644 index 000000000000..2d46bafbee23 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/blockingdemo.py @@ -0,0 +1,92 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from twisted.internet import _threadedselect +_threadedselect.install() + +from twisted.internet.defer import Deferred +from twisted.python.failure import Failure +from twisted.internet import reactor +from twisted.python.runtime import seconds +from itertools import count +from Queue import Queue, Empty + +class TwistedManager(object): + def __init__(self): + self.twistedQueue = Queue() + self.key = count() + self.results = {} + + def getKey(self): + # get a unique identifier + return self.key.next() + + def start(self): + # start the reactor + reactor.interleave(self.twistedQueue.put) + + def _stopIterating(self, value, key): + self.results[key] = value + + def stop(self): + # stop the reactor + key = self.getKey() + reactor.addSystemEventTrigger('after', 'shutdown', + self._stopIterating, True, key) + reactor.stop() + self.iterate(key) + + def getDeferred(self, d): + # get the result of a deferred or raise if it failed + key = self.getKey() + d.addBoth(self._stopIterating, key) + res = self.iterate(key) + if isinstance(res, Failure): + res.raiseException() + return res + + def poll(self, noLongerThan=1.0): + # poll the reactor for up to noLongerThan seconds + base = seconds() + try: + while (seconds() - base) <= noLongerThan: + callback = self.twistedQueue.get_nowait() + callback() + except Empty: + pass + + def iterate(self, key=None): + # iterate the reactor until it has the result we're looking for + while key not in self.results: + callback = self.twistedQueue.get() + callback() + return self.results.pop(key) + +def fakeDeferred(msg): + d = Deferred() + def cb(): + print "deferred called back" + d.callback(msg) + reactor.callLater(2, cb) + return d + +def fakeCallback(): + print "twisted is still running" + +def main(): + m = TwistedManager() + print "starting" + m.start() + print "setting up a 1sec callback" + reactor.callLater(1, fakeCallback) + print "getting a deferred" + res = m.getDeferred(fakeDeferred("got it!")) + print "got the deferred:", res + print "stopping" + m.stop() + print "stopped" + + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/pygamedemo.py b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/pygamedemo.py new file mode 100644 index 000000000000..a2bec3388f03 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/threadedselect/pygamedemo.py @@ -0,0 +1,78 @@ 
+from __future__ import generators + +# import Twisted and install +from twisted.internet import _threadedselect +_threadedselect.install() +from twisted.internet import reactor + +import os + +import pygame +from pygame.locals import * + +try: + import pygame.fastevent as eventmodule +except ImportError: + import pygame.event as eventmodule + + +# You can customize this if you use your +# own events, but you must OBEY: +# +# USEREVENT <= TWISTEDEVENT < NUMEVENTS +# +TWISTEDEVENT = USEREVENT + +def postTwistedEvent(func): + # if not using pygame.fastevent, this can explode if the queue + # fills up.. so that's bad. Use pygame.fastevent, in pygame CVS + # as of 2005-04-18. + eventmodule.post(eventmodule.Event(TWISTEDEVENT, iterateTwisted=func)) + +def helloWorld(): + print "hello, world" + reactor.callLater(1, helloWorld) +reactor.callLater(1, helloWorld) + +def twoSecondsPassed(): + print "two seconds passed" +reactor.callLater(2, twoSecondsPassed) + +def eventIterator(): + while True: + yield eventmodule.wait() + while True: + event = eventmodule.poll() + if event.type == NOEVENT: + break + else: + yield event + +def main(): + pygame.init() + if hasattr(eventmodule, 'init'): + eventmodule.init() + screen = pygame.display.set_mode((300, 300)) + + # send an event when twisted wants attention + reactor.interleave(postTwistedEvent) + # make shouldQuit a True value when it's safe to quit + # by appending a value to it. This ensures that + # Twisted gets to shut down properly. + shouldQuit = [] + reactor.addSystemEventTrigger('after', 'shutdown', shouldQuit.append, True) + + for event in eventIterator(): + if event.type == TWISTEDEVENT: + event.iterateTwisted() + if shouldQuit: + break + elif event.type == QUIT: + reactor.stop() + elif event.type == KEYDOWN and event.key == K_ESCAPE: + reactor.stop() + + pygame.quit() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/twistd-logging.tac b/vendor/Twisted-10.0.0/doc/core/examples/twistd-logging.tac new file mode 100644 index 000000000000..2302558a229d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/twistd-logging.tac @@ -0,0 +1,33 @@ +# Invoke this script with: + +# $ twistd -ny twistd-logging.tac + +# It will create a log file named "twistd-logging.log". The log file will +# be formatted such that each line contains the representation of the dict +# structure of each log message. + +from twisted.application.service import Application +from twisted.python.log import ILogObserver, msg +from twisted.python.util import untilConcludes +from twisted.internet.task import LoopingCall + + +logfile = open("twistd-logging.log", "a") + + +def log(eventDict): + # untilConcludes is necessary to retry the operation when the system call + # has been interrupted. + untilConcludes(logfile.write, "Got a log! %r\n" % eventDict) + untilConcludes(logfile.flush) + + +def logSomething(): + msg("A log message") + + +LoopingCall(logSomething).start(1) + +application = Application("twistd-logging") +application.setComponent(ILogObserver, log) + diff --git a/vendor/Twisted-10.0.0/doc/core/examples/wxacceptance.py b/vendor/Twisted-10.0.0/doc/core/examples/wxacceptance.py new file mode 100644 index 000000000000..8f73f823da1b --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/wxacceptance.py @@ -0,0 +1,113 @@ +# Copyright (c) 2001-2006 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Acceptance tests for wxreactor. + +Please test on Linux, Win32 and OS X: +1. Startup event is called at startup. +2. 
Scheduled event is called after 2 seconds. +3. Shutdown takes 3 seconds, both when quiting from menu and when closing + window (e.g. Alt-F4 in metacity). This tests reactor.stop() and + wxApp.ExitEventLoop(). +4. 'hello, world' continues to be printed even when modal dialog is open + (use dialog menu item), when menus are held down, when window is being + dragged. +""" + +import sys, time + +try: + from wx import Frame as wxFrame, DefaultPosition as wxDefaultPosition, \ + Size as wxSize, Menu as wxMenu, MenuBar as wxMenuBar, \ + EVT_MENU, MessageDialog as wxMessageDialog, App as wxApp +except ImportError, e: + from wxPython.wx import * + +from twisted.python import log +from twisted.internet import wxreactor +wxreactor.install() +from twisted.internet import reactor, defer + + +# set up so that "hello, world" is printed continously +dc = None +def helloWorld(): + global dc + print "hello, world", time.time() + dc = reactor.callLater(0.1, helloWorld) +dc = reactor.callLater(0.1, helloWorld) + +def twoSecondsPassed(): + print "two seconds passed" + +def printer(s): + print s + +def shutdown(): + print "shutting down in 3 seconds" + if dc.active(): + dc.cancel() + reactor.callLater(1, printer, "2...") + reactor.callLater(2, printer, "1...") + reactor.callLater(3, printer, "0...") + d = defer.Deferred() + reactor.callLater(3, d.callback, 1) + return d + +def startup(): + print "Start up event!" + +reactor.callLater(2, twoSecondsPassed) +reactor.addSystemEventTrigger("after", "startup", startup) +reactor.addSystemEventTrigger("before", "shutdown", shutdown) + + +ID_EXIT = 101 +ID_DIALOG = 102 + +class MyFrame(wxFrame): + def __init__(self, parent, ID, title): + wxFrame.__init__(self, parent, ID, title, wxDefaultPosition, wxSize(300, 200)) + menu = wxMenu() + menu.Append(ID_DIALOG, "D&ialog", "Show dialog") + menu.Append(ID_EXIT, "E&xit", "Terminate the program") + menuBar = wxMenuBar() + menuBar.Append(menu, "&File") + self.SetMenuBar(menuBar) + EVT_MENU(self, ID_EXIT, self.DoExit) + EVT_MENU(self, ID_DIALOG, self.DoDialog) + # you really ought to do this instead of reactor.stop() in + # DoExit, but for the sake of testing we'll let closing the + # window shutdown wx without reactor.stop(), to make sure that + # still does the right thing. + #EVT_CLOSE(self, lambda evt: reactor.stop()) + + def DoDialog(self, event): + dl = wxMessageDialog(self, "Check terminal to see if messages are still being " + "printed by Twisted.") + dl.ShowModal() + dl.Destroy() + + def DoExit(self, event): + reactor.stop() + + +class MyApp(wxApp): + + def OnInit(self): + frame = MyFrame(None, -1, "Hello, world") + frame.Show(True) + self.SetTopWindow(frame) + return True + + +def demo(): + log.startLogging(sys.stdout) + app = MyApp(0) + reactor.registerWxApp(app) + reactor.run() + + +if __name__ == '__main__': + demo() diff --git a/vendor/Twisted-10.0.0/doc/core/examples/wxdemo.py b/vendor/Twisted-10.0.0/doc/core/examples/wxdemo.py new file mode 100644 index 000000000000..3db2c0fd56bd --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/examples/wxdemo.py @@ -0,0 +1,64 @@ +# Copyright (c) 2001-2006 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +"""Demo of wxPython integration with Twisted.""" + +import sys + +from wx import Frame, DefaultPosition, Size, Menu, MenuBar, App +from wx import EVT_MENU, EVT_CLOSE + +from twisted.python import log +from twisted.internet import wxreactor +wxreactor.install() + +# import t.i.reactor only after installing wxreactor: +from twisted.internet import reactor + + +ID_EXIT = 101 + +class MyFrame(Frame): + def __init__(self, parent, ID, title): + Frame.__init__(self, parent, ID, title, DefaultPosition, Size(300, 200)) + menu = Menu() + menu.Append(ID_EXIT, "E&xit", "Terminate the program") + menuBar = MenuBar() + menuBar.Append(menu, "&File") + self.SetMenuBar(menuBar) + EVT_MENU(self, ID_EXIT, self.DoExit) + + # make sure reactor.stop() is used to stop event loop: + EVT_CLOSE(self, lambda evt: reactor.stop()) + + def DoExit(self, event): + reactor.stop() + + +class MyApp(App): + + def twoSecondsPassed(self): + print "two seconds passed" + + def OnInit(self): + frame = MyFrame(None, -1, "Hello, world") + frame.Show(True) + self.SetTopWindow(frame) + # look, we can use twisted calls! + reactor.callLater(2, self.twoSecondsPassed) + return True + + +def demo(): + log.startLogging(sys.stdout) + + # register the App instance with Twisted: + app = MyApp(0) + reactor.registerWxApp(app) + + # start the event loop: + reactor.run() + + +if __name__ == '__main__': + demo() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/application.html b/vendor/Twisted-10.0.0/doc/core/howto/application.html new file mode 100644 index 000000000000..c1e0ff51be0b --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/application.html @@ -0,0 +1,376 @@ + + +Twisted Documentation: Using the Twisted Application Framework + + + + +

            Using the Twisted Application Framework

            + +
            + + + +

            Introduction

            + +

            Audience

            + +

            The target audience of this document is a Twisted user who wants to deploy a +significant amount of Twisted code in a re-usable, standard and easily +configurable fashion. A Twisted user who wishes to use the Application +framework needs to be familiar with developing Twisted servers and/or clients.

            + +

            Goals

            + +
• To introduce the Twisted Application infrastructure.
• To explain how to deploy your Twisted application using .tac files and twistd.
• To outline the existing Twisted services.
            + +

            Overview

            + +

The Twisted Application infrastructure takes care of running and stopping your application. Using this infrastructure frees you from having to write a large amount of boilerplate code by hooking your application into existing tools that manage daemonization, logging, choosing a reactor and more.

            + +

            The major tool that manages Twisted applications is a command-line utility +called twistd. twistd is cross platform, and is the +recommended tool for running Twisted applications.

            + + +

            The core component of the Twisted Application infrastructure is the twisted.application.service.Application object — an +object which represents your application. However, Application doesn't provide +anything that you'd want to manipulate directly. Instead, Application acts as +a container of any Services (objects implementing IService) that your application +provides. Most of your interaction with the Application infrastructure will be +done through Services.

            + +

            By Service, we mean anything in your application that can be started +and stopped. Typical services include web servers, FTP servers and SSH +clients. Your Application object can contain many services, and can even +contain structured hierarchies of Services using IServiceCollections.

            + +

            Here's a simple example of constructing an Application object which +represents an echo server that runs on TCP port 7001.

            + +


            from twisted.application import internet, service +from somemodule import EchoFactory + +port = 7001 +factory = EchoFactory() + +# this is the important bit +application = service.Application("echo") # create the Application +echoService = internet.TCPServer(port, factory) # create the service +# add the service to the application +echoService.setServiceParent(application) +
            + +

            See Writing Servers for an explanation of +EchoFactory.

            + +

            This example creates a simple hierarchy: +

            +   application
            +   |
            +   `- echoService
            +
            More complicated hierarchies of services can be created using +IServiceCollection. You will most likely want to do this to manage Services +which are dependent on other Services. For example, a proxying Twisted +application might want its server Service to only start up after the associated +Client service.
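Here is a minimal sketch of grouping the two halves of such a proxy under one MultiService (the ProxyClientFactory and ProxyServerFactory names are hypothetical); children of a MultiService are started in the order they are attached, so the client service below is started before the server that depends on it:

from twisted.application import internet, service
from somemodule import ProxyClientFactory, ProxyServerFactory  # hypothetical factories

application = service.Application("proxy")
proxyService = service.MultiService()
# attach the client first so that it is started before the dependent server
internet.TCPClient("upstream.example.com", 8000,
                   ProxyClientFactory()).setServiceParent(proxyService)
internet.TCPServer(8080, ProxyServerFactory()).setServiceParent(proxyService)
proxyService.setServiceParent(application)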

            + + +

            Using application

            + +

            twistd and tac

            + +

To handle start-up and configuration of your Twisted application, the Twisted Application infrastructure uses .tac files. .tac files are Python files which configure an Application object and assign this object to the top-level variable application.

            + +

            The following is a simple example of a .tac file:

            + +


            # You can run this .tac file directly with: +# twistd -ny service.tac + +""" +This is an example .tac file which starts a webserver on port 8080 and +serves files from the current working directory. + +The important part of this, the part that makes it a .tac file, is +the final root-level section, which sets up the object called 'application' +which twistd will look for +""" + +import os +from twisted.application import service, internet +from twisted.web import static, server + +def getWebService(): + """ + Return a service suitable for creating an application object. + + This service is a simple web server that serves files on port 8080 from + underneath the current working directory. + """ + # create a resource to serve static files + fileServer = server.Site(static.File(os.getcwd())) + return internet.TCPServer(8080, fileServer) + +# this is the core part of any tac file, the creation of the root-level +# application object +application = service.Application("Demo application") + +# attach the service to its parent application +service = getWebService() +service.setServiceParent(application) +
            + +

            twistd is a program that runs Twisted applications using a +.tac file. In its most simple form, it takes a single argument +-y and a tac file name. For example, you can run the above server +with the command twistd -y service.tac.

            + +

            By default, twistd daemonizes and logs to a file called +twistd.log. More usually, when debugging, you will want your +application to run in the foreground and log to the command line. To run the +above file like this, use the command twistd -noy +service.tac

            + +

            For more information, see the twistd man page.

            + +

            Customizing twistd logging in a .tac application

            + +

            +The logging behavior can be customized through an API +accessible from .tac files. The ILogObserver component can be +set on an Application in order to customize the default log observer that +twistd will use. +

            + +

            +Here is an example of how to use DailyLogFile, which rotates the log once +per day. +

            + +


            from twisted.application.service import Application +from twisted.python.log import ILogObserver, FileLogObserver +from twisted.python.logfile import DailyLogFile + +application = Application("myapp") +logfile = DailyLogFile("my.log", "/tmp") +application.setComponent(ILogObserver, FileLogObserver(logfile).emit) +
            + +

Invoking twistd -y my.tac will create a log file at /tmp/my.log.

            + +

            Services provided by Twisted

            + +

Twisted provides several services that you will want to know about.

            + +

            Each of these services (except TimerService) has a corresponding +connect or listen method on the reactor, and the constructors for +the services take the same arguments as the reactor methods. The +connect methods are for clients and the listen methods are for +servers. For example, TCPServer corresponds to reactor.listenTCP and TCPClient +corresponds to reactor.connectTCP.

            + +
TCPServer, TCPClient
    Services which allow you to make connections and listen for connections on TCP ports.

UNIXServer, UNIXClient
    Services which listen and make connections over UNIX sockets.

SSLServer, SSLClient
    Services which allow you to make SSL connections and run SSL servers.

UDPServer, UDPClient
    Services which allow you to send and receive data over UDP.
    See also the UDP documentation.

UNIXDatagramServer, UNIXDatagramClient
    Services which send and receive data over UNIX datagram sockets.

MulticastServer
    A server for UDP socket methods that support multicast.

TimerService
    A service to periodically call a function.
            + +
            + +
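Tying the list above back to the reactor methods, a short sketch (the port number and the logged message are illustrative; EchoFactory is the hypothetical factory from the earlier example):

from twisted.application import internet, service
from twisted.python import log
from somemodule import EchoFactory

application = service.Application("examples")

# TCPServer takes the same arguments that reactor.listenTCP would take.
internet.TCPServer(8007, EchoFactory()).setServiceParent(application)

# TimerService has no reactor counterpart; it calls the given callable
# every `step` seconds (here, once a minute) while the service is running.
internet.TimerService(60, log.msg, "still running").setServiceParent(application)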

            Service Collection

            + +

            IServiceCollection objects contain +IService objects. +IService objects can be added to IServiceCollection by calling setServiceParent and detached +by using disownServiceParent.

            + +

            The standard implementation of IServiceCollection is MultiService, which also implements +IService. MultiService is useful for creating a new Service which combines two +or more existing Services. For example, you could create a DNS Service as a +MultiService which has a TCP and a UDP Service as children.

            + +


            from twisted.application import internet, service +from twisted.names import server, dns, hosts + +port = 53 + +# Create a MultiService, and hook up a TCPServer and a UDPServer to it as +# children. +dnsService = service.MultiService() +hostsResolver = hosts.Resolver('/etc/hosts') +tcpFactory = server.DNSServerFactory([hostsResolver]) +internet.TCPServer(port, tcpFactory).setServiceParent(dnsService) +udpFactory = dns.DNSDatagramProtocol(tcpFactory) +internet.UDPServer(port, udpFactory).setServiceParent(dnsService) + +# Create an application as normal +application = service.Application("DNSExample") + +# Connect our MultiService to the application, just like a normal service. +dnsService.setServiceParent(application) +
            + +
            + +


            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/basics.html b/vendor/Twisted-10.0.0/doc/core/howto/basics.html new file mode 100644 index 000000000000..cfaf899dc953 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/basics.html @@ -0,0 +1,99 @@ + + +Twisted Documentation: The Basics + + + + +

            The Basics

            + +
            + + +

            Application

            + +

Twisted programs usually work with twisted.application.service.Application. This class usually holds all persistent configuration of a running server -- ports to bind to, places where connections must be kept or attempted, periodic actions to perform, and almost everything else. It is the root object in a tree of services implementing IService.

            + +

            Other HOWTOs describe how to write custom code for Applications, +but this one describes how to use already written code (which can be +part of Twisted or from a third-party Twisted plugin developer). The +Twisted distribution comes with an important tool to deal with +Applications, twistd.

            + +

            Applications are just Python objects, which can +be created and manipulated in the same ways as any other object. +
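For instance, creating one is a single call (a minimal sketch):

from twisted.application import service

application = service.Application("myapp")  # an ordinary Python object
# it can be passed around, inspected and have services attached like any other object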

            + +

            twistd

            + +

            The Twisted Daemon is a program that knows how to run Applications. +This program +is twistd(1). Strictly +speaking, twistd is not necessary -- +fetching the application, getting the IService component, +calling startService, scheduling stopService when +the reactor shuts down, and then calling reactor.run() could be +done manually. twistd(1), however, supplies +many options which are highly useful for program set up.
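A rough sketch of that manual equivalent (assuming a hypothetical module mytac that defines the application object; twistd itself does considerably more, such as logging and daemonization):

from twisted.application import service
from twisted.internet import reactor
from mytac import application          # hypothetical module defining `application`

svc = service.IService(application)    # get the IService component
svc.startService()                     # start all attached services
# stop the services cleanly when the reactor shuts down
reactor.addSystemEventTrigger("before", "shutdown", svc.stopService)
reactor.run()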

            + +

twistd supports choosing a reactor (for more on reactors, see Choosing a Reactor), logging to a logfile, daemonizing and more. twistd supports all Applications mentioned above -- and an additional one. Sometimes it is convenient to write the code for building an Application in straight Python. One big source of such Python files is the doc/examples directory. When a straight Python file which defines an Application object called application is used, use the -y option.

            + +

            When twistd runs, it records its process id in a +twistd.pid file (this can be configured via a command line +switch). In order to shutdown the twistd process, kill that +pid (usually you would do kill `cat twistd.pid`). +

            + +

            As always, the gory details are in the manual page.

            + +

            tap2deb

            + +

            +For Twisted-based server application developers who want to deploy on +Debian, Twisted supplies the tap2deb program. This program +wraps a Twisted Application file (of any of the supported formats -- Python, +source, xml or pickle) +in a Debian package, including correct installation and removal scripts +and init.d scripts. This frees the installer from manually +stopping or starting the service, and will make sure it goes properly up +on startup and down on shutdown and that it obeys the init levels. +

            + +

For the more savvy Debian users, tap2deb also generates the source package, allowing her to modify and polish things which automated software cannot detect (such as dependencies or relationships to virtual packages). In addition, the Twisted team itself intends to produce Debian packages for some common services, such as web servers and an inetd replacement. Those packages will enjoy the best of all worlds -- both the consistency which comes from being based on tap2deb and the delicate manual tweaking of a Debian maintainer, ensuring perfect integration with Debian.

            + +

            tap2rpm

            + +

            tap2rpm is similar to tap2deb, except that +it generates RPMs for Redhat and other related platforms.

            + +
            + +


            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/book.tex b/vendor/Twisted-10.0.0/doc/core/howto/book.tex new file mode 100644 index 000000000000..716ab97ab351 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/book.tex @@ -0,0 +1,116 @@ +\documentclass[oneside]{book} +\usepackage[dvips]{graphicx} +\usepackage{times,mathptmx} +\usepackage{ifthen} +\usepackage{hyperref} + +\usepackage{geometry} +\geometry{verbose,letterpaper,tmargin=1in,bmargin=0.5in,lmargin=1in,rmargin=1in} + +\setlength{\oddsidemargin}{0in} +\setlength{\textwidth}{\paperwidth} +\addtolength{\textwidth}{-2in} + +\newcommand{\loreref}[1]{% + \ifthenelse{\value{page}=\pageref{#1}}% + { (this page)}% + { (page \pageref{#1})}% +} + + +\title{The Twisted Documentation} +\author{The Twisted Development Team} + +\tolerance=1000 +\sloppy + +\begin{document} +\maketitle +\tableofcontents + +\chapter{Introduction} + +\input{vision.tex} +\input{overview.tex} +\input{internet-overview.tex} + +\chapter{Tutorial} + +\input{servers.tex} +\input{clients.tex} +\input{quotes.tex} +\input{design.tex} +\input{tutorial/index.tex} +\input{tutorial/intro.tex} +\input{tutorial/protocol.tex} +\input{tutorial/style.tex} +\input{tutorial/components.tex} +\input{tutorial/backends.tex} +\input{tutorial/web.tex} +\input{tutorial/pb.tex} +\input{tutorial/factory.tex} +\input{tutorial/client.tex} +\input{tutorial/library.tex} +\input{tutorial/configuration.tex} + +\chapter{Low-Level Twisted } + +\input{reactor-basics.tex} +\input{udp.tex} +\input{process.tex} +\input{defer.tex} +\input{gendefer.tex} +\input{deferredindepth.tex} +\input{time.tex} +\input{threading.tex} +\input{choosing-reactor.tex} + +\chapter{High-Level Twisted} + +\input{basics.tex} +\input{plugin.tex} +\input{tap.tex} +\input{components.tex} +\input{cred.tex} +\input{application.tex} + +\chapter{Utilities} + +\input{options.tex} +\input{logging.tex} +\input{dirdbm.tex} +\input{telnet.tex} +\input{testing.tex} + +\chapter{Twisted RDBMS support} + +\input{rdbms.tex} +\input{row.tex} + +\chapter{Perspective Broker} +\input{pb.tex} +\input{pb-intro.tex} +\input{pb-usage.tex} +\input{pb-copyable.tex} +\input{pb-cred.tex} + +\chapter{Manual Pages} + +\input{../man/manhole-man.tex} +\clearpage +\input{../man/tap2deb-man.tex} +\clearpage +\input{../man/tap2rpm-man.tex} +\clearpage +\input{../man/tapconvert-man.tex} +\clearpage +\input{../man/trial-man.tex} +\clearpage +\input{../man/twistd-man.tex} + +\chapter{Appendix} + +\input{glossary.tex} +\input{../specifications/banana.tex} + +\end{document} diff --git a/vendor/Twisted-10.0.0/doc/core/howto/choosing-reactor.html b/vendor/Twisted-10.0.0/doc/core/howto/choosing-reactor.html new file mode 100644 index 000000000000..d8920e32b6ef --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/choosing-reactor.html @@ -0,0 +1,355 @@ + + +Twisted Documentation: Choosing a Reactor and GUI Toolkit Integration + + + + +

            Choosing a Reactor and GUI Toolkit Integration

            + +
            + + +

            Overview

            + +

            Twisted provides a variety of implementations of the twisted.internet.reactor. The specialized + implementations are suited for different purposes and are + designed to integrate better with particular platforms.

            + +

            The general purpose reactor implementations are:

• The select()-based reactor (the default on all platforms)
• The poll()-based reactor

            Platform-specific reactor implementations exist for:

• KQueue (FreeBSD)
• Win32 (WFMO and IOCP)
• epoll (Linux 2.6)

            The remaining custom reactor implementations provide support + for integrating with the native event loops of various graphical + toolkits. This lets your Twisted application use all of the + usual Twisted APIs while still being a graphical application.

            + +

            Twisted currently integrates with the following graphical + toolkits:

• GTK+ (versions 1.2 and 2.0)
• CoreFoundation (OS X)
• wxPython, Tkinter and PyUI (see the GUI integration sections below)

When using applications that are runnable using twistd, e.g. TAPs or plugins, there is no need to choose a reactor explicitly, since this can be chosen using twistd's -r option.

            + +

In all cases, the event loop is started by calling reactor.run() and stopped by calling reactor.stop().
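A trivial sketch of that lifecycle (the five second delay is arbitrary):

from twisted.internet import reactor

reactor.callLater(5, reactor.stop)   # schedule a clean shutdown
reactor.run()                        # start the event loop; returns after reactor.stop()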

            + +

            IMPORTANT: installing a reactor should be the first thing + done in the app, since any code that does + from twisted.internet import reactor will automatically + install the default reactor if the code hasn't already installed one.

            + +

            Reactor Functionality

Reactor          Status         TCP  SSL  UDP  Threading  Processes  Scheduling  Platforms
select()         Stable         Y    Y    Y    Y          Y          Y           Unix, Win32
poll()           Stable         Y    Y    Y    Y          Y          Y           Unix
Win32 (WFMO)     Experimental   Y    Y    Y    Y          Y          Y           Win32
Win32 (IOCP)     Experimental   Y    Y    N    N          N          Y           Win32
CoreFoundation   Unmaintained   Y    Y    Y    Y          Y          Y           OS X
epoll            Stable         Y    Y    Y    Y          Y          Y           Linux 2.6
Gtk              Stable         Y    Y    Y    Y          Y          Y           Unix, Win32
wx               Experimental   Y    Y    Y    Y          Y          Y           Unix, Win32
kqueue           Experimental   Y    Y    Y    Y          Y          Y           FreeBSD
            + +

            General Purpose Reactors

            + +

            Select()-based Reactor

            + +

            The select reactor is currently the default reactor on all + platforms. The following code will install it, if no other reactor has + been installed:

            + +


            from twisted.internet import reactor +
            + +

            In the future, if another reactor becomes the default, but the + select reactor is desired, it may be installed via:

            + +


            from twisted.internet import selectreactor +selectreactor.install() + +from twisted.internet import reactor +
            + +

            Poll()-based Reactor

            + +

            The PollReactor will work on any platform that provides poll() (while OS X provides poll(), it is not recommended to use the + PollReactor on OS X due to bugs in its implementation of the call). + With larger numbers of connected sockets, it may provide for better + performance than the SelectReactor.

            + +


            from twisted.internet import pollreactor +pollreactor.install() + +from twisted.internet import reactor +
            + +

            Platform-Specific Reactors

            + +

            KQueue

            + +

            The KQueue Reactor allows Twisted to use FreeBSD's kqueue mechanism for + event scheduling. See instructions in the twisted.internet.kqreactor's + docstring for installation notes.

            + +


            from twisted.internet import kqreactor +kqreactor.install() + +from twisted.internet import reactor +
            + + +

            Win32 (WFMO)

            + +

            The Win32 reactor is not yet complete and has various limitations + and issues that need to be addressed. The reactor supports GUI integration + with the win32gui module, so it can be used for native Win32 GUI applications. +

            + +


            from twisted.internet import win32eventreactor +win32eventreactor.install() + +from twisted.internet import reactor +
            + +

            Win32 (IOCP)

            + +

            + Windows provides a fast, scalable event notification system known as IO + Completion Ports, or IOCP for short. Twisted includes a reactor based + on IOCP which is nearly complete. The reactor has a handful of known + bugs and lacks SSL support. +

            + +


            from twisted.internet import iocpreactor +iocpreactor.install() + +from twisted.internet import reactor +
            + +

            Epoll-based Reactor

            + +

            The EPollReactor will work on any platform that provides + epoll, today only Linux 2.6 and over. The + implementation of the epoll reactor currently uses the Level Triggered + interface, which is basically like poll() but scales much better.

            + +


            from twisted.internet import epollreactor +epollreactor.install() + +from twisted.internet import reactor +
            + +

            GUI Integration Reactors

            + +

            GTK+

            + +

            Twisted integrates with PyGTK, versions 1.2 (gtkreactor) and 2.0 + (gtk2reactor). Sample applications using GTK+ and + Twisted are available in the Twisted SVN.

            + +

            GTK-2.0 split the event loop out of the GUI toolkit, into a separate + module called glib. To run an application using the glib event + loop, use the glib2reactor. This will be slightly faster + than gtk2reactor (and does not require a working X display), + but cannot be used to run GUI applications.

            + +


            from twisted.internet import gtkreactor # for gtk-1.2 +gtkreactor.install() + +from twisted.internet import reactor +
            + +


            from twisted.internet import gtk2reactor # for gtk-2.0 +gtk2reactor.install() + +from twisted.internet import reactor +
            + +


            from twisted.internet import glib2reactor # for non-GUI apps +glib2reactor.install() + +from twisted.internet import reactor +
            + +

            CoreFoundation

            + +

            Twisted integrates with PyObjC, version 1.0. Sample applications using Cocoa and Twisted + are available in the examples directory under + threadedselect/Cocoa.

            + +


            from twisted.internet import cfreactor +cfreactor.install() + +from twisted.internet import reactor +
            + +

            Non-Reactor GUI Integration

            + +

            Tkinter

            + +

            The support for Tkinter doesn't use a specialized reactor. Instead, there is + some specialized support code:

            + +


            from Tkinter import * +from twisted.internet import tksupport + +root = Tk() + +# Install the Reactor support +tksupport.install(root) + +# at this point build Tk app as usual using the root object, +# and start the program with "reactor.run()", and stop it +# with "reactor.stop()". +
            + +

            wxPython

            + +

            Twisted currently supports two methods of integrating + wxPython. Unfortunately, neither method will work on all wxPython + platforms (such as GTK2 or Windows). It seems that the only + portable way to integrate with wxPython is to run it in a separate + thread. One of these methods may be sufficient if your wx app is + limited to a single platform.

            + +

            As with Tkinter, the support for integrating + Twisted with a wxPython + application uses specialized support code rather than a simple reactor.

            + +


            from wxPython.wx import * +from twisted.internet import wxsupport, reactor + +myWxAppInstance = wxApp(0) +wxsupport.install(myWxAppInstance) +
            + +

            However, this has issues when running on Windows, so Twisted now + comes with alternative wxPython support using a reactor. Using + this method is probably better. Initialization is done in two + stages. In the first, the reactor is installed:

            + +


            from twisted.internet import wxreactor +wxreactor.install() +
            + +

            Later, once a wxApp instance has + been created, but before reactor.run() + is called:

            + +


            myWxAppInstance = wxApp(0) +reactor.registerWxApp(myWxAppInstance) +
            + +

            An example Twisted application that uses WxWindows can be found + in doc/examples/wxdemo.py.

            + +

            PyUI

            + +

            As with Tkinter, the support for integrating + Twisted with a PyUI + application uses specialized support code rather than a simple reactor.

            + +


            from twisted.internet import pyuisupport, reactor + +pyuisupport.install(args=(640, 480), kw={'renderer': 'gl'}) +
            + +

            An example Twisted application that uses PyUI can be found in doc/examples/pyuidemo.py.

            + +
            + +


            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/clients.html b/vendor/Twisted-10.0.0/doc/core/howto/clients.html new file mode 100644 index 000000000000..191908f45b2e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/clients.html @@ -0,0 +1,635 @@ + + +Twisted Documentation: Writing Clients + + + + +

            Writing Clients

            + +
            + + +

            Overview

            + +

            Twisted is a framework designed to be very flexible, and let you write + powerful clients. The cost of this flexibility is a few layers in the way + to writing your client. This document covers creating clients that can be + used for TCP, SSL and Unix sockets, UDP is covered in + a different document.

            + +

At the base, the place where you actually implement the protocol parsing and handling, is the Protocol class. This class will usually be descended from twisted.internet.protocol.Protocol. Most protocol handlers inherit either from this class or from one of its convenience children. An instance of the protocol class will be instantiated when you connect to the server, and will go away when the connection is finished. This means that persistent configuration is not saved in the Protocol.

            + +

The persistent configuration is kept in a Factory class, which usually inherits from twisted.internet.protocol.ClientFactory. The default factory class just instantiates the Protocol, and then sets on it an attribute called factory which points to itself. This lets the Protocol access, and possibly modify, the persistent configuration.

            + +

            Protocol

            + +

            As mentioned above, this, and auxiliary classes and functions, is where + most of the code is. A Twisted protocol handles data in an asynchronous + manner. What this means is that the protocol never waits for an event, but + rather responds to events as they arrive from the network.

            + +

            Here is a simple example:

            + +


            from twisted.internet.protocol import Protocol +from sys import stdout + +class Echo(Protocol): + def dataReceived(self, data): + stdout.write(data) +
            + +

            This is one of the simplest protocols. It simply writes to standard + output whatever it reads from the connection. There are many events it + does not respond to. Here is an example of a Protocol responding to + another event.

            + +


            from twisted.internet.protocol import Protocol + +class WelcomeMessage(Protocol): + def connectionMade(self): + self.transport.write("Hello server, I am the client!\r\n") + self.transport.loseConnection() +
            + +

            This protocol connects to the server, sends it a welcome message, and + then terminates the connection.

            + +

            The connectionMade event is usually where set up of the Protocol + object happens, as well as any initial greetings (as in the + WelcomeMessage protocol above). Any tearing down of Protocol-specific + objects is done in connectionLost.
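For example, here is a sketch of a protocol that opens a per-connection log file in connectionMade and closes it again in connectionLost (the file name is illustrative):

from twisted.internet.protocol import Protocol

class LoggingEcho(Protocol):
    def connectionMade(self):
        # per-connection set up
        self.logfile = open("connection.log", "a")

    def dataReceived(self, data):
        self.logfile.write(data)

    def connectionLost(self, reason):
        # per-connection tear down
        self.logfile.close()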

            + +

            Simple, single-use clients

            + +

            In many cases, the protocol only needs to connect to the server once, + and the code just wants to get a connected instance of the protocol. In + those cases twisted.internet.protocol.ClientCreator provides the + appropriate API.

            + +


            from twisted.internet import reactor +from twisted.internet.protocol import Protocol, ClientCreator + +class Greeter(Protocol): + def sendMessage(self, msg): + self.transport.write("MESSAGE %s\n" % msg) + +def gotProtocol(p): + p.sendMessage("Hello") + reactor.callLater(1, p.sendMessage, "This is sent in a second") + reactor.callLater(2, p.transport.loseConnection) + +c = ClientCreator(reactor, Greeter) +c.connectTCP("localhost", 1234).addCallback(gotProtocol) +
            + +

            ClientFactory

            + +

            We use reactor.connect* and a ClientFactory. The ClientFactory is in + charge of creating the Protocol, and also receives events relating to the + connection state. This allows it to do things like reconnect on the event + of a connection error. Here is an example of a simple ClientFactory that + uses the Echo protocol (above) and also prints what state the connection + is in.

            + +


            from twisted.internet.protocol import Protocol, ClientFactory +from sys import stdout + +class Echo(Protocol): + def dataReceived(self, data): + stdout.write(data) + +class EchoClientFactory(ClientFactory): + def startedConnecting(self, connector): + print 'Started to connect.' + + def buildProtocol(self, addr): + print 'Connected.' + return Echo() + + def clientConnectionLost(self, connector, reason): + print 'Lost connection. Reason:', reason + + def clientConnectionFailed(self, connector, reason): + print 'Connection failed. Reason:', reason +
            + +

            To connect this EchoClientFactory to a server, you could use this + code:

            + +


            from twisted.internet import reactor +reactor.connectTCP(host, port, EchoClientFactory()) +reactor.run() +
            + +

            Note that clientConnectionFailed + is called when a connection could not be established, and that clientConnectionLost + is called when a connection was made and then disconnected.

            + +

            Reconnection

            + +

            Many times, the connection of a client will be lost unintentionally due + to network errors. One way to reconnect after a disconnection would be to + call connector.connect() when the + connection is lost: +

            + +


            from twisted.internet.protocol import ClientFactory + +class EchoClientFactory(ClientFactory): + def clientConnectionLost(self, connector, reason): + connector.connect() +
            + +

            The connector passed as the first argument is the interface between a + connection and a protocol. When the connection fails and the factory + receives the clientConnectionLost event, the factory can call connector.connect() to start the connection over + again from scratch.

            + +

            + However, most programs that want this functionality should implement ReconnectingClientFactory instead, + which tries to reconnect if a connection is lost or fails, and which + exponentially delays repeated reconnect attempts. +

            + +

            + Here is the Echo protocol implemented with a ReconnectingClientFactory: +

            + +


            from twisted.internet.protocol import Protocol, ReconnectingClientFactory +from sys import stdout + +class Echo(Protocol): + def dataReceived(self, data): + stdout.write(data) + +class EchoClientFactory(ReconnectingClientFactory): + def startedConnecting(self, connector): + print 'Started to connect.' + + def buildProtocol(self, addr): + print 'Connected.' + print 'Resetting reconnection delay' + self.resetDelay() + return Echo() + + def clientConnectionLost(self, connector, reason): + print 'Lost connection. Reason:', reason + ReconnectingClientFactory.clientConnectionLost(self, connector, reason) + + def clientConnectionFailed(self, connector, reason): + print 'Connection failed. Reason:', reason + ReconnectingClientFactory.clientConnectionFailed(self, connector, + reason) +
            + +

            A Higher-Level Example: ircLogBot

            + +

            Overview of ircLogBot

            + +

            The clients so far have been fairly simple. A more complicated + example comes with Twisted Words in the doc/examples directory.

            + +


            # twisted imports +from twisted.words.protocols import irc +from twisted.internet import reactor, protocol +from twisted.python import log + +# system imports +import time, sys + + +class MessageLogger: + """ + An independent logger class (because separation of application + and protocol logic is a good thing). + """ + def __init__(self, file): + self.file = file + + def log(self, message): + """Write a message to the file.""" + timestamp = time.strftime("[%H:%M:%S]", time.localtime(time.time())) + self.file.write('%s %s\n' % (timestamp, message)) + self.file.flush() + + def close(self): + self.file.close() + + +class LogBot(irc.IRCClient): + """A logging IRC bot.""" + + nickname = "twistedbot" + + def connectionMade(self): + irc.IRCClient.connectionMade(self) + self.logger = MessageLogger(open(self.factory.filename, "a")) + self.logger.log("[connected at %s]" % + time.asctime(time.localtime(time.time()))) + + def connectionLost(self, reason): + irc.IRCClient.connectionLost(self, reason) + self.logger.log("[disconnected at %s]" % + time.asctime(time.localtime(time.time()))) + self.logger.close() + + + # callbacks for events + + def signedOn(self): + """Called when bot has succesfully signed on to server.""" + self.join(self.factory.channel) + + def joined(self, channel): + """This will get called when the bot joins the channel.""" + self.logger.log("[I have joined %s]" % channel) + + def privmsg(self, user, channel, msg): + """This will get called when the bot receives a message.""" + user = user.split('!', 1)[0] + self.logger.log("<%s> %s" % (user, msg)) + + # Check to see if they're sending me a private message + if channel == self.nickname: + msg = "It isn't nice to whisper! Play nice with the group." + self.msg(user, msg) + return + + # Otherwise check to see if it is a message directed at me + if msg.startswith(self.nickname + ":"): + msg = "%s: I am a log bot" % user + self.msg(channel, msg) + self.logger.log("<%s> %s" % (self.nickname, msg)) + + def action(self, user, channel, msg): + """This will get called when the bot sees someone do an action.""" + user = user.split('!', 1)[0] + self.logger.log("* %s %s" % (user, msg)) + + # irc callbacks + + def irc_NICK(self, prefix, params): + """Called when an IRC user changes their nickname.""" + old_nick = prefix.split('!')[0] + new_nick = params[0] + self.logger.log("%s is now known as %s" % (old_nick, new_nick)) + + + # For fun, override the method that determines how a nickname is changed on + # collisions. The default method appends an underscore. + def alterCollidedNick(self, nickname): + """ + Generate an altered version of a nickname that caused a collision in an + effort to create an unused related name for subsequent registration. + """ + return nickname + '^' + + + +class LogBotFactory(protocol.ClientFactory): + """A factory for LogBots. + + A new protocol instance will be created each time we connect to the server. 
+ """ + + # the class of the protocol to build when new connection is made + protocol = LogBot + + def __init__(self, channel, filename): + self.channel = channel + self.filename = filename + + def clientConnectionLost(self, connector, reason): + """If we get disconnected, reconnect to server.""" + connector.connect() + + def clientConnectionFailed(self, connector, reason): + print "connection failed:", reason + reactor.stop() + + +if __name__ == '__main__': + # initialize logging + log.startLogging(sys.stdout) + + # create factory protocol and application + f = LogBotFactory(sys.argv[1], sys.argv[2]) + + # connect factory to this host and port + reactor.connectTCP("irc.freenode.net", 6667, f) + + # run bot + reactor.run() +
            + +

            ircLogBot.py connects to an IRC server, joins a channel, and logs all + traffic on it to a file. It demonstrates some of the connection-level + logic of reconnecting on a lost connection, as well as storing persistent + data in the Factory.

            + +

            Persistent Data in the Factory

            + +

            Since the Protocol instance is recreated each time the connection is + made, the client needs some way to keep track of data that should be + persisted. In the case of the logging bot, it needs to know which channel + it is logging, and where to log it to.

            + +


            from twisted.internet import protocol +from twisted.protocols import irc + +class LogBot(irc.IRCClient): + + def connectionMade(self): + irc.IRCClient.connectionMade(self) + self.logger = MessageLogger(open(self.factory.filename, "a")) + self.logger.log("[connected at %s]" % + time.asctime(time.localtime(time.time()))) + + def signedOn(self): + self.join(self.factory.channel) + + +class LogBotFactory(protocol.ClientFactory): + + protocol = LogBot + + def __init__(self, channel, filename): + self.channel = channel + self.filename = filename +
            + +

            When the protocol is created, it gets a reference to the factory as + self.factory. It can then access attributes of the factory in its logic. + In the case of LogBot, it opens the file and connects to the channel + stored in the factory.
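That factory reference is attached when the factory builds the protocol instance. As a rough paraphrase (a sketch, not copied from the Twisted source), the default buildProtocol behaves like this:

    class ClientFactorySketch:
        protocol = None   # set to the protocol class, e.g. LogBot

        def buildProtocol(self, addr):
            p = self.protocol()
            p.factory = self   # the reference the protocol later reads
            return p

Overriding buildProtocol is also the usual place for a factory to pass extra constructor arguments to its protocol instances.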

            + +

            Further Reading

            + +

            The Protocol + class used throughout this document is a base implementation of IProtocol used in + most Twisted applications for convenience. To learn about the + completeIProtocol interface, see the API documentation for + IProtocol.

            + +

            The transport attribute used in some examples in this + document provides the + ITCPTransport interface. To learn about the complete interface, see + the API documentation for ITCPTransport.

            + +

            Interface classes are a way of specifying what methods and attributes an + object has and how they behave. See the + Components: Interfaces and Adapters document.

            +
            + +

            Index

            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/components.html b/vendor/Twisted-10.0.0/doc/core/howto/components.html new file mode 100644 index 000000000000..1feb5a7b7e06 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/components.html @@ -0,0 +1,600 @@ + + +Twisted Documentation: Components: Interfaces and Adapters + + + + +

            Components: Interfaces and Adapters

            + +
            + + +

Object oriented programming languages allow programmers to reuse portions of existing code by creating new classes of objects which subclass another class. When a class subclasses another, it is said to inherit all of its behaviour. The subclass can then override and extend the behaviour provided to it by the superclass. Inheritance is very useful in many situations, but because it is so convenient to use, it often becomes abused in large software systems, especially when multiple inheritance is involved. One solution is to use delegation instead of inheritance where appropriate. Delegation is simply the act of asking another object to perform a task for an object. To support this design pattern, which is often referred to as the components pattern because it involves many small interacting components, interfaces and adapters were created by the Zope 3 team.

            + +

            Interfaces are simply markers which objects can use to say I +implement this interface. Other objects may then make requests like +Please give me an object which implements interface X for object type Y. +Objects which implement an interface for another object type are called +adapters.

            + +

            The superclass-subclass relationship is said to be an is-a relationship. +When designing object hierarchies, object modellers use subclassing when they +can say that the subclass is the same class as the superclass. For +example:

            + +


            class Shape: + sideLength = 0 + def getSideLength(self): + return self.sideLength + + def setSideLength(self, sideLength): + self.sideLength = sideLength + + def area(self): + raise NotImplementedError, "Subclasses must implement area" + +class Triangle(Shape): + def area(self): + return (self.sideLength * self.sideLength) / 2 + +class Square(Shape): + def area(self): + return self.sideLength * self.sideLength +
            + +

            In the above example, a Triangle is-a Shape, so it subclasses Shape, +and a Square is-a Shape, so it also subclasses Shape.

            + +

            However, subclassing can get complicated, especially when Multiple +Inheritance enters the picture. Multiple Inheritance allows a class to inherit +from more than one base class. Software which relies heavily on inheritance +often ends up having both very wide and very deep inheritance trees, meaning +that one class inherits from many superclasses spread throughout the system. +Since subclassing with Multiple Inheritance means implementation +inheritance, locating a method's actual implementation and ensuring the +correct method is actually being invoked becomes a challenge. For example:

            + +


            class Area: + sideLength = 0 + def getSideLength(self): + return self.sideLength + + def setSideLength(self, sideLength): + self.sideLength = sideLength + + def area(self): + raise NotImplementedError, "Subclasses must implement area" + +class Color: + color = None + def setColor(self, color): + self.color = color + + def getColor(self): + return self.color + +class Square(Area, Color): + def area(self): + return self.sideLength * self.sideLength +
            + +

The reason programmers like using implementation inheritance is that it makes code easier to read, since the implementation details of Area live in a separate place from the implementation details of Color. This is nice, because conceivably an object could have a color but not an area, or an area but not a color. The problem, though, is that Square is not really an Area or a Color; it has an area and a color. Thus, we should really be using another object oriented technique called composition, which relies on delegation rather than inheritance to break code into small reusable chunks. Let us continue with the Multiple Inheritance example, though, because it is often used in practice.

            + +

            What if both the Color and the Area base class defined the same method, +perhaps calculate? Where would the implementation come from? The +implementation that is located for Square().calculate() depends on +the method resolution order, or MRO, and can change when programmers change +seemingly unrelated things by refactoring classes in other parts of the system, +causing obscure bugs. Our first thought might be to change the calculate method +name to avoid name clashes, to perhaps calculateArea and +calculateColor. While explicit, this change could potentially +require a large number of changes throughout a system, and is error-prone, +especially when attempting to integrate two systems which you didn't write.
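To make the hazard concrete, here is a minimal illustration (not from the original page), assuming both base classes define a clashing calculate method:

    class Area(object):
        def calculate(self):
            return "area"

    class Color(object):
        def calculate(self):
            return "color"

    class Square(Area, Color):
        pass

    class Square2(Color, Area):
        pass

    # The MRO is searched left to right, so the order of the base classes
    # silently decides which implementation runs.
    print(Square().calculate())    # "area"
    print(Square2().calculate())   # "color"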

            + +

Let's imagine another example. We have an electric appliance, say a hair dryer. The hair dryer is designed for American voltage. We have two electric sockets, one of them an American 110 Volt socket, and one of them a foreign 220 Volt socket. If we plug the hair dryer into the 220 Volt socket, it is going to expect 110 Volt current and errors will result. Going back and changing the hair dryer to support both plug110Volt and plug220Volt methods would be tedious, and what if we decided we needed to plug the hair dryer into yet another type of socket? For example:

            + +


class HairDryer:
    def plug(self, socket):
        if socket.voltage() == 110:
            print "I was plugged in properly and am operating."
        else:
            print "I was plugged in improperly and "
            print "now you have no hair dryer any more."

class AmericanSocket:
    def voltage(self):
        return 110

class ForeignSocket:
    def voltage(self):
        return 220
            + +

            Given these classes, the following operations can be performed:

            + +
            +>>> hd = HairDryer()
            +>>> am = AmericanSocket()
            +>>> hd.plug(am)
            +I was plugged in properly and am operating.
            +>>> fs = ForeignSocket()
            +>>> hd.plug(fs)
            +I was plugged in improperly and 
            +now you have no hair dryer any more.
            +
            + +

            We are going to attempt to solve this problem by writing an Adapter for the +ForeignSocket which converts the voltage for use with an American +hair dryer. An Adapter is a class which is constructed with one and only one +argument, the adaptee or original object. In this example, we +will show all code involved for clarity:

            + +


class AdaptToAmericanSocket:
    def __init__(self, original):
        self.original = original

    def voltage(self):
        return self.original.voltage() / 2
            + +

            Now, we can use it as so:

            + +
            +>>> hd = HairDryer()
            +>>> fs = ForeignSocket()
            +>>> adapted = AdaptToAmericanSocket(fs)
            +>>> hd.plug(adapted)
            +I was plugged in properly and am operating.
            +
            + +

            So, as you can see, an adapter can 'override' the original implementation. It +can also 'extend' the interface of the original object by providing methods the +original object did not have. Note that an Adapter must explicitly delegate any +method calls it does not wish to modify to the original, otherwise the Adapter +cannot be used in places where the original is expected. Usually this is not a +problem, as an Adapter is created to conform an object to a particular interface +and then discarded.
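One cheap way to get that delegation, sketched here as an illustration (the class name is hypothetical, not from the page), is to forward unknown attribute lookups to the adaptee:

    class LoggingSocketAdapter(object):
        def __init__(self, original):
            self.original = original

        def voltage(self):
            # the one method whose behaviour we change
            print("voltage() was asked for")
            return self.original.voltage()

        def __getattr__(self, name):
            # any attribute we did not define is looked up on the wrapped
            # object, so unmodified methods are delegated automatically
            return getattr(self.original, name)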

            + +

            Interfaces and Components in Twisted code

            + +

            Adapters are a useful way of using multiple classes to factor code into +discrete chunks. However, they are not very interesting without some more +infrastructure. If each piece of code which wished to use an adapted object had +to explicitly construct the adapter itself, the coupling between components +would be too tight. We would like to achieve loose coupling, and this is +where twisted.python.components comes in.

            + +

            First, we need to discuss Interfaces in more detail. As we mentioned +earlier, an Interface is nothing more than a class which is used as a marker. +Interfaces should be subclasses of zope.interface.Interface, and +have a very odd look to python programmers not used to them:

            + +


from zope.interface import Interface

class IAmericanSocket(Interface):
    def voltage():
        """Return the voltage produced by this socket object, as an integer.
        """
            + +

            Notice how it looks just like a regular class definition, other than +inheriting from Interface? However, the method definitions inside +the class block do not have any method body! Since Python does not have any +native language-level support for Interfaces like Java does, this is what +distinguishes an Interface definition from a Class.

            + +

            Now that we have a defined Interface, we can talk about objects using terms +like this: The AmericanSocket class implements the +IAmericanSocket interface and Please give me an object which +adapts ForeignSocket to the IAmericanSocket +interface. We can make declarations about what interfaces a certain +class implements, and we can request adapters which implement a certain +interface for a specific class.

            + +

            Let's look at how we declare that a class implements an interface:

            + +


from zope.interface import implements

class AmericanSocket:

    implements(IAmericanSocket)

    def voltage(self):
        return 110
            + +

            So, to declare that a class implements an interface, we simply call +zope.interface.implements at the class level.

            + +

            Now, let's say we want to rewrite the AdaptToAmericanSocket +class as a real adapter. In this case we also specify it as implementing +IAmericanSocket:

            + +


from zope.interface import implements

class AdaptToAmericanSocket:

    implements(IAmericanSocket)

    def __init__(self, original):
        """
        Pass the original ForeignSocket object as original
        """
        self.original = original

    def voltage(self):
        return self.original.voltage() / 2
            + +

            Notice how we placed the implements declaration on this adapter class. So +far, we have not achieved anything by using components other than requiring us +to type more. In order for components to be useful, we must use the +component registry. Since AdaptToAmericanSocket implements +IAmericanSocket and regulates the voltage of a +ForeignSocket object, we can register +AdaptToAmericanSocket as an IAmericanSocket adapter +for the ForeignSocket class. It is easier to see how this is +done in code than to describe it:

            + +


            from zope.interface import Interface, implements +from twisted.python import components + +class IAmericanSocket(Interface): + def voltage(): + """Return the voltage produced by this socket object, as an integer. + """ + +class AmericanSocket: + implements(IAmericanSocket) + + def voltage(self): + return 110 + +class ForeignSocket: + def voltage(self): + return 220 + +class AdaptToAmericanSocket: + + implements(IAmericanSocket) + + def __init__(self, original): + self.original = original + + def voltage(self): + return self.original.voltage() / 2 + +components.registerAdapter( + AdaptToAmericanSocket, + ForeignSocket, + IAmericanSocket) +
            + +

            Now, if we run this script in the interactive interpreter, we can discover a +little more about how to use components. The first thing we can do is discover +whether an object implements an interface or not:

            + +
            +>>> IAmericanSocket.implementedBy(AmericanSocket)
            +True
            +>>> IAmericanSocket.implementedBy(ForeignSocket)
            +False
            +>>> am = AmericanSocket() 
            +>>> fs = ForeignSocket()
            +>>> IAmericanSocket.providedBy(am)
            +True
            +>>> IAmericanSocket.providedBy(fs)
            +False
            +
            + +

            As you can see, the AmericanSocket instance claims to +implement IAmericanSocket, but the ForeignSocket +does not. If we wanted to use the HairDryer with the +AmericanSocket, we could know that it would be safe to do so by +checking whether it implements IAmericanSocket. However, if we +decide we want to use HairDryer with a ForeignSocket +instance, we must adapt it to IAmericanSocket before +doing so. We use the interface object to do this:

            + +
            +>>> IAmericanSocket(fs)
            +<__main__.AdaptToAmericanSocket instance at 0x1a5120>
            +
            + +

            When calling an interface with an object as an argument, the interface +looks in the adapter registry for an adapter which implements the interface for +the given instance's class. If it finds one, it constructs an instance of the +Adapter class, passing the constructor the original instance, and returns it. +Now the HairDryer can safely be used with the adapted +ForeignSocket. But what happens if we attempt to adapt an object +which already implements IAmericanSocket? We simply get back the +original instance:

            + +
            +>>> IAmericanSocket(am)
            +<__main__.AmericanSocket instance at 0x36bff0>
            +
            + +

            So, we could write a new smart HairDryer which +automatically looked up an adapter for the socket you tried to plug it into:

            + +


class HairDryer:
    def plug(self, socket):
        adapted = IAmericanSocket(socket)
        assert adapted.voltage() == 110, "BOOM"
        print "I was plugged in properly and am operating"
            + +

            Now, if we create an instance of our new smart HairDryer +and attempt to plug it in to various sockets, the HairDryer will +adapt itself automatically depending on the type of socket it is plugged in +to:

            + +
            +>>> am = AmericanSocket()
            +>>> fs = ForeignSocket()
            +>>> hd = HairDryer()
            +>>> hd.plug(am)
            +I was plugged in properly and am operating
            +>>> hd.plug(fs)
            +I was plugged in properly and am operating
            +
            + +

            Voila; the magic of components.

            + +

            Components and Inheritance

            + +

            If you inherit from a class which implements some interface, and your new +subclass declares that it implements another interface, the implements will be +inherited by default.

            + +

            For example, pb.Root is a class +which implements IPBRoot. This interface indicates that an +object has remotely-invokable methods and can be used as the initial object +served by a new Broker instance. It has an implements setting +like:

            + +


from zope.interface import implements

class Root(Referenceable):
    implements(IPBRoot)
            + +

            Suppose you have your own class which implements your +IMyInterface interface:

            + +


from zope.interface import implements, Interface

class IMyInterface(Interface):
    pass

class MyThing:
    implements(IMyInterface)
            + +

            Now if you want to make this class inherit from pb.Root, +the interfaces code will automatically determine that it also implements +IPBRoot:

            + +


from twisted.spread import pb
from zope.interface import implements, Interface

class IMyInterface(Interface):
    pass

class MyThing(pb.Root):
    implements(IMyInterface)
            + +
            +>>> from twisted.spread.flavors import IPBRoot
            +>>> IPBRoot.implementedBy(MyThing)
            +True
            +
            + +

If you want MyThing to inherit from pb.Root but not implement IPBRoot like pb.Root does, use implementsOnly:

            + +


from twisted.spread import pb
from zope.interface import implementsOnly, Interface

class IMyInterface(Interface):
    pass

class MyThing(pb.Root):
    implementsOnly(IMyInterface)
            + +
            +>>> from twisted.spread.pb import IPBRoot
            +>>> IPBRoot.implementedBy(MyThing)
            +False
            +
            + +
            + +

            Index

            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/cred.html b/vendor/Twisted-10.0.0/doc/core/howto/cred.html new file mode 100644 index 000000000000..7b420fa382eb --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/cred.html @@ -0,0 +1,566 @@ + + +Twisted Documentation: Cred: Pluggable Authentication + + + + +

            Cred: Pluggable Authentication

            + +
            + + +

            Goals

            + +

            Cred is a pluggable authentication system for servers. It allows any +number of network protocols to connect and authenticate to a system, and +communicate to those aspects of the system which are meaningful to the specific +protocol. For example, Twisted's POP3 support passes a username and +password set of credentials to get back a mailbox for the specified email +account. IMAP does the same, but retrieves a slightly different view of the +same mailbox, enabling those features specific to IMAP which are not available +in other mail protocols.

            + +

            Cred is designed to allow both the backend implementation of the business +logic - called the avatar - and the authentication database - called +the credential checker - to be decided during deployment. For example, +the same POP3 server should be able to authenticate against the local UNIX +password database or an LDAP server without having to know anything about how +or where mail is stored.

            + +

To sketch out how this works - a Realm corresponds to an application domain and is in charge of avatars, which are network-accessible business logic objects. To connect this to an authentication database, a top-level object called a Portal stores a realm, and a number of credential checkers. Something that wishes to log in, such as a Protocol, stores a reference to the portal. Login consists of passing credentials and a request interface (e.g. POP3's IMailbox) to the portal. The portal passes the credentials to the appropriate credential checker, which returns an avatar ID. The ID is passed to the realm, which returns the appropriate avatar. For a Portal that has a realm that creates mailbox objects and a credential checker that checks /etc/passwd, login consists of passing in a username/password and the IMailbox interface to the portal. The portal passes this to the /etc/passwd credential checker, gets back an avatar ID corresponding to an email account, passes that to the realm and gets back a mailbox object for that email account.

            + +

            Putting all this together, here's how a login request will typically be +processed:

            + + + +
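As a rough sketch of that sequence (an illustration, assuming a Portal already wired to a mailbox realm and a password checker, and the IMailbox interface defined as in the POP3 example later in this document):

    from twisted.cred import credentials

    def attemptLogin(portal, username, password):
        creds = credentials.UsernamePassword(username, password)
        # mind is None for a simple protocol; IMailbox is the interface we want back
        d = portal.login(creds, None, IMailbox)

        def loggedIn(result):
            interface, avatar, logout = result
            return avatar          # the mailbox object for this account
        d.addCallback(loggedIn)
        return d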

            Cred objects

            +

            The Portal

            +

This is the core of login, the point of integration between all the objects in the cred system. There is one concrete implementation of Portal, and no interface - it does a very simple task. A Portal associates one (1) Realm with a collection of CredentialChecker instances. (More on those later.)

            + +

            If you are writing a protocol that needs to authenticate against +something, you will need a reference to a Portal, and to nothing else. +This has only 2 methods -

            + +
              +
            • login(credentials, mind, *interfaces) + +

The docstring is quite expansive (see twisted.cred.portal), but in brief, this is what you call in order to connect a user to the system. Typically you only pass in one interface, and the mind is None. The interfaces are the possible interfaces the returned avatar is expected to implement, in order of preference. The result is a deferred which fires a tuple of:

              +
                +
              • interface the avatar implements (which was one of the interfaces passed in the *interfaces +tuple)
              • +
              • an object that implements that interface (an avatar)
              • +
              • logout, a 0-argument callable which disconnects the connection that was +established by this call to login
              • +
              +

The logout method has to be called when the avatar is logged out. For POP3 this means when the protocol is disconnected or logged out, etc.

              +
            • +
            • registerChecker(checker, *credentialInterfaces) + +

              which adds a CredentialChecker to the portal. The optional list of interfaces are interfaces of credentials +that the checker is able to check.

              +
            + +

            The CredentialChecker

            + +

            This is an object implementing ICredentialsChecker which resolves some +credentials to an avatar ID. + +Whether the credentials are stored in an in-memory data structure, an +Apache-style htaccess file, a UNIX password database, an SSH key database, +or any other form, an implementation of ICredentialsChecker is +how this data is connected to cred. + +A credential checker +stipulates some requirements of the credentials it can check by +specifying a credentialInterfaces attribute, which is a list of +interfaces. Credentials passed to its requestAvatarId method must +implement one of those interfaces.

            + +

            For the most part, these things will just check usernames and passwords +and produce the username as the result, but hopefully we will be seeing +some public-key, challenge-response, and certificate based credential +checker mechanisms soon.

            + +

            A credential checker should raise an error if it cannot authenticate +the user, and return twisted.cred.checkers.ANONYMOUS +for anonymous access.
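As a minimal sketch (not from the original page) of what such a checker can look like, here is an in-memory username/password checker that returns the username as the avatar ID:

    from zope.interface import implements
    from twisted.cred import checkers, credentials, error
    from twisted.internet import defer

    class InMemoryPasswordChecker(object):
        implements(checkers.ICredentialsChecker)
        credentialInterfaces = (credentials.IUsernamePassword,)

        def __init__(self, users):
            self.users = users   # maps username -> password

        def requestAvatarId(self, creds):
            if self.users.get(creds.username) == creds.password:
                return defer.succeed(creds.username)
            return defer.fail(error.UnauthorizedLogin())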

            + +

            The Credentials

            +

            Oddly enough, this represents some credentials that the user presents. +Usually this will just be a small static blob of data, but in some +cases it will actually be an object connected to a network protocol. +For example, a username/password pair is static, but a +challenge/response server is an active state-machine that will require +several method calls in order to determine a result.

            + +

            Twisted comes with a number of credentials interfaces and implementations +in the twisted.cred.credentials module, +such as IUsernamePassword +and IUsernameHashedPassword.

            + +

            The Realm

            +

            A realm is an interface which connects your universe of business +objects to the authentication system.

            + +

            IRealm is another one-method interface:

            + +
              +
            • requestAvatar(avatarId, mind, *interfaces) + +

              This method will typically be called from 'Portal.login'. The avatarId +is the one returned by a CredentialChecker.

              + +
              Note: Note that avatarId must always be a string. In +particular, do not use unicode strings. If internationalized support is needed, +it is recommended to use UTF-8, and take care of decoding in the realm.
              + +

The important thing to realize about this method is that if it is being called, the user has already authenticated. Therefore, whenever possible, the Realm should create a new user if one does not already exist. Of course, sometimes this will be impossible without more information, and that is the case that the interfaces argument is for.

              +
            • +
            + +

            Since requestAvatar should be called from a Deferred callback, it may +return a Deferred or a synchronous result.
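For example, a realm whose avatars are looked up asynchronously might look roughly like this (a sketch only; loadMailbox is a hypothetical asynchronous lookup and IMailbox is the interface used elsewhere in this document):

    from zope.interface import implements
    from twisted.cred.portal import IRealm

    class AsyncMailboxRealm(object):
        implements(IRealm)

        def requestAvatar(self, avatarId, mind, *interfaces):
            if IMailbox in interfaces:
                d = loadMailbox(avatarId)   # returns a Deferred firing a mailbox
                d.addCallback(lambda mbox: (IMailbox, mbox, lambda: None))
                return d
            raise NotImplementedError("no supported interface")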

            + +

            The Avatar

            + +

            An avatar is a business logic object for a specific user. For POP3, it's +a mailbox, for a first-person-shooter it's the object that interacts with +the game, the actor as it were. Avatars are specific to an application, +and each avatar represents a single user.

            + +

            The Mind

            + +

            As mentioned before, the mind is usually None, so you can skip this +bit if you want.

            + +

            Masters of Perspective Broker already know this object as the ill-named +client object. There is no mind class, or even interface, but it +is an object which serves an important role - any notifications which are to be +relayed to an authenticated client are passed through a 'mind'. In addition, it +allows passing more information to the realm during login in addition to the +avatar ID.

            + +

            The name may seem rather unusual, but considering that a Mind is +representative of the entity on the other end of a network connection +that is both receiving updates and issuing commands, I believe it is +appropriate.

            + +

            Although many protocols will not use this, it serves an important role. + It is provided as an argument both to the Portal and to the Realm, +although a CredentialChecker should interact with a client program +exclusively through a Credentials instance.

            + +

            Unlike the original Perspective Broker client object, a Mind's +implementation is most often dictated by the protocol that is +connecting rather than the Realm. A Realm which requires a particular +interface to issue notifications will need to wrap the Protocol's mind +implementation with an adapter in order to get one that conforms to its +expected interface - however, Perspective Broker will likely continue +to use the model where the client object has a pre-specified remote +interface.

            + +

            (If you don't quite understand this, it's fine. It's hard to explain, +and it's not used in simple usages of cred, so feel free to pass None +until you find yourself requiring something like this.)

            + +

            Responsibilities

            + +

            Server protocol implementation

            + +

            The protocol implementor should define the interface the avatar should implement, +and design the protocol to have a portal attached. When a user logs in using the +protocol, a credential object is created, passed to the portal, and an avatar +with the appropriate interface is requested. When the user logs out or the protocol +is disconnected, the avatar should be logged out.

            + +

            The protocol designer should not hardcode how users are authenticated or the +realm implemented. For example, a POP3 protocol implementation would require a portal whose +realm returns avatars implementing IMailbox and whose credential checker accepts +username/password credentials, but that is all. Here's a sketch of how the code +might look - note that USER and PASS are the protocol commands used to login, and +the DELE command can only be used after you are logged in:

            + +


from zope.interface import Interface

from twisted.protocols import basic
from twisted.python import log
from twisted.cred import credentials, error
from twisted.internet import defer

class IMailbox(Interface):
    """Interface specification for mailbox."""
    def deleteMessage(index): pass


class POP3(basic.LineReceiver):
    # ...
    def __init__(self, portal):
        self.portal = portal
        self._userIs = None   # no USER command seen yet

    def do_DELE(self, i):
        # uses self.mbox, which is set after login
        i = int(i) - 1
        self.mbox.deleteMessage(i)
        self.successResponse()

    def do_USER(self, user):
        self._userIs = user
        self.successResponse('USER accepted, send PASS')

    def do_PASS(self, password):
        if self._userIs is None:
            self.failResponse("USER required before PASS")
            return
        user = self._userIs
        self._userIs = None
        d = defer.maybeDeferred(self.authenticateUserPASS, user, password)
        d.addCallback(self._cbMailbox, user)

    def authenticateUserPASS(self, user, password):
        if self.portal is not None:
            return self.portal.login(
                credentials.UsernamePassword(user, password),
                None,
                IMailbox
            )
        raise error.UnauthorizedLogin()

    def _cbMailbox(self, ial, user):
        interface, avatar, logout = ial

        if interface is not IMailbox:
            self.failResponse('Authentication failed')
            log.err("_cbMailbox() called with an interface other than IMailbox")
            return

        self.mbox = avatar
        self._onLogout = logout
        self.successResponse('Authentication succeeded')
        log.msg("Authenticated login for " + user)
            + +

            Application implementation

            + +

            The application developer can implement realms and credential checkers. For example, +she might implement a realm that returns IMailbox implementing avatars, using MySQL +for storage, or perhaps a credential checker that uses LDAP for authentication. +In the following example, the Realm for a simple remote object service (using +Twisted's Perspective Broker protocol) is implemented:

            + +


from zope.interface import implements

from twisted.spread import pb
from twisted.cred.portal import IRealm

class SimplePerspective(pb.Avatar):

    def perspective_echo(self, text):
        print 'echoing', text
        return text

    def logout(self):
        print self, "logged out"


class SimpleRealm:
    implements(IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective in interfaces:
            avatar = SimplePerspective()
            return pb.IPerspective, avatar, avatar.logout
        else:
            raise NotImplementedError("no interface")
            + +

            Deployment

            + +

            Deployment involves tying together a protocol, an appropriate realm and a credential +checker. For example, a POP3 server can be constructed by attaching to it a portal +that wraps the MySQL-based realm and an /etc/passwd credential checker, or perhaps +the LDAP credential checker if that is more useful. The following example shows +how the SimpleRealm in the previous example is deployed using an in-memory credential checker:

            + +


from twisted.spread import pb
from twisted.internet import reactor
from twisted.cred.portal import Portal
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse

portal = Portal(SimpleRealm())
checker = InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser("guest", "password")
portal.registerChecker(checker)
reactor.listenTCP(9986, pb.PBServerFactory(portal))
reactor.run()
            + +

            Cred plugins

            + +

            Authentication with cred plugins

            + +

            Cred offers a plugin architecture for authentication methods. The +primary API for this architecture is the command-line; the plugins are +meant to be specified by the end-user when deploying a TAP (twistd +plugin).

            + +

            For more information on writing a twistd plugin and using cred +plugins for your application, please refer to the Writing a twistd plugin document.

            + +

            Building a cred plugin

            + +

            To build a plugin for cred, you should first define an authType, a short one-word string that defines +your plugin to the command-line. Once you have this, the convention is +to create a file named myapp_plugins.py in the +twisted.plugins module path.

            + +

            Below is an example file structure for an application that defines +such a plugin:

            + +
              +
            • MyApplication/ +
                +
              • setup.py
              • +
              • myapp/ +
                  +
                • __init__.py
                • +
                • cred.py
                • +
                • server.py
                • +
                +
              • +
              • twisted/ +
                  +
                • plugins/ +
                    +
                  • myapp_plugins.py
                  • +
                  +
                • +
                +
              • +
              +
            • +
            + +

            +Once you have created this structure within your application, you can +create the code for your cred plugin by building a factory class which +implements ICheckerFactory. +These factory classes should not consist of a tremendous amount of +code. Most of the real application logic should reside in the cred +checker itself. (For help on building those, scroll up.) +

            + +

            +The core purpose of the CheckerFactory is to translate an argstring, which is passed on the command line, +into a suitable set of initialization parameters for a Checker +class. In most cases this should be little more than constructing a +dictionary or a tuple of arguments, then passing them along to a new +checker instance. +

            + +


from zope.interface import implements

from twisted import plugin
from twisted.cred import checkers
from myapp.cred import SpecialChecker

class SpecialCheckerFactory(object):
    """
    A checker factory for a specialized (fictional) API.
    """
    # The class needs to implement both of these interfaces
    # for the plugin system to find our factory.
    implements(checkers.ICheckerFactory, plugin.IPlugin)

    # This tells AuthOptionsMixin how to find this factory.
    authType = "special"

    # This is a one-line explanation of what arguments, if any,
    # your particular cred plugin requires at the command-line.
    argStringFormat = "A colon-separated key=value list."

    # This help text can be multiple lines. It will be displayed
    # when someone uses the "--help-auth-type special" command.
    authHelp = """Some help text goes here ..."""

    # This will be called once per command-line.
    def generateChecker(self, argstring=""):
        argdict = dict((x.split('=') for x in argstring.split(':')))
        return SpecialChecker(**argdict)

# We need to instantiate our class for the plugin to work.
theSpecialCheckerFactory = SpecialCheckerFactory()
            + +

            For more information on how your plugin can be used in your +application (and by other application developers), please see the Writing a twistd plugin document.

            + +

            Conclusion

            + +

            After reading through this tutorial, you should be able to +

            +
              +
            • Understand how the cred architecture applies to your application
            • +
            • Integrate your application with cred's object model
            • +
            • Deploy an application that uses cred for authentication
            • +
            • Allow your users to use command-line authentication plugins
            • +
            + +
            + +

            Index

            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/debug-with-emacs.html b/vendor/Twisted-10.0.0/doc/core/howto/debug-with-emacs.html new file mode 100644 index 000000000000..49c2b90b7e9f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/debug-with-emacs.html @@ -0,0 +1,65 @@ + + +Twisted Documentation: Debugging Python(Twisted) with Emacs + + + + +

            Debugging Python(Twisted) with Emacs

            +
              +
              + + + +1 + +
                +
• Open up your project files. Sometimes Emacs can't find them if you don't have them open beforehand.
              • + +
              • Make sure you have a program called pdb somewhere + in your PATH, with the following contents: + +
                #!/bin/sh
                +exec python2.3 /usr/lib/python2.3/pdb.py $1 $2 $3 $4 $5 $6 $7 $8 $9
                +  
              • + +
              • Run M-x pdb in emacs. If you usually run your + program as python foo.py, your command line should be pdb + foo.py, for twistd and trial just + add -b to the command line, e.g.: twistd -b -y my.tac
              • + +
              • while pdb waits for your input, go to a place in your code and hit + C-x SPC to insert a break-point. pdb should say something happy. + Do this in as many points as you wish.
              • + +
              • Go to your pdb buffer and hit c; this runs as normal until a + break-point is found.
              • + +
              • once you get to a breakpoint, use s to step, n to run the + current line without stepping through the functions it calls, w + to print out the current stack, u and d to go up and down a + level in the stack, p foo to print result of expression foo.
              • + +
              • recommendations for effective debugging: +
                  +
                • use p self a lot; just knowing the class where the current code + is isn't enough most of the time.
                • +
                • use w to get your bearings, it'll re-display the current-line/arrow
                • +
                • after you use w, use u and d and lots more p self on the + different stack-levels.
                • +
                • If you've got a big code-path that you need to grok, keep another + buffer open and list the code-path there (e.g., I had a + nasty-evil Deferred recursion, and this helped me tons)
                • +
                +
              • +
              + + +

              Footnotes

              1. POKEY THE PENGUIN IS COPYRIGHT © 1998-2002 +THE AUTHORS
              + +

              Index

              + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/defer.html b/vendor/Twisted-10.0.0/doc/core/howto/defer.html new file mode 100644 index 000000000000..f1a76d8fee9b --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/defer.html @@ -0,0 +1,840 @@ + + +Twisted Documentation: Deferred Reference + + + + +

              Deferred Reference

              + +
              + + + +

              This document is a guide to the behaviour of the twisted.internet.defer.Deferred object, and to various +ways you can use them when they are returned by functions.

              + +

              This document assumes that you are familiar with the basic principle that +the Twisted framework is structured around: asynchronous, callback-based +programming, where instead of having blocking code in your program or using +threads to run blocking code, you have functions that return immediately and +then begin a callback chain when data is available.

              + +

              +After reading this document, the reader should expect to be able to +deal with most simple APIs in Twisted and Twisted-using code that +return Deferreds. +

              + +
                +
              • what sorts of things you can do when you get a Deferred from a +function call; and
              • +
              • how you can write your code to robustly handle errors in Deferred +code.
              • +
              + + +

              Deferreds

              + +

Twisted uses the Deferred object to manage the callback sequence. The client application attaches a series of functions to the deferred to be called in order when the results of the asynchronous request are available (this series of functions is known as a series of callbacks, or a callback chain), together with a series of functions to be called if there is an error in the asynchronous request (known as a series of errbacks or an errback chain). The asynchronous library code calls the first callback when the result is available, or the first errback when an error occurs, and the Deferred object then hands the results of each callback or errback function to the next function in the chain.

              + +

              Callbacks

              + +

              A twisted.internet.defer.Deferred is a promise that +a function will at some point have a result. We can attach callback functions +to a Deferred, and once it gets a result these callbacks will be called. In +addition Deferreds allow the developer to register a callback for an error, +with the default behavior of logging the error. The deferred mechanism +standardizes the application programmer's interface with all sorts of +blocking or delayed operations.

              + +


              from twisted.internet import reactor, defer + +def getDummyData(x): + """ + This function is a dummy which simulates a delayed result and + returns a Deferred which will fire with that result. Don't try too + hard to understand this. + """ + d = defer.Deferred() + # simulate a delayed result by asking the reactor to fire the + # Deferred in 2 seconds time with the result x * 3 + reactor.callLater(2, d.callback, x * 3) + return d + +def printData(d): + """ + Data handling function to be added as a callback: handles the + data by printing the result + """ + print d + +d = getDummyData(3) +d.addCallback(printData) + +# manually set up the end of the process by asking the reactor to +# stop itself in 4 seconds time +reactor.callLater(4, reactor.stop) +# start up the Twisted reactor (event loop handler) manually +reactor.run() +
              + +

              Multiple callbacks

              + +

              Multiple callbacks can be added to a Deferred. The first callback in the +Deferred's callback chain will be called with the result, the second with the +result of the first callback, and so on. Why do we need this? Well, consider +a Deferred returned by twisted.enterprise.adbapi - the result of a SQL query. +A web widget might add a callback that converts this result into HTML, and +pass the Deferred onwards, where the callback will be used by twisted to +return the result to the HTTP client. The callback chain will be bypassed in +case of errors or exceptions.

              + +


              from twisted.internet import reactor, defer + +class Getter: + def gotResults(self, x): + """ + The Deferred mechanism provides a mechanism to signal error + conditions. In this case, odd numbers are bad. + + This function demonstrates a more complex way of starting + the callback chain by checking for expected results and + choosing whether to fire the callback or errback chain + """ + if x % 2 == 0: + self.d.callback(x*3) + else: + self.d.errback(ValueError("You used an odd number!")) + + def _toHTML(self, r): + """ + This function converts r to HTML. + + It is added to the callback chain by getDummyData in + order to demonstrate how a callback passes its own result + to the next callback + """ + return "Result: %s" % r + + def getDummyData(self, x): + """ + The Deferred mechanism allows for chained callbacks. + In this example, the output of gotResults is first + passed through _toHTML on its way to printData. + + Again this function is a dummy, simulating a delayed result + using callLater, rather than using a real asynchronous + setup. + """ + self.d = defer.Deferred() + # simulate a delayed result by asking the reactor to schedule + # gotResults in 2 seconds time + reactor.callLater(2, self.gotResults, x) + self.d.addCallback(self._toHTML) + return self.d + +def printData(d): + print d + +def printError(failure): + import sys + sys.stderr.write(str(failure)) + +# this series of callbacks and errbacks will print an error message +g = Getter() +d = g.getDummyData(3) +d.addCallback(printData) +d.addErrback(printError) + +# this series of callbacks and errbacks will print "Result: 12" +g = Getter() +d = g.getDummyData(4) +d.addCallback(printData) +d.addErrback(printError) + +reactor.callLater(4, reactor.stop); reactor.run() +
              + +

              Visual Explanation

              + +
              + +
              + +
                +
              1. Requesting method (data sink) requests data, gets + Deferred object.
              2. + +
              3. Requesting method attaches callbacks to Deferred + object.
              4. +
              + + +
                + +
              1. When the result is ready, give it to the Deferred + object. .callback(result) if the operation succeeded, + .errback(failure) if it failed. Note that + failure is typically an instance of a twisted.python.failure.Failure + instance.
              2. + +
              3. Deferred object triggers previously-added (call/err)back + with the result or failure. + Execution then follows the following rules, going down the + chain of callbacks to be processed. + +
                  +
                • Result of the callback is always passed as the first + argument to the next callback, creating a chain of + processors.
                • + +
                • If a callback raises an exception, switch to + errback.
                • + +
• An unhandled failure gets passed down the line of errbacks, thus creating an asynchronous analog to a series of except: statements.
                • + +
                • If an errback doesn't raise an exception or return a + twisted.python.failure.Failure + instance, switch to callback.
                • +
              4. +
              +
              + +

              Errbacks

              + +

              Deferred's error handling is modeled after Python's +exception handling. In the case that no errors occur, all the +callbacks run, one after the other, as described above.

              + +

              If the errback is called instead of the callback (e.g. because a DB query +raised an error), then a twisted.python.failure.Failure is passed into the first +errback (you can add multiple errbacks, just like with callbacks). You can +think of your errbacks as being like except blocks +of ordinary Python code.

              + +

Unless you explicitly raise an error in an except block, the Exception is caught and stops propagating, and normal execution continues. The same thing happens with errbacks: unless you explicitly return a Failure or (re-)raise an exception, the error stops propagating, and normal callbacks continue executing from that point (using the value returned from the errback). If the errback does return a Failure or raise an exception, then that is passed to the next errback, and so on.

              + +

Note: If an errback doesn't return anything, then it effectively returns None, meaning that callbacks will continue to be executed after this errback. This may not be what you expect to happen, so be careful. Make sure your errbacks return a Failure (probably the one that was passed to them), or a meaningful return value for the next callback.

              + +

              Also, twisted.python.failure.Failure instances have +a useful method called trap, allowing you to effectively do the equivalent +of:

              + +


try:
    # code that may throw an exception
    cookSpamAndEggs()
except (SpamException, EggException):
    # Handle SpamExceptions and EggExceptions
    ...
              + +

              You do this by:

              +


def errorHandler(failure):
    failure.trap(SpamException, EggException)
    # Handle SpamExceptions and EggExceptions

d.addCallback(cookSpamAndEggs)
d.addErrback(errorHandler)
              + +

If none of the arguments passed to failure.trap match the error encapsulated in that Failure, then it re-raises the error.
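Putting the pieces together, here is a small sketch (the default value and the TimeoutError choice are illustrative) of an errback that traps one expected failure, substitutes a result, and lets the remaining callbacks run:

    from twisted.internet import defer
    from twisted.internet.error import TimeoutError

    def handleTimeout(failure):
        failure.trap(TimeoutError)   # anything else is re-raised to later errbacks
        return "<no data>"           # value handed to the next callback

    def useResult(result):
        print(result)

    d = defer.Deferred()
    d.addErrback(handleTimeout)
    d.addCallback(useResult)
    d.errback(TimeoutError())        # prints "<no data>"; normal callbacks resume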

              + +

              There's another potential gotcha here. There's a +method twisted.internet.defer.Deferred.addCallbacks +which is similar to, but not exactly the same as, addCallback followed by addErrback. In particular, consider these two cases:

              + +


# Case 1
d = getDeferredFromSomewhere()
d.addCallback(callback1)       # A
d.addErrback(errback1)         # B
d.addCallback(callback2)
d.addErrback(errback2)

# Case 2
d = getDeferredFromSomewhere()
d.addCallbacks(callback1, errback1)   # C
d.addCallbacks(callback2, errback2)
              + +

              If an error occurs in callback1, then for Case 1 +errback1 will be called with the failure. For Case +2, errback2 will be called. Be careful with your +callbacks and errbacks.

              + +

What this means in a practical sense is that in Case 1, "A" will handle a success condition from getDeferredFromSomewhere, and "B" will handle any errors that occur either from the upstream source or in 'A'. In Case 2, "C"'s errback1 will only handle an error condition raised by getDeferredFromSomewhere; it will not do any handling of errors raised in callback1.

              + + +

              Unhandled Errors

              + +

              If a Deferred is garbage-collected with an unhandled error (i.e. it would +call the next errback if there was one), then Twisted will write the error's +traceback to the log file. This means that you can typically get away with not +adding errbacks and still get errors logged. Be careful though; if you keep a +reference to the Deferred around, preventing it from being garbage-collected, +then you may never see the error (and your callbacks will mysteriously seem to +have never been called). If unsure, you should explicitly add an errback after +your callbacks, even if all you do is:

              + +


# Make sure errors get logged
from twisted.python import log
d.addErrback(log.err)
              + +

              Handling either synchronous or asynchronous results

              +

              +In some applications, there are functions that might be either asynchronous or +synchronous. For example, a user authentication function might be able to +check in memory whether a user is authenticated, allowing the authentication +function to return an immediate result, or it may need to wait on +network data, in which case it should return a Deferred to be fired +when that data arrives. However, a function that wants to check if a user is +authenticated will then need to accept both immediate results and +Deferreds. +

              + +

              +In this example, the library function authenticateUser uses the +application function isValidUser to authenticate a user: +

              + +


def authenticateUser(isValidUser, user):
    if isValidUser(user):
        print "User is authenticated"
    else:
        print "User is not authenticated"
              + +

              +However, it assumes that isValidUser returns immediately, +whereas isValidUser may actually authenticate the user +asynchronously and return a Deferred. It is possible to adapt this +trivial user authentication code to accept either a +synchronous isValidUser or an +asynchronous isValidUser, allowing the library to handle +either type of function. It is, however, also possible to adapt +synchronous functions to return Deferreds. This section describes both +alternatives: handling functions that might be synchronous or +asynchronous in the library function (authenticateUser) +or in the application code. +

              + +

              Handling possible Deferreds in the library code

              + +

              +Here is an example of a synchronous user authentication function that might be +passed to authenticateUser: +

              + +
              + +
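The code listing itself appears to be missing from this copy of the page; judging from the asynchronous version below and the user names it checks, it was presumably something like:

    def synchronousIsValidUser(user):
        """
        Return true if user is a valid user, false otherwise
        """
        return user in ["Alice", "Angus", "Agnes"]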

              +However, here's an asynchronousIsValidUser function that returns +a Deferred: +

              + +


from twisted.internet import reactor, defer

def asynchronousIsValidUser(user):
    d = defer.Deferred()
    reactor.callLater(2, d.callback, user in ["Alice", "Angus", "Agnes"])
    return d
              + +

              Our original implementation of authenticateUser expected +isValidUser to be synchronous, but now we need to change it to handle both +synchronous and asynchronous implementations of isValidUser. For this, we +use maybeDeferred to +call isValidUser, ensuring that the result of isValidUser is a Deferred, +even if isValidUser is a synchronous function: +

              + +


from twisted.internet import defer

def printResult(result):
    if result:
        print "User is authenticated"
    else:
        print "User is not authenticated"

def authenticateUser(isValidUser, user):
    d = defer.maybeDeferred(isValidUser, user)
    d.addCallback(printResult)
              + +

              +Now isValidUser could be either synchronousIsValidUser or +asynchronousIsValidUser. +

              + +

              It is also possible to modify synchronousIsValidUser to return +a Deferred, see Generating Deferreds for more +information.
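A minimal sketch of that alternative (an illustration, not taken from the referenced document) simply wraps the synchronous check in an already-fired Deferred:

    from twisted.internet import defer

    def deferredIsValidUser(user):
        # same check as synchronousIsValidUser, wrapped in a Deferred that
        # has already fired, so callers can always attach callbacks
        return defer.succeed(user in ["Alice", "Angus", "Agnes"])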

              + +

              DeferredList

              + +

              Sometimes you want to be notified after several different events have all +happened, rather than waiting for each one individually. For example, you may +want to wait for all the connections in a list to close. twisted.internet.defer.DeferredList is the way to do +this.

              + +

              To create a DeferredList from multiple Deferreds, you simply pass a list of +the Deferreds you want it to wait for:

              +


              # Creates a DeferredList +dl = defer.DeferredList([deferred1, deferred2, deferred3]) +
              + +

              You can now treat the DeferredList like an ordinary Deferred; you can call +addCallbacks and so on. The DeferredList will call its callback +when all the deferreds have completed. The callback will be called with a list +of the results of the Deferreds it contains, like so:

              + +


              def printResult(result): + for (success, value) in result: + if success: + print 'Success:', value + else: + print 'Failure:', value.getErrorMessage() +deferred1 = defer.Deferred() +deferred2 = defer.Deferred() +deferred3 = defer.Deferred() +dl = defer.DeferredList([deferred1, deferred2, deferred3], consumeErrors=True) +dl.addCallback(printResult) +deferred1.callback('one') +deferred2.errback(Exception('bang!')) +deferred3.callback('three') +# At this point, dl will fire its callback, printing: +# Success: one +# Failure: bang! +# Success: three +# (note that defer.SUCCESS == True, and defer.FAILURE == False) +
              + +

              A standard DeferredList will never call errback, but failures in Deferreds +passed to a DeferredList will still errback unless consumeErrors +is passed True. See below for more details about this and other +flags which modify the behavior of DeferredList.

              + +
              Note: +

              If you want to apply callbacks to the individual Deferreds that +go into the DeferredList, you should be careful about when those callbacks +are added. The act of adding a Deferred to a DeferredList inserts a callback +into that Deferred (when that callback is run, it checks to see if the +DeferredList has been completed yet). The important thing to remember is +that it is this callback which records the value that goes into the +result list handed to the DeferredList's callback.

              + + + +

              Therefore, if you add a callback to the Deferred after adding the +Deferred to the DeferredList, the value returned by that callback will not +be given to the DeferredList's callback. To avoid confusion, we recommend not +adding callbacks to a Deferred once it has been used in a DeferredList.

              +
              + +


def printResult(result):
    print result

def addTen(result):
    return result + " ten"

# Deferred gets callback before DeferredList is created
deferred1 = defer.Deferred()
deferred2 = defer.Deferred()
deferred1.addCallback(addTen)
dl = defer.DeferredList([deferred1, deferred2])
dl.addCallback(printResult)
deferred1.callback("one")  # fires addTen, checks DeferredList, stores "one ten"
deferred2.callback("two")
# At this point, dl will fire its callback, printing:
#     [(1, 'one ten'), (1, 'two')]

# Deferred gets callback after DeferredList is created
deferred1 = defer.Deferred()
deferred2 = defer.Deferred()
dl = defer.DeferredList([deferred1, deferred2])
deferred1.addCallback(addTen)  # will run *after* DeferredList gets its value
dl.addCallback(printResult)
deferred1.callback("one")  # checks DeferredList, stores "one", fires addTen
deferred2.callback("two")
# At this point, dl will fire its callback, printing:
#     [(1, 'one'), (1, 'two')]
              + +

              Other behaviours

              + +

              DeferredList accepts three keyword arguments that modify its behaviour: +fireOnOneCallback, fireOnOneErrback and +consumeErrors. If fireOnOneCallback is set, the +DeferredList will immediately call its callback as soon as any of its Deferreds +call their callback. Similarly, fireOnOneErrback will call errback +as soon as any of the Deferreds call their errback. Note that DeferredList is +still one-shot, like ordinary Deferreds, so after a callback or errback has been +called the DeferredList will do nothing further (it will just silently ignore +any other results from its Deferreds).

              + +

              The fireOnOneErrback option is particularly useful when you +want to wait for all the results if everything succeeds, but also want to know +immediately if something fails.

              + +

The consumeErrors argument will stop the DeferredList from propagating any errors along the callback chains of any Deferreds it contains (usually creating a DeferredList has no effect on the results passed along the callbacks and errbacks of its Deferreds). Stopping errors at the DeferredList with this option will prevent Unhandled error in Deferred warnings from the Deferreds it contains without needing to add extra errbacks.[1]
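A short sketch of these flags in use; it assumes the FirstError wrapper that the DeferredList uses to report which input Deferred failed first:

from twisted.internet import defer

def handleFirstError(failure):
    # With fireOnOneErrback, the DeferredList fails as soon as any of its
    # Deferreds fails; the Failure wraps a defer.FirstError whose
    # subFailure attribute is the original error.
    print 'first failure:', failure.value.subFailure.getErrorMessage()

d1 = defer.Deferred()
d2 = defer.Deferred()
dl = defer.DeferredList([d1, d2], fireOnOneErrback=True, consumeErrors=True)
dl.addErrback(handleFirstError)

d2.errback(Exception('bang!'))  # dl fires its errback immediately
d1.callback('ignored by dl')    # dl is one-shot; this result is dropped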

              + + + +

              Class Overview

              + +

              This is an overview API reference for Deferred from the point of using a +Deferred returned by a function. It is not meant to be a +substitute for the docstrings in the Deferred class, but can provide guidelines +for its use.

              + +

              There is a parallel overview of functions used by the Deferred's +creator in Generating Deferreds.

              + +

              Basic Callback Functions

              + +
                +
              • + addCallbacks(self, callback[, errback, callbackArgs, + callbackKeywords, errbackArgs, errbackKeywords]) + +

                This is the method you will use to interact + with Deferred. It adds a pair of callbacks parallel to + each other (see diagram above) in the list of callbacks + made when the Deferred is called back to. The signature of + a method added using addCallbacks should be + myMethod(result, *methodArgs, + **methodKeywords). If your method is passed in the + callback slot, for example, all arguments in the tuple + callbackArgs will be passed as + *methodArgs to your method.

                + +

There are various convenience methods that are derivative of addCallbacks. I will not cover them in detail here, but it is important to know about them in order to create concise code (a short sketch of their use follows this list).

                + +
                  +
                • + addCallback(callback, *callbackArgs, + **callbackKeywords) + +

                  Adds your callback at the next point in the + processing chain, while adding an errback that will + re-raise its first argument, not affecting further + processing in the error case.

                  + +

                  Note that, while addCallbacks (plural) requires the arguments to be + passed in a tuple, addCallback (singular) takes all its remaining + arguments as things to be passed to the callback function. The reason is + obvious: addCallbacks (plural) cannot tell whether the arguments are + meant for the callback or the errback, so they must be specifically + marked by putting them into a tuple. addCallback (singular) knows that + everything is destined to go to the callback, so it can use Python's + * and ** syntax to collect the remaining arguments.

                  + +
                • + +
                • + addErrback(errback, *errbackArgs, + **errbackKeywords) + +

                  Adds your errback at the next point in the + processing chain, while adding a callback that will + return its first argument, not affecting further + processing in the success case.

                  +
                • + +
                • + addBoth(callbackOrErrback, + *callbackOrErrbackArgs, + **callbackOrErrbackKeywords) + +

This method adds the same callback to both sides of the processing chain at the same point. Keep in mind that the type of the first argument is indeterminate if you use this method! Use it for finally: style blocks.

                  +
                • +
              • + +
              + + +
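A short sketch of the convenience methods described in the list above (values and names are illustrative only):

from twisted.internet import defer

def greet(result, suffix):
    return result + suffix

def recover(failure):
    return 'recovered'

def cleanup(resultOrFailure):
    # addBoth runs whether the chain is in the callback or the errback
    # state, like a finally: block; pass the value through unchanged.
    print 'cleaning up, got:', resultOrFailure
    return resultOrFailure

d = defer.Deferred()
d.addCallback(greet, ' world')  # extra positional args go to the callback
d.addErrback(recover)           # only runs if an error has occurred by now
d.addBoth(cleanup)              # runs in either case
d.callback('hello')             # prints: cleaning up, got: hello world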

              Chaining Deferreds

              + +

              If you need one Deferred to wait on another, all you need to do is return a +Deferred from a method added to addCallbacks. Specifically, if you return +Deferred B from a method added to Deferred A using A.addCallbacks, Deferred A's +processing chain will stop until Deferred B's .callback() method is called; at +that point, the next callback in A will be passed the result of the last +callback in Deferred B's processing chain at the time.
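A minimal sketch of this pause-and-resume behaviour (names are illustrative):

from twisted.internet import defer

b = defer.Deferred()

def waitForB(result):
    # Returning Deferred B from a callback pauses A's chain until B fires.
    return b

def resumed(result):
    print 'A resumed with:', result

a = defer.Deferred()
a.addCallback(waitForB)
a.addCallback(resumed)

a.callback('start')      # A's chain pauses inside waitForB
b.callback('B is done')  # now resumed() runs: A resumed with: B is done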

              + +

              If this seems confusing, don't worry about it right now -- when you run into +a situation where you need this behavior, you will probably recognize it +immediately and realize why this happens. If you want to chain deferreds +manually, there is also a convenience method to help you.

              + +
                +
              • + chainDeferred(otherDeferred) + +

Add otherDeferred to the end of this Deferred's processing chain. When self.callback is called, the result of my processing chain up to this point will be passed to otherDeferred.callback. Further additions to my callback chain do not affect otherDeferred.

                +

This is the same as self.addCallbacks(otherDeferred.callback, otherDeferred.errback).

                +
              • +
              + +

              See also

              + +
                +
              1. Generating Deferreds, an introduction to +writing asynchronous functions that return Deferreds.
              2. +
              + +

              Footnotes

1. Unless, of course, a later callback starts a fresh error; but as we've already noted, adding callbacks to a Deferred after it has been used in a DeferredList is confusing and usually best avoided.
              + +

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/deferredindepth.html b/vendor/Twisted-10.0.0/doc/core/howto/deferredindepth.html
new file mode 100644
index 000000000000..317e28ddfb3a
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/deferredindepth.html

              Deferreds are beautiful! (A Tutorial)

              + +
              + + +

              Introduction

              + +

Deferreds are quite possibly the single most confusing topic that a newcomer to Twisted has to deal with. I am going to forgo the usual talk about what deferreds are, what they aren't, and why they're used in Twisted. Instead, I'm going to show you the logic behind what they do.

              + + +

              A deferred allows you to encapsulate the logic that you'd normally use to +make a series of function calls after receiving a result into a single object. +In the examples that follow, I'll first show you what's going to go on behind +the scenes in the deferred chain, then show you the deferred API calls that set +up that chain. All of these examples are runnable code, so feel free to play +around with them.

              + + +

              A simple example

              + +First, a simple example so that we have something to talk about: + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Here we have the simplest case, a single callback and a single errback. +""" + +num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + global num; num += 1 + print "callback %s" % (num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + + +def behindTheScenes(result): + # equivalent to d.callback(result) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(handleResult) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + global num; num = 0 + deferredExample() +
              + +

And the output (since both methods in the example produce the same output, it is only shown once):

              + +
              +callback 1
              +        got result: success
              +
              + +

Here we have the simplest case: a deferred with a single callback and a single errback. Normally, a function would create a deferred and hand it back to you when you request an operation that needs to wait for an event before it can complete. The object you called then does d.callback(result) when the results are in.

              + +

              The thing to notice is that there is only one result that is passed from +method to method, and that the result returned from a method is the argument +to the next method in the chain. In case of an exception, result is set to an +instance of Failure +that describes the exception.

              + +

              Errbacks

              +

              Failure in requested operation

              +

              Things don't always go as planned, and sometimes the function that +returned the deferred needs to alert the callback chain that an error +has occurred.

              + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +This example is analogous to a function calling .errback(failure) +""" + + +class Counter(object): + num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + + +def behindTheScenes(result): + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(result): + d = defer.Deferred() + d.addCallback(handleResult) + d.addCallback(failAtHandlingResult) + d.addErrback(handleFailure) + + d.errback(result) + + +if __name__ == '__main__': + result = None + try: + raise RuntimeError, "*doh*! failure!" + except: + result = failure.Failure() + behindTheScenes(result) + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample(result) +
              + +
              +errback
              +we got an exception: Traceback (most recent call last):
              +--- exception caught here ---
              +  File "deferred_ex1a.py", line 73, in ?
              +    raise RuntimeError, "*doh*! failure!"
              +exceptions.RuntimeError: *doh*! failure!
              +
              + +

The important thing to note (as it will come up again in later examples) is that the callback isn't touched; the failure goes straight to the errback. Also note that the errback trap()s the expected exception type. If you don't trap the exception, an error will be logged when the deferred is garbage-collected.

              + + +

              Exceptions raised in callbacks

              + +

Now let's see what happens when our callback raises an exception.

              + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Here we have a slightly more involved case. The deferred is called back with a +result. the first callback returns a value, the second callback, however +raises an exception, which is handled by the errback. +""" + + +class Counter(object): + num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + + +def behindTheScenes(result): + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(handleResult) + d.addCallback(failAtHandlingResult) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() +
              + +

              And the output: (note, tracebacks will be edited slightly to conserve +space)

              + +
              +callback 1
              +        got result: success
              +callback 2
              +        got result: yay! handleResult was successful!
              +        about to raise exception
              +errback
              +we got an exception: Traceback (most recent call last):
              +--- <exception caught here> ---
              +  File "/home/slyphon/Projects/Twisted/trunk/twisted/internet/defer.py", line
              +326, in _runCallbacks
              +    self.result = callback(self.result, *args, **kw)
              +  File "./deferred_ex1.py", line 32, in failAtHandlingResult
              +    raise RuntimeError, "whoops! we encountered an error"
              +exceptions.RuntimeError: whoops! we encountered an error
              +
              + +

              If your callback raises an exception, the next method to be called will be +the next errback in your chain.

              + + +

              Exceptions will only be handled by errbacks

              + +

If a callback raises an exception, the next method to be called will be the next errback in the chain. If the chain is started off with a failure, the first method to be called will be the first errback.

              + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +This example shows an important concept that many deferred newbies +(myself included) have trouble understanding. + +when an error occurs in a callback, the first errback after the error +occurs will be the next method called. (in the next example we'll +see what happens in the 'chain' after an errback). +""" + +class Counter(object): + num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + + + +def behindTheScenes(result): + # equivalent to d.callback(result) + + # now, let's make the error happen in the first callback + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # note: this callback will be skipped because + # result is a failure + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(failAtHandlingResult) + d.addCallback(handleResult) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() +
              + +
              +callback 1
              +        got result: success
              +        about to raise exception
              +errback
              +we got an exception: Traceback (most recent call last):
              +  File "./deferred_ex2.py", line 85, in ?
              +    nonDeferredExample("success")
              +--- <exception caught here> ---
              +  File "./deferred_ex2.py", line 46, in nonDeferredExample
              +    result = failAtHandlingResult(result)
              +  File "./deferred_ex2.py", line 35, in failAtHandlingResult
              +    raise RuntimeError, "whoops! we encountered an error"
              +exceptions.RuntimeError: whoops! we encountered an error
              +
              + +

You can see that our second callback, handleResult, was not called, because failAtHandlingResult raised an exception.

              + +

              Handling an exception and continuing on

              + +

In this example, we see an errback handle an exception raised in the preceding callback. Take note that it could just as easily have been an exception from any other preceding method. You'll see that after the exception is handled in the errback (i.e. the errback does not return a failure or raise an exception) the chain continues on with the next callback.

              + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Now we see how an errback can handle errors. if an errback +does not raise an exception, the next callback in the chain +will be called. +""" + +class Counter(object): + num = 0 + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + return "okay, continue on" + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + +def callbackAfterErrback(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + + + +def behindTheScenes(result): + # equivalent to d.callback(result) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = callbackAfterErrback(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(handleResult) + d.addCallback(failAtHandlingResult) + d.addErrback(handleFailure) + d.addCallback(callbackAfterErrback) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() +
              + +
              +callback 1
              +        got result: success
              +callback 2
              +        got result: yay! handleResult was successful!
              +        about to raise exception
              +errback
              +we got an exception: Traceback (most recent call last):
              +  File "./deferred_ex3.py", line 97, in <module>
              +    deferredExample()
              +  File "./deferred_ex3.py", line 90, in deferredExample
              +    d.callback("success")
              +  File "/home/slyphon/Projects/Twisted/trunk/twisted/internet/defer.py", line 243, in callback
              +    self._startRunCallbacks(result)
              +  File "/home/slyphon/Projects/Twisted/trunk/twisted/internet/defer.py", line 312, in _startRunCallbacks
              +    self._runCallbacks()
              +--- <exception caught here> ---
              +  File "/home/slyphon/Projects/Twisted/trunk/twisted/internet/defer.py", line 328, in _runCallbacks
              +    self.result = callback(self.result, *args, **kw)
              +  File "./deferred_ex3.py", line 34, in failAtHandlingResult
              +    raise RuntimeError, "whoops! we encountered an error"
              +exceptions.RuntimeError: whoops! we encountered an error
              +
              +callback 3
              +        got result: okay, continue on
              +
              + +

              addBoth: the deferred version of finally

              + +

Now we see how deferreds do finally, with .addBoth. The callback added with addBoth will be called whether the result is a failure or not. We'll also see in this example that our doThisNoMatterWhat() method follows a common idiom in deferred callbacks: it acts as a passthru, returning the value it received so that processing of the chain can continue, while remaining transparent with respect to the result.

              + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Now we'll see what happens when you use 'addBoth'. +""" + +class Counter(object): + num = 0 + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + return "okay, continue on" + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + +def doThisNoMatterWhat(arg): + Counter.num += 1 + print "both %s" % (Counter.num,) + print "\tgot argument %r" % (arg,) + print "\tdoing something very important" + # we pass the argument we received to the next phase here + return arg + + + +def behindTheScenes(result): + # equivalent to d.callback(result) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # ---- this is equivalent to addBoth(doThisNoMatterWhat) + + if not isinstance(result, failure.Failure): + try: + result = doThisNoMatterWhat(result) + except: + result = failure.Failure() + else: + try: + result = doThisNoMatterWhat(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(handleResult) + d.addCallback(failAtHandlingResult) + d.addBoth(doThisNoMatterWhat) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() +
              + +
              +callback 1
              +        got result: success
              +callback 2
              +        got result: yay! handleResult was successful!
              +        about to raise exception
              +both 3
              +        got argument <twisted.python.failure.Failure exceptions.RuntimeError>
              +        doing something very important
              +errback
              +we got an exception: Traceback (most recent call last):
              +--- <exception caught here> ---
              +  File "/home/slyphon/Projects/Twisted/trunk/twisted/internet/defer.py", line
              +326, in _runCallbacks
              +    self.result = callback(self.result, *args, **kw)
              +  File "./deferred_ex4.py", line 32, in failAtHandlingResult
              +    raise RuntimeError, "whoops! we encountered an error"
              +exceptions.RuntimeError: whoops! we encountered an error
              +
              + +

You can see that the errback is called (and, consequently, the failure is trapped). This is because the doThisNoMatterWhat method returned the value it received: a failure.

              + +

              addCallbacks: decision making based on previous success or failure

              + +

As we've been seeing in the examples, callbacks are really added in callback/errback pairs. Using addCallback or addErrback is actually a special case in which one member of the pair is a pass-through. If you want to make a decision based on whether or not the previous result in the chain was a failure (which is very rare, but included here for completeness), you use addCallbacks. Note that this is not the same thing as an addCallback followed by an addErrback.

              + + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Now comes the more nuanced addCallbacks, which allows us to make a +yes/no (branching) decision based on whether the result at a given point is +a failure or not. +""" + +class Counter(object): + num = 0 + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + return "okay, continue on" + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + +def yesDecision(result): + Counter.num += 1 + print "yes decision %s" % (Counter.num,) + print "\twasn't a failure, so we can plow ahead" + return "go ahead!" + +def noDecision(result): + Counter.num += 1 + result.trap(RuntimeError) + print "no decision %s" % (Counter.num,) + print "\t*doh*! a failure! quick! damage control!" + return "damage control successful!" + + + +def behindTheScenes(result): + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # this is equivalent to addCallbacks(yesDecision, noDecision) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = yesDecision(result) + except: + result = failure.Failure() + else: # ---- errback + try: + result = noDecision(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # this is equivalent to addCallbacks(yesDecision, noDecision) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = yesDecision(result) + except: + result = failure.Failure() + else: # ---- errback + try: + result = noDecision(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(failAtHandlingResult) + d.addCallbacks(yesDecision, noDecision) # noDecision will be called + d.addCallback(handleResult) # - A - + d.addCallbacks(yesDecision, noDecision) # yesDecision will be called + d.addCallback(handleResult) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() +
              + +
              +callback 1
              +        got result: success
              +        about to raise exception
              +no decision 2
              +        *doh*! a failure! quick! damage control!
              +callback 3
              +        got result: damage control successful!
              +yes decision 4
              +        wasn't a failure, so we can plow ahead
              +callback 5
              +        got result: go ahead!
              +
              + +

Notice that our errback is never called. The noDecision method returns a non-failure, so processing continues with the next callback. If we wanted to skip the callback at "- A -" because of the error, but still do some kind of processing in response to the error, we would have used a passthru and returned the failure we received, as we see in this next example:

              + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Now comes the more nuanced addCallbacks, which allows us to make a +yes/no (branching) decision based on whether the result at a given point is +a failure or not. + +here, we return the failure from noDecisionPassthru, the errback argument to +the first addCallbacks method invocation, and see what happens. +""" + +class Counter(object): + num = 0 + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + return "okay, continue on" + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + +def yesDecision(result): + Counter.num += 1 + print "yes decision %s" % (Counter.num,) + print "\twasn't a failure, so we can plow ahead" + return "go ahead!" + +def noDecision(result): + Counter.num += 1 + result.trap(RuntimeError) + print "no decision %s" % (Counter.num,) + print "\t*doh*! a failure! quick! damage control!" + return "damage control successful!" + +def noDecisionPassthru(result): + Counter.num += 1 + print "no decision %s" % (Counter.num,) + print "\t*doh*! a failure! don't know what to do, returning failure!" + return result + + +def behindTheScenes(result): + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # this is equivalent to addCallbacks(yesDecision, noDecision) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = yesDecision(result) + except: + result = failure.Failure() + else: # ---- errback + try: + result = noDecisionPassthru(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # this is equivalent to addCallbacks(yesDecision, noDecision) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = yesDecision(result) + except: + result = failure.Failure() + else: # ---- errback + try: + result = noDecision(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(failAtHandlingResult) + + # noDecisionPassthru will be called + d.addCallbacks(yesDecision, noDecisionPassthru) + d.addCallback(handleResult) # - A - + + # noDecision will be called + d.addCallbacks(yesDecision, noDecision) + d.addCallback(handleResult) # - B - + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + 
deferredExample() +
              + +
              +callback 1
              +        got result: success
              +        about to raise exception
              +no decision 2
              +        *doh*! a failure! don't know what to do, returning failure!
              +no decision 3
              +        *doh*! a failure! quick! damage control!
              +callback 4
              +        got result: damage control successful!
              +
              + +

Two things to note here. First, "- A -" was skipped, as we wanted. Second, after "- A -", noDecision is called, because it is the next errback in the chain. It returns a non-failure, so processing continues with the next callback at "- B -", and the errback at the end of the chain is never called.

              + +

              Hints, tips, common mistakes, and miscellaney

              + +

              The deferred callback chain is stateful

              + +

A deferred that has already been called back will run each callback or errback, as appropriate, as soon as it is added. So, as we see in the following example, deferredExample1 and deferredExample2 are equivalent: the first sets up the processing chain beforehand and then fires it, while the other fires the deferred first and executes the chain as it is being constructed. This is because deferreds are stateful.

              + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +The deferred callback chain is stateful, and can be executed before +or after all callbacks have been added to the chain +""" + +class Counter(object): + num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + +def deferredExample1(): + # this is another common idiom, since all add* methods + # return the deferred instance, you can just chain your + # calls to addCallback and addErrback + + d = defer.Deferred().addCallback(failAtHandlingResult + ).addCallback(handleResult + ).addErrback(handleFailure) + + d.callback("success") + +def deferredExample2(): + d = defer.Deferred() + + d.callback("success") + + d.addCallback(failAtHandlingResult) + d.addCallback(handleResult) + d.addErrback(handleFailure) + + +if __name__ == '__main__': + deferredExample1() + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample2() +
              + +
              +callback 1
              +        got result: success
              +        about to raise exception
              +errback
              +we got an exception: Traceback (most recent call last):
              +--- <exception caught here> ---
              +  File "/home/slyphon/Projects/Twisted/trunk/twisted/internet/defer.py", line
              +326, in _runCallbacks
              +    self.result = callback(self.result, *args, **kw)
              +  File "./deferred_ex7.py", line 35, in failAtHandlingResult
              +    raise RuntimeError, "whoops! we encountered an error"
              +exceptions.RuntimeError: whoops! we encountered an error
              +
              +
              +-------------------------------------------------
              +
              +callback 1
              +        got result: success
              +        about to raise exception
              +errback
              +we got an exception: Traceback (most recent call last):
              +--- <exception caught here> ---
              +  File "/home/slyphon/Projects/Twisted/trunk/twisted/internet/defer.py", line
              +326, in _runCallbacks
              +    self.result = callback(self.result, *args, **kw)
              +  File "./deferred_ex7.py", line 35, in failAtHandlingResult
              +    raise RuntimeError, "whoops! we encountered an error"
              +exceptions.RuntimeError: whoops! we encountered an error
              +
              + +

              This example also shows you the common idiom of chaining calls to +addCallback and addErrback. +

              + +

              Don't call .callback() on deferreds you didn't create!

              + +

It is an error to fire a deferred's callback or errback method a second time. Therefore, if you didn't create a deferred, do not, under any circumstances, call its callback or errback; doing so will raise an exception.
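A minimal sketch of what happens if you do; this assumes the AlreadyCalledError exception Twisted raises when a Deferred is fired a second time:

from twisted.internet import defer

d = defer.Deferred()
d.callback('first result')
try:
    d.callback('second result')  # firing a Deferred twice is an error
except defer.AlreadyCalledError:
    print 'a Deferred may only be fired once'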

              + +

              Callbacks can return deferreds

              + +

If you need to call a method that returns a deferred within your callback chain, just return that deferred, and the result of the secondary deferred's processing chain will become the result that gets passed to the next callback of the primary deferred's processing chain.

              + +


              #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + + +class Counter(object): + num = 0 + let = 'a' + + def incrLet(cls): + cls.let = chr(ord(cls.let) + 1) + incrLet = classmethod(incrLet) + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + return f + +def subCb_B(result): + print "sub-callback %s" % (Counter.let,) + Counter.incrLet() + s = " beautiful!" + print "\tadding %r to result" % (s,) + result += s + return result + +def subCb_A(result): + print "sub-callback %s" % (Counter.let,) + Counter.incrLet() + s = " are " + print "\tadding %r to result" % (s,) + result += s + return result + +def mainCb_1(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + result += " Deferreds " + + d = defer.Deferred().addCallback(subCb_A + ).addCallback(subCb_B) + d.callback(result) + return d + +def mainCb_2(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + + +def deferredExample(): + d = defer.Deferred().addCallback(mainCb_1 + ).addCallback(mainCb_2) + + d.callback("I hope you'll agree: ") + + +if __name__ == '__main__': + deferredExample() +
              + +
              +callback 1
              +        got result: I hope you'll agree: 
              +sub-callback a
              +        adding ' are ' to result
              +sub-callback b
              +        adding ' beautiful!' to result
              +callback 2
              +        got result: I hope you'll agree:  Deferreds  are  beautiful!
              +
              + +

              Conclusion

              + +

Deferreds can be confusing, but only because they're so elegant and simple. There is a lot of logical power that can be expressed with a deferred's processing chain, and once you see what's going on behind the curtain, it's a lot easier to understand how to make use of what deferreds have to offer.

              + +
              + +

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/design.html b/vendor/Twisted-10.0.0/doc/core/howto/design.html
new file mode 100644
index 000000000000..3a3a7330fe3d
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/design.html

              Designing Twisted Applications

              + +
              + + + +

              Goals

              + +

              This document describes how a good Twisted application is structured. It +should be useful for beginning Twisted developers who want to structure their +code in a clean, maintainable way that reflects current best practices.

              + +

              Readers will want to be familiar with writing servers and clients using Twisted.

              + +

              Example of a modular design: TwistedQuotes

              + +

              TwistedQuotes is a very simple plugin which is a great +demonstration of +Twisted's power. It will export a small kernel of functionality -- Quote of +the Day -- which can be accessed through every interface that Twisted supports: +web pages, e-mail, instant messaging, a specific Quote of the Day protocol, and +more.

              + +

              Set up the project directory

              + +

              See the description of setting up the TwistedQuotes +example.

              + +

              A Look at the Heart of the Application

              + + + +

              This code listing shows us what the Twisted Quotes system is all about. The +code doesn't have any way of talking to the outside world, but it provides a +library which is a clear and uncluttered abstraction: give me the quote of +the day.

              + +
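As a rough stand-in for that listing, a Quoter can be as simple as a plain class with a getQuote method (the class below is hypothetical, not the original TwistedQuotes code):

from random import choice

class QuickQuoter:
    # A Quoter is anything with a getQuote() method returning a string.
    def __init__(self, quotes):
        self.quotes = quotes

    def getQuote(self):
        return choice(self.quotes)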

              Note that this module does not import any Twisted functionality at all! The +reason for doing things this way is integration. If your business +objects are not stuck to your user interface, you can make a module that +can integrate those objects with different protocols, GUIs, and file formats. +Having such classes provides a way to decouple your components from each other, +by allowing each to be used independently.

              + +

              In this manner, Twisted itself has minimal impact on the logic of your +program. Although the Twisted dot products are highly interoperable, +they +also follow this approach. You can use them independently because they are not +stuck to each other. They communicate in well-defined ways, and only when that +communication provides some additional feature. Thus, you can use twisted.web with twisted.enterprise, but neither requires the other, because +they are integrated around the concept of Deferreds.

              + +

              Your Twisted applications should follow this style as much as possible. +Have (at least) one module which implements your specific functionality, +independent of any user-interface code.

              + +

              Next, we're going to need to associate this abstract logic with some way of +displaying it to the user. We'll do this by writing a Twisted server protocol, +which will respond to the clients that connect to it by sending a quote to the +client and then closing the connection. Note: don't get too focused on the +details of this -- different ways to interface with the user are 90% of what +Twisted does, and there are lots of documents describing the different ways to +do it.

              + +


              from zope.interface import Interface + +from twisted.internet.protocol import Factory, Protocol + + + +class IQuoter(Interface): + """ + An object that returns quotes. + """ + def getQuote(): + """ + Return a quote. + """ + + + +class QOTD(Protocol): + def connectionMade(self): + self.transport.write(self.factory.quoter.getQuote()+'\r\n') + self.transport.loseConnection() + + + +class QOTDFactory(Factory): + """ + A factory for the Quote of the Day protocol. + + @type quoter: L{IQuoter} provider + @ivar quoter: An object which provides L{IQuoter} which will be used by + the L{QOTD} protocol to get quotes to emit. + """ + protocol = QOTD + + def __init__(self, quoter): + self.quoter = quoter +
              Twisted +Quotes Protocol Implementation - listings/TwistedQuotes/quoteproto.py
              + +

              This is a very straightforward Protocol implementation, and the +pattern described above is repeated here. The Protocol contains essentially no +logic of its own, just enough to tie together an object which can generate +quotes (a Quoter) and an object which can relay +bytes to a TCP connection (a Transport). When a +client connects to this server, a QOTD instance is +created, and its connectionMade method is called. +

              + +

              The QOTDFactory's role is to specify to the +Twisted framework how to create a Protocol instance +that will handle the connection. Twisted will not instantiate a QOTDFactory; you will do that yourself later, in a +twistd plug-in. +
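For quick experimentation (outside the twistd plug-in the HOWTO builds later), one could wire the factory to the reactor directly; the port number and the QuickQuoter class from the earlier sketch are assumptions:

from twisted.internet import reactor

quoter = QuickQuoter(["An apple a day keeps the doctor away."])
reactor.listenTCP(8007, QOTDFactory(quoter))  # arbitrary non-privileged port
reactor.run()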

              + +

              Note: you can read more specifics of Protocol and +Factory in the Writing +Servers HOWTO.

              + +

              Once we have an abstraction -- a Quoter -- and we have a +mechanism to connect it to the network -- the QOTD protocol -- the +next thing to do is to put the last link in the chain of functionality between +abstraction and user. This last link will allow a user to choose a +Quoter and configure the protocol. Writing this configuration is +covered in the Application HOWTO.

              + +
              + +

              Index

              + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/dirdbm.html b/vendor/Twisted-10.0.0/doc/core/howto/dirdbm.html new file mode 100644 index 000000000000..af0e48ba218c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/dirdbm.html @@ -0,0 +1,77 @@ + + +Twisted Documentation: DirDBM: Directory-based Storage + + + + +

              DirDBM: Directory-based Storage

              + +
              + + +

              dirdbm.DirDBM

              + +

              twisted.persisted.dirdbm.DirDBM is a DBM-like storage system. +That is, it stores mappings between keys +and values, like a Python dictionary, except that it stores the values in files +in a directory - each entry is a different file. The keys must always be strings, +as are the values. Other than that, DirDBM +objects act just like Python dictionaries.

              + +

DirDBM is useful for cases when you want to store small amounts of data in an organized fashion, without having to deal with the complexity of an RDBMS or other sophisticated database. It is simple, easy to use, cross-platform, and doesn't require any external C libraries, unlike Python's built-in DBM modules.

              + +
              +>>> from twisted.persisted import dirdbm
              +>>> d = dirdbm.DirDBM("/tmp/dir")
              +>>> d["librarian"] = "ook"
              +>>> d["librarian"]        
              +'ook'
              +>>> d.keys()
              +['librarian']
              +>>> del d["librarian"]
              +>>> d.items()
              +[]
              +
              + +

              dirdbm.Shelf

              + +

Sometimes it is necessary to persist more complicated objects than strings. With some care, dirdbm.Shelf can transparently persist them. Shelf works exactly like DirDBM, except that the values (but not the keys) can be arbitrary picklable objects. However, notice that mutating an object after it has been stored in the Shelf has no effect on the Shelf. When mutating objects, it is necessary to explicitly store them back in the Shelf afterwards:

              + +
              +>>> from twisted.persisted import dirdbm
              +>>> d = dirdbm.Shelf("/tmp/dir2")
              +>>> d["key"] = [1, 2]
              +>>> d["key"]
              +[1, 2]
              +>>> l = d["key"]
              +>>> l.append(3)
              +>>> d["key"]
              +[1, 2]
              +>>> d["key"] = l
              +>>> d["key"]
              +[1, 2, 3]
              +
              + + + + + +
              + +

              Index

              + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/gendefer.html b/vendor/Twisted-10.0.0/doc/core/howto/gendefer.html new file mode 100644 index 000000000000..4d5878bf9442 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/gendefer.html @@ -0,0 +1,415 @@ + + +Twisted Documentation: Generating Deferreds + + + + +

              Generating Deferreds

              + +
              + + + +

              Deferred objects are +signals that a function you have called does not yet have the data you want +available. When a function returns a Deferred object, your calling function +attaches callbacks to it to handle the data when available.

              + +

              This document addresses the other half of the question: writing functions +that return Deferreds, that is, constructing Deferred objects, arranging for +them to be returned immediately without blocking until data is available, and +firing their callbacks when the data is available.
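
A minimal sketch of that shape (the delay, the result string, and the function name are invented for illustration): construct the Deferred, arrange for something (here the reactor's callLater) to fire it later, and return it immediately.

    from twisted.internet import defer, reactor

    def getDataLater():
        d = defer.Deferred()
        # fire the Deferred with a result two seconds from now,
        # long after this function has already returned it
        reactor.callLater(2, d.callback, "the data")
        return d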

              + +

This document assumes that you are familiar with the asynchronous model used by Twisted, and with using deferreds returned by functions.

              + + + +

              Class overview

              + +

This is an overview API reference for Deferred from the point of view of the code that creates a Deferred and fires its callbacks and errbacks. It is not meant to be a substitute for the docstrings in the Deferred class, but can provide guidelines for its use.

              + +

There is a parallel overview of the functions used by the calling code to which the Deferred is returned, in Using Deferreds.

              + +

              Basic Callback Functions

              + + + +

              What Deferreds don't do: make your code asynchronous

              + +

Deferreds do not magically make your code non-blocking.

              + +

              Let's take this function as an example:

              + +


              from twisted.internet import defer + +TARGET = 10000 + +def largeFibonnaciNumber(): + # create a Deferred object to return: + d = defer.Deferred() + + # calculate the ten thousandth Fibonnaci number + + first = 0 + second = 1 + + for i in xrange(TARGET - 1): + new = first + second + first = second + second = new + if i % 100 == 0: + print "Progress: calculating the %dth Fibonnaci number" % i + + # give the Deferred the answer to pass to the callbacks: + d.callback(second) + + # return the Deferred with the answer: + return d + +import time + +timeBefore = time.time() + +# call the function and get our Deferred +d = largeFibonnaciNumber() + +timeAfter = time.time() + +print "Total time taken for largeFibonnaciNumber call: %0.3f seconds" % + (timeAfter - timeBefore) + +# add a callback to it to print the number + +def printNumber(number): + print "The %dth Fibonacci number is %d" % (TARGET, number) + +print "Adding the callback now." + +d.addCallback(printNumber) +
              + +

              You will notice that despite creating a Deferred in the +largeFibonnaciNumber function, these things happened:

              +
                +
              • the "Total time taken for largeFibonnaciNumber call" output +shows that the function did not return immediately as asynchronous functions +are expected to do; and
              • +
• rather than the callback being added before the result was available and called after the result became available, it wasn't even added until after the calculation had been completed.
              • +
              + +

The function completed its calculation before returning, blocking the process until it had finished, which is exactly what asynchronous functions are not meant to do. Deferreds are not a non-blocking talisman: they are a signal for asynchronous functions to use to pass results on to callbacks, but using them does not guarantee that you have an asynchronous function.

              + + +

              Advanced Processing Chain Control

              + +
                +
              • + pause() + +

                Cease calling any methods as they are added, and do not + respond to callback, until + self.unpause() is called.

                +
              • + +
              • + unpause() + +

                If callback has been called on this + Deferred already, call all the callbacks that have been + added to this Deferred since pause was + called.

                + +

                Whether it was called or not, this will put this + Deferred in a state where further calls to + addCallbacks or callback will + work as normal.

                +
              • +
              + +

              Returning Deferreds from synchronous functions

              + +

Sometimes you might wish to return a Deferred from a synchronous function. There are several reasons why; the major two are maintaining API compatibility with another version of your function which returns a Deferred, and allowing for the possibility that in the future your function might need to be asynchronous.

              + +

              In the Using Deferreds reference, we gave the +following example of a synchronous function:

              + +


              def synchronousIsValidUser(user): + ''' + Return true if user is a valid user, false otherwise + ''' + return user in ["Alice", "Angus", "Agnes"] +
              + +

While we can require that callers of our function wrap our synchronous result in a Deferred using maybeDeferred, for the sake of API compatibility it is better to return a Deferred ourselves using defer.succeed:

              + +


              from twisted.internet import defer + +def immediateIsValidUser(user): + ''' + Returns a Deferred resulting in true if user is a valid user, false + otherwise + ''' + + result = user in ["Alice", "Angus", "Agnes"] + + # return a Deferred object already called back with the value of result + return defer.succeed(result) +
              + +

              There is an equivalent defer.fail method to return a Deferred with the +errback chain already fired.
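
For example (a sketch only; the type check is invented for illustration), an immediately-known error can be reported through the errback chain with defer.fail rather than by raising:

    from twisted.internet import defer

    def immediateIsValidUser(user):
        if not isinstance(user, str):
            # report the problem via the errback chain instead of raising
            return defer.fail(TypeError("user name must be a string"))
        return defer.succeed(user in ["Alice", "Angus", "Agnes"])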

              + +

              Integrating blocking code with Twisted

              + +

At some point, you are likely to need to call a blocking function: many third party libraries contain long-running blocking functions. There is no way to 'force' a function to be asynchronous: it must be written that way specifically. When using Twisted, your own code should be asynchronous, but there is no way to make third party functions asynchronous other than rewriting them.

              + +

              In this case, Twisted provides the ability to run the blocking code in a +separate thread rather than letting it block your application. The twisted.internet.threads.deferToThread function will set up +a thread to run your blocking function, return a Deferred and later fire that +Deferred when the thread completes.

              + +

Let's assume our largeFibonnaciNumber function from above is in a third party library (returning the result of the calculation, not a Deferred) and is not easily modifiable to be finished in discrete blocks. This example shows it being called in a thread; unlike in the earlier section, we'll see that the operation does not block our entire program:

              + +


              def largeFibonnaciNumber(): + """ + Represent a long running blocking function by calculating + the TARGETth Fibonnaci number + """ + TARGET = 10000 + + first = 0 + second = 1 + + for i in xrange(TARGET - 1): + new = first + second + first = second + second = new + + return second + +from twisted.internet import threads, reactor + +def fibonacciCallback(result): + """ + Callback which manages the largeFibonnaciNumber result by + printing it out + """ + print "largeFibonnaciNumber result =", result + # make sure the reactor stops after the callback chain finishes, + # just so that this example terminates + reactor.stop() + +def run(): + """ + Run a series of operations, deferring the largeFibonnaciNumber + operation to a thread and performing some other operations after + adding the callback + """ + # get our Deferred which will be called with the largeFibonnaciNumber result + d = threads.deferToThread(largeFibonnaciNumber) + # add our callback to print it out + d.addCallback(fibonacciCallback) + print "1st line after the addition of the callback" + print "2nd line after the addition of the callback" + +if __name__ == '__main__': + run() + reactor.run() +
              + +

              Possible sources of error

              + +

              Deferreds greatly simplify the process of writing asynchronous code by +providing a standard for registering callbacks, but there are some subtle and +sometimes confusing rules that you need to follow if you are going to use +them. This mostly applies to people who are writing new systems that use +Deferreds internally, and not writers of applications that just add callbacks +to Deferreds produced and processed by other systems. Nevertheless, it is good +to know.

              + +

              Firing Deferreds more than once is impossible

              + +

              Deferreds are one-shot. You can only call Deferred.callback or +Deferred.errback once. The processing chain continues each time +you add new callbacks to an already-called-back-to Deferred.
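
A small sketch of the one-shot rule; firing a Deferred a second time raises twisted.internet.defer.AlreadyCalledError:

    from twisted.internet import defer

    d = defer.Deferred()
    d.callback("first result")
    try:
        d.callback("second result")     # not allowed: already fired
    except defer.AlreadyCalledError:
        print "a Deferred may only be fired once"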

              + +

              Synchronous callback execution

              + +

If a Deferred already has a result available, addCallback may call the callback synchronously: that is, immediately after it's been added. In situations where callbacks modify state, it might be desirable for the chain of processing to halt until all callbacks are added. For this, it is possible to pause and unpause a Deferred's processing chain while you are adding lots of callbacks.

              + +

Be careful when you use these methods! If you pause a Deferred, it is your responsibility to make sure that you unpause it. The function adding the callbacks must unpause a paused Deferred; it should never be the responsibility of the code that actually fires the callback chain by calling callback or errback, as this would negate its usefulness!
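
A brief sketch of that pattern (stepOne and stepTwo are hypothetical callbacks): the code that adds the callbacks pauses the already-fired Deferred, builds the whole chain, and then unpauses it so the queued callbacks run in order.

    from twisted.internet import defer

    def stepOne(result):
        return result + " -> step one"

    def stepTwo(result):
        print "final result:", result

    d = defer.Deferred()
    d.callback("result")     # the result is already available

    d.pause()                # hold callback execution while the chain is built
    d.addCallback(stepOne)   # queued, not run yet
    d.addCallback(stepTwo)
    d.unpause()              # now the queued callbacks run, in order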

              + +
              + +

              Index

              + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/glossary.html b/vendor/Twisted-10.0.0/doc/core/howto/glossary.html new file mode 100644 index 000000000000..7826774fe647 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/glossary.html @@ -0,0 +1,347 @@ + + +Twisted Documentation: Twisted Glossary + + + + +

              Twisted Glossary

              +
                +
                + + +
                + +
                adaptee
                +
+ An object that has been adapted, also called the original. See Adapter. +
                + +
                Adapter
                +
                + An object whose sole purpose is to implement an Interface for another object. + See Interfaces and Adapters. +
                + +
                Application
                +
                + A twisted.application.service.Application. There are + HOWTOs on creating and manipulating them as a + system-administrator, as well as using them in + your code. +
                + +
                Avatar
                +
+ (from Twisted Cred) business logic for a specific user. For example, in PB these are perspectives, in POP3 these are mailboxes, and so on. +
                + +
                Banana
                +
                + The low-level data marshalling layer of Twisted Spread. + See twisted.spread.banana. +
                + +
                Broker
                +
                + A twisted.spread.pb.Broker, the object request + broker for Twisted Spread. +
                + +
                cache
                +
+ A way to store data in a readily accessible place for later reuse. Caching is often done because the data is expensive to produce or access. Cached data risks being stale, or out of sync with the original data. +
                + +
                component
                +
                + A special kind of (persistent) Adapter that works with a twisted.python.components.Componentized. See also Interfaces and Adapters. +
                + +
                Componentized
                +
                + A Componentized object is a collection of information, separated + into domain-specific or role-specific instances, that all stick + together and refer to each other. + Each object is an Adapter, which, in the + context of Componentized, we call components. See also Interfaces and Adapters. +
                + +
                conch
                +
                Twisted's SSH implementation.
                + +
                Connector
                +
                + Object used to interface between client connections and protocols, usually + used with a twisted.internet.protocol.ClientFactory + to give you control over how a client connection reconnects. See twisted.internet.interfaces.IConnector and Writing Clients. +
                + +
                Consumer
                +
                + An object that consumes data from a Producer. See + twisted.internet.interfaces.IConsumer. +
                + +
                Cred
                +
                + Twisted's authentication API, twisted.cred. See + Introduction to Twisted Cred and + Twisted Cred usage. +
                + +
                credentials
                +
                + A username/password, public key, or some other information used for + authentication. +
                + +
                credential checker
                +
                + Where authentication actually happens. See + ICredentialChecker. +
                + +
                CVSToys
                +
                A nifty set of tools for CVS, available at +http://twistedmatrix.com/users/acapnotic/wares/code/CVSToys/.
                + +
                Daemon
                +
                + A background process that does a job or handles client requests. + Daemon is a Unix term; service is the NT equivalent. +
                + +
                Deferred
                +
+ An instance of twisted.internet.defer.Deferred, an abstraction for handling chains of callbacks and error handlers (errbacks). See the Deferring Execution HOWTO. +
                + +
                Enterprise
                +
                + Twisted's RDBMS support. It contains twisted.enterprise.adbapi for asynchronous access to any + standard DB-API 2.0 module, and twisted.enterprise.row, a Relational + Object Wrapper. See Introduction to + Twisted Enterprise and Twisted Enterprise Row + Objects for more details. +
                + +
                errback
                +
                + A callback attached to a Deferred with + .addErrback to handle errors. +
                + +
                Factory
                +
                + In general, an object that constructs other objects. In Twisted, a Factory + usually refers to a twisted.internet.protocol.Factory, which constructs + Protocol instances for incoming or outgoing + connections. See Writing Servers and Writing Clients. +
                + +
                Failure
                +
                + Basically, an asynchronous exception that contains traceback information; + these are used for passing errors through asynchronous callbacks. +
                + +
                im
                +
                + Abbreviation of (Twisted) Instance + Messenger. +
                + +
                Instance Messenger
                +
                + Instance Messenger is a multi-protocol chat program that comes with + Twisted. It can communicate via TOC with the AOL servers, via IRC, as well as + via PB with Twisted + Words. See twisted.im. +
                + +
                Interface
                +
                + A class that defines and documents methods that a class conforming to that + interface needs to have. A collection of core twisted.internet interfaces can + be found in twisted.internet.interfaces. See also Interfaces and Adapters. +
                + +
                Jelly
                +
+ The serialization layer for Twisted Spread, although it can be used separately from Twisted Spread as well. It is similar in purpose to Python's standard pickle module, but is more network-friendly, and depends on a separate marshaller (Banana, in most cases). See twisted.spread.jelly. +
                + +
                Lore
                + +
                Lore is +Twisted's documentation system. The source format is a subset of +XHTML, and output formats include HTML and LaTeX.
                + +
                Manhole
                +
                + A debugging/administration interface to a Twisted application. +
                + +
                Microdom
                +
                + A partial DOM implementation using SUX. It is simple and + pythonic, rather than strictly standards-compliant. See twisted.web.microdom. +
                + +
                Names
                +
                Twisted's DNS server, found in twisted.names.
                + +
                Nevow
                +
                The successor to Woven; available from +Divmod. +
                + +
                PB
                +
                + Abbreviation of Perspective + Broker. +
                + +
                Perspective Broker
                +
                + The high-level object layer of Twisted Spread, + implementing semantics for method calling and object copying, caching, and + referencing. See twisted.spread.pb. +
                + +
                Portal
                +
                + Glues credential checkers and + realms together. +
                + +
                Producer
                +
                + An object that generates data a chunk at a time, usually to be processed by a + Consumer. See + twisted.internet.interfaces.IProducer. +
                + +
                Protocol
                +
                + In general each network connection has its own Protocol instance to manage + connection-specific state. There is a collection of standard + protocol implementations in twisted.protocols. See + also Writing Servers and Writing Clients. +
                + +
                PSU
                +
                There is no PSU.
                + +
                Reactor
                +
                + The core event-loop of a Twisted application. See + Reactor Basics. +
                + +
                Reality
                +
                See Twisted Reality
                + +
                realm
                +
                + (in Twisted Cred) stores avatars + and perhaps general business logic. See + IRealm. +
                + +
                Resource
                +
                + A twisted.web.resource.Resource, which are served + by Twisted Web. Resources can be as simple as a static file on disk, or they + can have dynamically generated content. +
                + +
                ROW
                +
                + Relational Object Wrapper, an object-oriented + interface to a relational database. See Twisted Enterprise + Row Objects. +
                + +
                Service
                +
                + A twisted.application.service.Service. See Application howto for a description of how they + relate to Applications. +
                + +
                Spread
                +
                Twisted Spread is +Twisted's remote-object suite. It consists of three layers: +Perspective Broker, Jelly +and Banana. See Writing Applications +with Perspective Broker.
                + +
                SUX
                +
                Small Uncomplicated XML, Twisted's simple XML +parser written in pure Python. See +twisted.protocols.sux.
                + +
                TAC
                +
                A Twisted Application Configuration is a Python +source file, generally with the .tac extension, which defines +configuration to make an application runnable using twistd.
                + +
                TAP
                +
                Twisted Application Pickle (deprecated), or simply just a +Twisted APplication. A serialised application that was created +with mktap and runnable by twistd. See +Using the Utilities.
                + +
                Trial
                +
                twisted.trial, Twisted's unit-testing framework, +modelled after pyunit. See also +Writing tests for Twisted code.
                + +
                Twisted Matrix Laboratories
                +
                The team behind Twisted. +http://twistedmatrix.com/.
                + +
                Twisted Reality
                +
                +In days of old, the Twisted Reality multiplayer text-based interactive-fiction +system was the main focus of Twisted Matrix Labs; Twisted, the general networking +framework, grew out of Reality's need for better network functionality. Twisted +Reality has been superseded by the +Imaginary project. +
                + +
                usage
                +
The twisted.python.usage module, a replacement for the standard getopt module for parsing command-lines, which is much easier to work with. See Parsing command-lines.
                + +
                Words
                +
                Twisted Words is a multi-protocol chat server that uses the +Perspective Broker protocol as its native +communication style. See twisted.words.
                + +
                Woven
                +
Web Object Visualization Environment. A templating system previously included with Twisted, but no longer. Woven has largely been superseded by Divmod Nevow.
                + +
                + +
                + +

                Index

                + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/howto.tidyrc b/vendor/Twisted-10.0.0/doc/core/howto/howto.tidyrc new file mode 100644 index 000000000000..68965051d818 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/howto.tidyrc @@ -0,0 +1,6 @@ +output-xml: yes +output-xhtml: yes +tidy-mark: no +indent: auto +gnu-emacs: yes +add-xml-decl: yes \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/index.html b/vendor/Twisted-10.0.0/doc/core/howto/index.html new file mode 100644 index 000000000000..4644564cdbfb --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/index.html @@ -0,0 +1,198 @@ + + +Twisted Documentation: Twisted Documentation + + + + +

                Twisted Documentation

                +
                  +
                  + + + + +
                  + +

                  Index

                  + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/internet-overview.html b/vendor/Twisted-10.0.0/doc/core/howto/internet-overview.html new file mode 100644 index 000000000000..40ea11afefd1 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/internet-overview.html @@ -0,0 +1,48 @@ + + +Twisted Documentation: Overview of Twisted Internet + + + + +

                  Overview of Twisted Internet

                  +
                    +
                    + + + +

                    Twisted Internet is a collection of compatible event-loops for Python. +It contains the code to dispatch events to interested observers and a portable +API so that observers need not care about which event loop is running. Thus, +it is possible to use the same code for different loops, from Twisted's basic, +yet portable, select-based loop to the loops of various GUI +toolkits like GTK+ or Tk.

                    + +

                    Twisted Internet contains the various interfaces to the reactor +API, whose usage is documented in the low-level chapter. Those APIs +are IReactorCore, +IReactorTCP, +IReactorSSL, +IReactorUNIX, +IReactorUDP, +IReactorTime, +IReactorProcess, +IReactorMulticast +and IReactorThreads. +The reactor APIs allow non-persistent calls to be made.
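
As a small illustration of such a non-persistent call (a sketch assuming the default reactor), IReactorTime's callLater schedules a one-shot function call:

    from twisted.internet import reactor

    def sayHello():
        print "hello from the reactor"
        reactor.stop()

    reactor.callLater(1.0, sayHello)   # one-shot call, one second from now
    reactor.run()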

                    + +

                    Twisted Internet also covers the interfaces for the various transports, +in ITransport +and friends. These interfaces allow Twisted network code to be written without +regard to the underlying implementation of the transport.

                    + +

                    The IProtocolFactory +dictates how factories, which are usually a large part of third party code, are +written.

                    + +
                    + +

                    Index

                    + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/__init__.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/__init__.py new file mode 100644 index 000000000000..ed6bd97a9c9c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/__init__.py @@ -0,0 +1,3 @@ +""" +Twisted Quotes +""" diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/pbquote.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/pbquote.py new file mode 100644 index 000000000000..d0330e676301 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/pbquote.py @@ -0,0 +1,10 @@ +from twisted.spread import pb + +class QuoteReader(pb.Root): + + def __init__(self, quoter): + self.quoter = quoter + + def remote_nextQuote(self): + return self.quoter.getQuote() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/pbquoteclient.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/pbquoteclient.py new file mode 100644 index 000000000000..c2975399e1a3 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/pbquoteclient.py @@ -0,0 +1,32 @@ + +from sys import stdout +from twisted.python import log +log.discardLogs() +from twisted.internet import reactor +from twisted.spread import pb + +def connected(root): + root.callRemote('nextQuote').addCallbacks(success, failure) + +def success(quote): + stdout.write(quote + "\n") + reactor.stop() + +def failure(error): + stdout.write("Failed to obtain quote.\n") + reactor.stop() + +factory = pb.PBClientFactory() +reactor.connectTCP( + "localhost", # host name + pb.portno, # port number + factory, # factory + ) + + + +factory.getRootObject().addCallbacks(connected, # when we get the root + failure) # when we can't + +reactor.run() # start the main loop + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quoteproto.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quoteproto.py new file mode 100644 index 000000000000..b8d346922b10 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quoteproto.py @@ -0,0 +1,36 @@ +from zope.interface import Interface + +from twisted.internet.protocol import Factory, Protocol + + + +class IQuoter(Interface): + """ + An object that returns quotes. + """ + def getQuote(): + """ + Return a quote. + """ + + + +class QOTD(Protocol): + def connectionMade(self): + self.transport.write(self.factory.quoter.getQuote()+'\r\n') + self.transport.loseConnection() + + + +class QOTDFactory(Factory): + """ + A factory for the Quote of the Day protocol. + + @type quoter: L{IQuoter} provider + @ivar quoter: An object which provides L{IQuoter} which will be used by + the L{QOTD} protocol to get quotes to emit. + """ + protocol = QOTD + + def __init__(self, quoter): + self.quoter = quoter diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quoters.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quoters.py new file mode 100644 index 000000000000..f6d56890498e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quoters.py @@ -0,0 +1,39 @@ +from random import choice + +from zope.interface import implements + +from TwistedQuotes import quoteproto + + + +class StaticQuoter: + """ + Return a static quote. 
+ """ + + implements(quoteproto.IQuoter) + + def __init__(self, quote): + self.quote = quote + + + def getQuote(self): + return self.quote + + + +class FortuneQuoter: + """ + Load quotes from a fortune-format file. + """ + implements(quoteproto.IQuoter) + + def __init__(self, filenames): + self.filenames = filenames + + + def getQuote(self): + quoteFile = file(choice(self.filenames)) + quotes = quoteFile.read().split('\n%\n') + quoteFile.close() + return choice(quotes) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotes.txt b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotes.txt new file mode 100644 index 000000000000..62a5ed99950e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotes.txt @@ -0,0 +1,15 @@ + + the sysadmin of the future is going to know twisted-shelling like the back of his hand +% + Ooh, I just figured out what my first twisted.reality creation will be. + Acapnotic: oh? + "Being Glyph Lefkowitz" +% + Oh, please. Threads ownz j00. +% + I used to hang out with this chick that ran a BBS. + She had a great baud. +% + dsmith: Twisted is neat, but unfortunately, it's not object-oriented. +% + twisted is madness diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotetap.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotetap.py new file mode 100644 index 000000000000..06d15ec871f7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotetap.py @@ -0,0 +1,29 @@ +from twisted.application import internet # services that run TCP/SSL/etc. +from TwistedQuotes import quoteproto # Protocol and Factory +from TwistedQuotes import quoters # "give me a quote" code + +from twisted.python import usage # twisted command-line processing + + +class Options(usage.Options): + optParameters = [["port", "p", 8007, + "Port number to listen on for QOTD protocol."], + ["static", "s", "An apple a day keeps the doctor away.", + "A static quote to display."], + ["file", "f", None, + "A fortune-format text file to read quotes from."]] + + +def makeService(config): + """Return a service that will be attached to the application.""" + if config["file"]: # If I was given a "file" option... + # Read quotes from a file, selecting a random one each time, + quoter = quoters.FortuneQuoter([config['file']]) + else: # otherwise, + # read a single quote from the command line (or use the default). + quoter = quoters.StaticQuoter(config['static']) + port = int(config["port"]) # TCP port to listen on + factory = quoteproto.QOTDFactory(quoter) # here we create a QOTDFactory + # Finally, set up our factory, with its custom quoter, to create QOTD + # protocol instances when events arrive on the specified port. 
+ return internet.TCPServer(port, factory) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotetap2.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotetap2.py new file mode 100644 index 000000000000..4bc0f06b776b --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/quotetap2.py @@ -0,0 +1,36 @@ +from TwistedQuotes import quoteproto # Protocol and Factory +from TwistedQuotes import quoters # "give me a quote" code +from TwistedQuotes import pbquote # perspective broker binding + +from twisted.application import service, internet +from twisted.python import usage # twisted command-line processing +from twisted.spread import pb # Perspective Broker + +class Options(usage.Options): + optParameters = [["port", "p", 8007, + "Port number to listen on for QOTD protocol."], + ["static", "s", "An apple a day keeps the doctor away.", + "A static quote to display."], + ["file", "f", None, + "A fortune-format text file to read quotes from."], + ["pb", "b", None, + "Port to listen with PB server"]] + +def makeService(config): + svc = service.MultiService() + if config["file"]: # If I was given a "file" option... + # Read quotes from a file, selecting a random one each time, + quoter = quoters.FortuneQuoter([config['file']]) + else: # otherwise, + # read a single quote from the command line (or use the default). + quoter = quoters.StaticQuoter(config['static']) + port = int(config["port"]) # TCP port to listen on + factory = quoteproto.QOTDFactory(quoter) # here we create a QOTDFactory + # Finally, set up our factory, with its custom quoter, to create QOTD + # protocol instances when events arrive on the specified port. + pbport = config['pb'] # TCP PB port to listen on + if pbport: + pbfact = pb.PBServerFactory(pbquote.QuoteReader(quoter)) + svc.addService(internet.TCPServer(int(pbport), pbfact)) + svc.addService(internet.TCPServer(port, factory)) + return svc diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/webquote.rpy b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/webquote.rpy new file mode 100644 index 000000000000..99e0e9cbe648 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/TwistedQuotes/webquote.rpy @@ -0,0 +1,12 @@ +# -*- Python -*- + +from TwistedQuotes import webquoteresource + +#__file__ is defined to be the name of this file; this is to +#get the sibling file "quotes.txt" which should be in the same directory +import os +quotefile = os.path.join(os.path.split(__file__)[0], "quotes.txt") + +#ResourceScript requires us to define 'resource'. +#This resource is used to render the page. +resource = webquoteresource.QuoteResource([quotefile]) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/application/service.tac b/vendor/Twisted-10.0.0/doc/core/howto/listings/application/service.tac new file mode 100644 index 000000000000..b0167fa86606 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/application/service.tac @@ -0,0 +1,34 @@ +# You can run this .tac file directly with: +# twistd -ny service.tac + +""" +This is an example .tac file which starts a webserver on port 8080 and +serves files from the current working directory. 
+ +The important part of this, the part that makes it a .tac file, is +the final root-level section, which sets up the object called 'application' +which twistd will look for +""" + +import os +from twisted.application import service, internet +from twisted.web import static, server + +def getWebService(): + """ + Return a service suitable for creating an application object. + + This service is a simple web server that serves files on port 8080 from + underneath the current working directory. + """ + # create a resource to serve static files + fileServer = server.Site(static.File(os.getcwd())) + return internet.TCPServer(8080, fileServer) + +# this is the core part of any tac file, the creation of the root-level +# application object +application = service.Application("Demo application") + +# attach the service to its parent application +service = getWebService() +service.setServiceParent(application) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex.py new file mode 100644 index 000000000000..3aeae3a42cf0 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Here we have the simplest case, a single callback and a single errback. +""" + +num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + global num; num += 1 + print "callback %s" % (num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + + +def behindTheScenes(result): + # equivalent to d.callback(result) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(handleResult) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + global num; num = 0 + deferredExample() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex1a.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex1a.py new file mode 100755 index 000000000000..737cc4fb508a --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex1a.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +This example is analogous to a function calling .errback(failure) +""" + + +class Counter(object): + num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" 
+ +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + + +def behindTheScenes(result): + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(result): + d = defer.Deferred() + d.addCallback(handleResult) + d.addCallback(failAtHandlingResult) + d.addErrback(handleFailure) + + d.errback(result) + + +if __name__ == '__main__': + result = None + try: + raise RuntimeError, "*doh*! failure!" + except: + result = failure.Failure() + behindTheScenes(result) + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample(result) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex1b.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex1b.py new file mode 100755 index 000000000000..3243821c7f36 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex1b.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Here we have a slightly more involved case. The deferred is called back with a +result. the first callback returns a value, the second callback, however +raises an exception, which is handled by the errback. +""" + + +class Counter(object): + num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! 
we encountered an error" + + +def behindTheScenes(result): + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(handleResult) + d.addCallback(failAtHandlingResult) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex2.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex2.py new file mode 100755 index 000000000000..21f83634a06b --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex2.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +This example shows an important concept that many deferred newbies +(myself included) have trouble understanding. + +when an error occurs in a callback, the first errback after the error +occurs will be the next method called. (in the next example we'll +see what happens in the 'chain' after an errback). +""" + +class Counter(object): + num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! 
we encountered an error" + + + +def behindTheScenes(result): + # equivalent to d.callback(result) + + # now, let's make the error happen in the first callback + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # note: this callback will be skipped because + # result is a failure + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(failAtHandlingResult) + d.addCallback(handleResult) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex3.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex3.py new file mode 100755 index 000000000000..b71e43ab1caa --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex3.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Now we see how an errback can handle errors. if an errback +does not raise an exception, the next callback in the chain +will be called. +""" + +class Counter(object): + num = 0 + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + return "okay, continue on" + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! 
we encountered an error" + +def callbackAfterErrback(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + + + +def behindTheScenes(result): + # equivalent to d.callback(result) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = callbackAfterErrback(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(handleResult) + d.addCallback(failAtHandlingResult) + d.addErrback(handleFailure) + d.addCallback(callbackAfterErrback) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex4.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex4.py new file mode 100755 index 000000000000..cb005c7e6e83 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex4.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Now we'll see what happens when you use 'addBoth'. +""" + +class Counter(object): + num = 0 + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + return "okay, continue on" + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! 
we encountered an error" + +def doThisNoMatterWhat(arg): + Counter.num += 1 + print "both %s" % (Counter.num,) + print "\tgot argument %r" % (arg,) + print "\tdoing something very important" + # we pass the argument we received to the next phase here + return arg + + + +def behindTheScenes(result): + # equivalent to d.callback(result) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # ---- this is equivalent to addBoth(doThisNoMatterWhat) + + if not isinstance(result, failure.Failure): + try: + result = doThisNoMatterWhat(result) + except: + result = failure.Failure() + else: + try: + result = doThisNoMatterWhat(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(handleResult) + d.addCallback(failAtHandlingResult) + d.addBoth(doThisNoMatterWhat) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex5.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex5.py new file mode 100755 index 000000000000..08d453ee743d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex5.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Now comes the more nuanced addCallbacks, which allows us to make a +yes/no (branching) decision based on whether the result at a given point is +a failure or not. +""" + +class Counter(object): + num = 0 + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + return "okay, continue on" + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + +def yesDecision(result): + Counter.num += 1 + print "yes decision %s" % (Counter.num,) + print "\twasn't a failure, so we can plow ahead" + return "go ahead!" + +def noDecision(result): + Counter.num += 1 + result.trap(RuntimeError) + print "no decision %s" % (Counter.num,) + print "\t*doh*! a failure! quick! damage control!" + return "damage control successful!" 
+ + + +def behindTheScenes(result): + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # this is equivalent to addCallbacks(yesDecision, noDecision) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = yesDecision(result) + except: + result = failure.Failure() + else: # ---- errback + try: + result = noDecision(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # this is equivalent to addCallbacks(yesDecision, noDecision) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = yesDecision(result) + except: + result = failure.Failure() + else: # ---- errback + try: + result = noDecision(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(failAtHandlingResult) + d.addCallbacks(yesDecision, noDecision) # noDecision will be called + d.addCallback(handleResult) # - A - + d.addCallbacks(yesDecision, noDecision) # yesDecision will be called + d.addCallback(handleResult) + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex6.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex6.py new file mode 100755 index 000000000000..cc2996d7a841 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex6.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +Now comes the more nuanced addCallbacks, which allows us to make a +yes/no (branching) decision based on whether the result at a given point is +a failure or not. + +here, we return the failure from noDecisionPassthru, the errback argument to +the first addCallbacks method invocation, and see what happens. +""" + +class Counter(object): + num = 0 + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + return "okay, continue on" + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! we encountered an error" + +def yesDecision(result): + Counter.num += 1 + print "yes decision %s" % (Counter.num,) + print "\twasn't a failure, so we can plow ahead" + return "go ahead!" 
+ +def noDecision(result): + Counter.num += 1 + result.trap(RuntimeError) + print "no decision %s" % (Counter.num,) + print "\t*doh*! a failure! quick! damage control!" + return "damage control successful!" + +def noDecisionPassthru(result): + Counter.num += 1 + print "no decision %s" % (Counter.num,) + print "\t*doh*! a failure! don't know what to do, returning failure!" + return result + + +def behindTheScenes(result): + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = failAtHandlingResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # this is equivalent to addCallbacks(yesDecision, noDecision) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = yesDecision(result) + except: + result = failure.Failure() + else: # ---- errback + try: + result = noDecisionPassthru(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + # this is equivalent to addCallbacks(yesDecision, noDecision) + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = yesDecision(result) + except: + result = failure.Failure() + else: # ---- errback + try: + result = noDecision(result) + except: + result = failure.Failure() + + + if not isinstance(result, failure.Failure): # ---- callback + try: + result = handleResult(result) + except: + result = failure.Failure() + else: # ---- errback + pass + + + if not isinstance(result, failure.Failure): # ---- callback + pass + else: # ---- errback + try: + result = handleFailure(result) + except: + result = failure.Failure() + + +def deferredExample(): + d = defer.Deferred() + d.addCallback(failAtHandlingResult) + + # noDecisionPassthru will be called + d.addCallbacks(yesDecision, noDecisionPassthru) + d.addCallback(handleResult) # - A - + + # noDecision will be called + d.addCallbacks(yesDecision, noDecision) + d.addCallback(handleResult) # - B - + d.addErrback(handleFailure) + + d.callback("success") + + +if __name__ == '__main__': + behindTheScenes("success") + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex7.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex7.py new file mode 100755 index 000000000000..f3cb02734e76 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex7.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + +""" +The deferred callback chain is stateful, and can be executed before +or after all callbacks have been added to the chain +""" + +class Counter(object): + num = 0 + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + f.trap(RuntimeError) + +def handleResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + return "yay! handleResult was successful!" + +def failAtHandlingResult(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + print "\tabout to raise exception" + raise RuntimeError, "whoops! 
we encountered an error" + +def deferredExample1(): + # this is another common idiom, since all add* methods + # return the deferred instance, you can just chain your + # calls to addCallback and addErrback + + d = defer.Deferred().addCallback(failAtHandlingResult + ).addCallback(handleResult + ).addErrback(handleFailure) + + d.callback("success") + +def deferredExample2(): + d = defer.Deferred() + + d.callback("success") + + d.addCallback(failAtHandlingResult) + d.addCallback(handleResult) + d.addErrback(handleFailure) + + +if __name__ == '__main__': + deferredExample1() + print "\n-------------------------------------------------\n" + Counter.num = 0 + deferredExample2() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex8.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex8.py new file mode 100755 index 000000000000..6c8ae17f59bb --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/deferred_ex8.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.python import failure, util + + +class Counter(object): + num = 0 + let = 'a' + + def incrLet(cls): + cls.let = chr(ord(cls.let) + 1) + incrLet = classmethod(incrLet) + + +def handleFailure(f): + print "errback" + print "we got an exception: %s" % (f.getTraceback(),) + return f + +def subCb_B(result): + print "sub-callback %s" % (Counter.let,) + Counter.incrLet() + s = " beautiful!" + print "\tadding %r to result" % (s,) + result += s + return result + +def subCb_A(result): + print "sub-callback %s" % (Counter.let,) + Counter.incrLet() + s = " are " + print "\tadding %r to result" % (s,) + result += s + return result + +def mainCb_1(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + result += " Deferreds " + + d = defer.Deferred().addCallback(subCb_A + ).addCallback(subCb_B) + d.callback(result) + return d + +def mainCb_2(result): + Counter.num += 1 + print "callback %s" % (Counter.num,) + print "\tgot result: %s" % (result,) + + +def deferredExample(): + d = defer.Deferred().addCallback(mainCb_1 + ).addCallback(mainCb_2) + + d.callback("I hope you'll agree: ") + + +if __name__ == '__main__': + deferredExample() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/synch-validation.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/synch-validation.py new file mode 100644 index 000000000000..2912f2b185c0 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/deferred/synch-validation.py @@ -0,0 +1,5 @@ +def synchronousIsValidUser(user): + ''' + Return true if user is a valid user, false otherwise + ''' + return user in ["Alice", "Angus", "Agnes"] diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_classes.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_classes.py new file mode 100755 index 000000000000..354df9df469a --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_classes.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +from twisted.spread import pb + +class MasterDuckPond(pb.Cacheable): + def __init__(self, ducks): + self.observers = [] + self.ducks = ducks + def count(self): + print "I have [%d] ducks" % len(self.ducks) + def addDuck(self, duck): + self.ducks.append(duck) + for o in self.observers: o.callRemote('addDuck', duck) + def removeDuck(self, duck): + self.ducks.remove(duck) + for o in self.observers: o.callRemote('removeDuck', duck) + def getStateToCacheAndObserveFor(self, perspective, observer): + self.observers.append(observer) + # you should ignore pb.Cacheable-specific state, like self.observers + return self.ducks # in this case, just a list of ducks + def stoppedObserving(self, perspective, observer): + self.observers.remove(observer) + +class SlaveDuckPond(pb.RemoteCache): + # This is a cache of a remote MasterDuckPond + def count(self): + return len(self.cacheducks) + def getDucks(self): + return self.cacheducks + def setCopyableState(self, state): + print " cache - sitting, er, setting ducks" + self.cacheducks = state + def observe_addDuck(self, newDuck): + print " cache - addDuck" + self.cacheducks.append(newDuck) + def observe_removeDuck(self, deadDuck): + print " cache - removeDuck" + self.cacheducks.remove(deadDuck) + +pb.setUnjellyableForClass(MasterDuckPond, SlaveDuckPond) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_receiver.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_receiver.py new file mode 100755 index 000000000000..2487c3cc880a --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_receiver.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.application import service, internet +from twisted.internet import reactor +from twisted.spread import pb +import cache_classes + +class Receiver(pb.Root): + def remote_takePond(self, pond): + self.pond = pond + print "got pond:", pond # a DuckPondCache + self.remote_checkDucks() + def remote_checkDucks(self): + print "[%d] ducks: " % self.pond.count(), self.pond.getDucks() + def remote_ignorePond(self): + # stop watching the pond + print "dropping pond" + # gc causes __del__ causes 'decache' msg causes stoppedObserving + self.pond = None + def remote_shutdown(self): + reactor.stop() + +application = service.Application("copy_receiver") +internet.TCPServer(8800, pb.PBServerFactory(Receiver())).setServiceParent( + service.IServiceCollection(application)) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_sender.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_sender.py new file mode 100755 index 000000000000..dea96e5dc08e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/cache_sender.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +from twisted.spread import pb, jelly +from twisted.python import log +from twisted.internet import reactor +from cache_classes import MasterDuckPond + +class Sender: + def __init__(self, pond): + self.pond = pond + + def phase1(self, remote): + self.remote = remote + d = remote.callRemote("takePond", self.pond) + d.addCallback(self.phase2).addErrback(log.err) + def phase2(self, response): + self.pond.addDuck("ugly duckling") + self.pond.count() + reactor.callLater(1, self.phase3) + def phase3(self): + d = self.remote.callRemote("checkDucks") + d.addCallback(self.phase4).addErrback(log.err) + def phase4(self, dummy): + self.pond.removeDuck("one duck") + self.pond.count() + self.remote.callRemote("checkDucks") + d = self.remote.callRemote("ignorePond") + d.addCallback(self.phase5) + def phase5(self, dummy): + d = self.remote.callRemote("shutdown") + d.addCallback(self.phase6) + def phase6(self, dummy): + reactor.stop() + +def main(): + master = MasterDuckPond(["one duck", "two duck"]) + master.count() + + sender = Sender(master) + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + deferred = factory.getRootObject() + deferred.addCallback(sender.phase1) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/chatclient.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/chatclient.py new file mode 100755 index 000000000000..d3e00d5507c8 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/chatclient.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor +from twisted.cred import credentials + +class Client(pb.Referenceable): + + def remote_print(self, message): + print message + + def connect(self): + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + def1 = factory.login(credentials.UsernamePassword("alice", "1234"), + client=self) + def1.addCallback(self.connected) + reactor.run() + + def connected(self, perspective): + print "connected, joining group #lookingForFourth" + # this perspective is a reference to our User object + d = perspective.callRemote("joinGroup", "#lookingForFourth") + d.addCallback(self.gotGroup) + + def gotGroup(self, group): + print "joined group, now sending a message to all members" + # 'group' is a reference to the Group object (through a ViewPoint) + d = group.callRemote("send", "You can call me Al.") + d.addCallback(self.shutdown) + + def shutdown(self, result): + reactor.stop() + + +Client().connect() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/chatserver.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/chatserver.py new file mode 100755 index 000000000000..7be4364ec7de --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/chatserver.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +from zope.interface import implements + +from twisted.cred import portal, checkers +from twisted.spread import pb +from twisted.internet import reactor + +class ChatServer: + def __init__(self): + self.groups = {} # indexed by name + + def joinGroup(self, groupname, user, allowMattress): + if not self.groups.has_key(groupname): + self.groups[groupname] = Group(groupname, allowMattress) + self.groups[groupname].addUser(user) + return self.groups[groupname] + +class ChatRealm: + implements(portal.IRealm) + def requestAvatar(self, avatarID, mind, *interfaces): + assert pb.IPerspective in interfaces + avatar = User(avatarID) + avatar.server = self.server + avatar.attached(mind) + return pb.IPerspective, avatar, lambda a=avatar:a.detached(mind) + +class User(pb.Avatar): + def __init__(self, name): + self.name = name + def attached(self, mind): + self.remote = mind + def detached(self, mind): + self.remote = None + def perspective_joinGroup(self, groupname, allowMattress=True): + return self.server.joinGroup(groupname, self, allowMattress) + def send(self, message): + self.remote.callRemote("print", message) + +class Group(pb.Viewable): + def __init__(self, groupname, allowMattress): + self.name = groupname + self.allowMattress = allowMattress + self.users = [] + def addUser(self, user): + self.users.append(user) + def view_send(self, from_user, message): + if not self.allowMattress and message.find("mattress") != -1: + raise ValueError, "Don't say that word" + for user in self.users: + user.send("<%s> says: %s" % (from_user.name, message)) + +realm = ChatRealm() +realm.server = ChatServer() +checker = checkers.InMemoryUsernamePasswordDatabaseDontUse() +checker.addUser("alice", "1234") +checker.addUser("bob", "secret") +checker.addUser("carol", "fido") +p = portal.Portal(realm, [checker]) + +reactor.listenTCP(8800, pb.PBServerFactory(p)) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_classes.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_classes.py new file mode 100755 index 000000000000..60138c0a0816 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_classes.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb + +class FrogPond: + def __init__(self, numFrogs, numToads): + self.numFrogs = numFrogs + self.numToads = numToads + def count(self): + return self.numFrogs + self.numToads + +class SenderPond(FrogPond, pb.Copyable): + def getStateToCopy(self): + d = self.__dict__.copy() + d['frogsAndToads'] = d['numFrogs'] + d['numToads'] + del d['numFrogs'] + del d['numToads'] + return d + +class ReceiverPond(pb.RemoteCopy): + def setCopyableState(self, state): + self.__dict__ = state + def count(self): + return self.frogsAndToads + +pb.setUnjellyableForClass(SenderPond, ReceiverPond) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_receiver.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_receiver.py new file mode 100755 index 000000000000..166801f72005 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_receiver.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +from twisted.application import service, internet +from twisted.internet import reactor +from twisted.spread import pb +import copy2_classes # needed to get ReceiverPond registered with Jelly + +class Receiver(pb.Root): + def remote_takePond(self, pond): + print " got pond:", pond + print " count %d" % pond.count() + return "safe and sound" # positive acknowledgement + def remote_shutdown(self): + reactor.stop() + +application = service.Application("copy_receiver") +internet.TCPServer(8800, pb.PBServerFactory(Receiver())).setServiceParent( + service.IServiceCollection(application)) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_sender.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_sender.py new file mode 100755 index 000000000000..bc374d09a7c7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy2_sender.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb, jelly +from twisted.python import log +from twisted.internet import reactor +from copy2_classes import SenderPond + +class Sender: + def __init__(self, pond): + self.pond = pond + + def got_obj(self, obj): + d = obj.callRemote("takePond", self.pond) + d.addCallback(self.ok).addErrback(self.notOk) + + def ok(self, response): + print "pond arrived", response + reactor.stop() + def notOk(self, failure): + print "error during takePond:" + if failure.type == jelly.InsecureJelly: + print " InsecureJelly" + else: + print failure + reactor.stop() + return None + +def main(): + pond = SenderPond(3, 4) + print "count %d" % pond.count() + + sender = Sender(pond) + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + deferred = factory.getRootObject() + deferred.addCallback(sender.got_obj) + reactor.run() + +if __name__ == '__main__': + main() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy_receiver.tac b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy_receiver.tac new file mode 100755 index 000000000000..79aaf5bbf68f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy_receiver.tac @@ -0,0 +1,41 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +PB copy receiver example. + +This is a Twisted Application Configuration (tac) file. Run with e.g. + twistd -ny copy_receiver.tac + +See the twistd(1) man page or +http://twistedmatrix.com/documents/current/howto/application for details. 
+""" + +import sys +if __name__ == '__main__': + print __doc__ + sys.exit(1) + +from twisted.application import service, internet +from twisted.internet import reactor +from twisted.spread import pb +from copy_sender import LilyPond, CopyPond + +from twisted.python import log +#log.startLogging(sys.stdout) + +class ReceiverPond(pb.RemoteCopy, LilyPond): + pass +pb.setUnjellyableForClass(CopyPond, ReceiverPond) + +class Receiver(pb.Root): + def remote_takePond(self, pond): + print " got pond:", pond + pond.countFrogs() + return "safe and sound" # positive acknowledgement + def remote_shutdown(self): + reactor.stop() + +application = service.Application("copy_receiver") +internet.TCPServer(8800, pb.PBServerFactory(Receiver())).setServiceParent( + service.IServiceCollection(application)) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy_sender.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy_sender.py new file mode 100755 index 000000000000..1636dbb5965d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/copy_sender.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb, jelly +from twisted.python import log +from twisted.internet import reactor + +class LilyPond: + def setStuff(self, color, numFrogs): + self.color = color + self.numFrogs = numFrogs + def countFrogs(self): + print "%d frogs" % self.numFrogs + +class CopyPond(LilyPond, pb.Copyable): + pass + +class Sender: + def __init__(self, pond): + self.pond = pond + + def got_obj(self, remote): + self.remote = remote + d = remote.callRemote("takePond", self.pond) + d.addCallback(self.ok).addErrback(self.notOk) + + def ok(self, response): + print "pond arrived", response + reactor.stop() + def notOk(self, failure): + print "error during takePond:" + if failure.type == jelly.InsecureJelly: + print " InsecureJelly" + else: + print failure + reactor.stop() + return None + +def main(): + from copy_sender import CopyPond # so it's not __main__.CopyPond + pond = CopyPond() + pond.setStuff("green", 7) + pond.countFrogs() + # class name: + print ".".join([pond.__class__.__module__, pond.__class__.__name__]) + + sender = Sender(pond) + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + deferred = factory.getRootObject() + deferred.addCallback(sender.got_obj) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/exc_client.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/exc_client.py new file mode 100755 index 000000000000..6ec3da46bd7a --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/exc_client.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor + +def main(): + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + d = factory.getRootObject() + d.addCallbacks(got_obj) + reactor.run() + +def got_obj(obj): + # change "broken" into "broken2" to demonstrate an unhandled exception + d2 = obj.callRemote("broken") + d2.addCallback(working) + d2.addErrback(broken) + +def working(): + print "erm, it wasn't *supposed* to work.." 
+ +def broken(reason): + print "got remote Exception" + # reason should be a Failure (or subclass) holding the MyError exception + print " .__class__ =", reason.__class__ + print " .getErrorMessage() =", reason.getErrorMessage() + print " .type =", reason.type + reactor.stop() + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/exc_server.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/exc_server.py new file mode 100755 index 000000000000..1afe83e6dac7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/exc_server.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor + +class MyError(pb.Error): + """This is an Expected Exception. Something bad happened.""" + pass + +class MyError2(Exception): + """This is an Unexpected Exception. Something really bad happened.""" + pass + +class One(pb.Root): + def remote_broken(self): + msg = "fall down go boom" + print "raising a MyError exception with data '%s'" % msg + raise MyError(msg) + def remote_broken2(self): + msg = "hadda owie" + print "raising a MyError2 exception with data '%s'" % msg + raise MyError2(msg) + +def main(): + reactor.listenTCP(8800, pb.PBServerFactory(One())) + reactor.run() + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb1client.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb1client.py new file mode 100755 index 000000000000..2cb842faa5d7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb1client.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor + +def main(): + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + def1 = factory.getRootObject() + def1.addCallbacks(got_obj1, err_obj1) + reactor.run() + +def err_obj1(reason): + print "error getting first object", reason + reactor.stop() + +def got_obj1(obj1): + print "got first object:", obj1 + print "asking it to getTwo" + def2 = obj1.callRemote("getTwo") + def2.addCallbacks(got_obj2) + +def got_obj2(obj2): + print "got second object:", obj2 + print "telling it to do three(12)" + obj2.callRemote("three", 12) + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb1server.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb1server.py new file mode 100755 index 000000000000..1efa60abd57b --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb1server.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb + +class Two(pb.Referenceable): + def remote_three(self, arg): + print "Two.three was given", arg + +class One(pb.Root): + def remote_getTwo(self): + two = Two() + print "returning a Two called", two + return two + +from twisted.internet import reactor +reactor.listenTCP(8800, pb.PBServerFactory(One())) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb2client.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb2client.py new file mode 100755 index 000000000000..632f42cf1a6e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb2client.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +from twisted.spread import pb +from twisted.internet import reactor + +def main(): + foo = Foo() + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + factory.getRootObject().addCallback(foo.step1) + reactor.run() + +# keeping globals around is starting to get ugly, so we use a simple class +# instead. Instead of hooking one function to the next, we hook one method +# to the next. + +class Foo: + def __init__(self): + self.oneRef = None + + def step1(self, obj): + print "got one object:", obj + self.oneRef = obj + print "asking it to getTwo" + self.oneRef.callRemote("getTwo").addCallback(self.step2) + + def step2(self, two): + print "got two object:", two + print "giving it back to one" + print "one is", self.oneRef + self.oneRef.callRemote("checkTwo", two) + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb2server.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb2server.py new file mode 100755 index 000000000000..85fcc79e2b77 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb2server.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor + +class Two(pb.Referenceable): + def remote_print(self, arg): + print "two.print was given", arg + +class One(pb.Root): + def __init__(self, two): + #pb.Root.__init__(self) # pb.Root doesn't implement __init__ + self.two = two + def remote_getTwo(self): + print "One.getTwo(), returning my two called", two + return two + def remote_checkTwo(self, newtwo): + print "One.checkTwo(): comparing my two", self.two + print "One.checkTwo(): against your two", newtwo + if two == newtwo: + print "One.checkTwo(): our twos are the same" + + +two = Two() +root_obj = One(two) +reactor.listenTCP(8800, pb.PBServerFactory(root_obj)) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb3client.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb3client.py new file mode 100755 index 000000000000..9c6f7f350b8f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb3client.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor + +class Two(pb.Referenceable): + def remote_print(self, arg): + print "Two.print() called with", arg + +def main(): + two = Two() + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + def1 = factory.getRootObject() + def1.addCallback(got_obj, two) # hands our 'two' to the callback + reactor.run() + +def got_obj(obj, two): + print "got One:", obj + print "giving it our two" + obj.callRemote("takeTwo", two) + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb3server.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb3server.py new file mode 100755 index 000000000000..001d3320f316 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb3server.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +from twisted.spread import pb +from twisted.internet import reactor + +class One(pb.Root): + def remote_takeTwo(self, two): + print "received a Two called", two + print "telling it to print(12)" + two.callRemote("print", 12) + +reactor.listenTCP(8800, pb.PBServerFactory(One())) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb4client.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb4client.py new file mode 100755 index 000000000000..0354fb9faffe --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb4client.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor + +def main(): + rootobj_def = pb.getObjectAt("localhost", 8800, 30) + rootobj_def.addCallbacks(got_rootobj) + obj2_def = getSomeObjectAt("localhost", 8800, 30, "two") + obj2_def.addCallbacks(got_obj2) + obj3_def = getSomeObjectAt("localhost", 8800, 30, "three") + obj3_def.addCallbacks(got_obj3) + reactor.run() + +def got_rootobj(rootobj): + print "got root object:", rootobj + print "telling root object to do foo(A)" + rootobj.callRemote("foo", "A") + +def got_obj2(obj2): + print "got second object:", obj2 + print "telling second object to do foo(B)" + obj2.callRemote("foo", "B") + +def got_obj3(obj3): + print "got third object:", obj3 + print "telling third object to do foo(C)" + obj3.callRemote("foo", "C") + +class my_ObjectRetrieval(pb._ObjectRetrieval): + def __init__(self, broker, d, objname): + pb._ObjectRetrieval.__init__(self, broker, d) + self.objname = objname + def connectionMade(self): + assert not self.term, "How did this get called?" + x = self.broker.remoteForName(self.objname) + del self.broker + self.term = 1 + self.deferred.callback(x) + +def getSomeObjectAt(host, port, timeout=None, objname="root"): + from twisted.internet import defer + from twisted.spread.pb import Broker, BrokerClientFactory + d = defer.Deferred() + b = Broker(1) + bf = BrokerClientFactory(b) + my_ObjectRetrieval(b, d, objname) + if host == "unix": + # every time you use this, God kills a kitten + reactor.connectUNIX(port, bf, timeout) + else: + reactor.connectTCP(host, port, bf, timeout) + return d + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb5client.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb5client.py new file mode 100755 index 000000000000..fac671c32b07 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb5client.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor +from twisted.cred import credentials + +def main(): + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + def1 = factory.login(credentials.UsernamePassword("user1", "pass1")) + def1.addCallback(connected) + reactor.run() + +def connected(perspective): + print "got perspective ref:", perspective + print "asking it to foo(12)" + perspective.callRemote("foo", 12) + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb5server.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb5server.py new file mode 100755 index 000000000000..d3fc3b53976e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb5server.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +from zope.interface import implements + +from twisted.spread import pb +from twisted.cred import checkers, portal +from twisted.internet import reactor + +class MyPerspective(pb.Avatar): + def __init__(self, name): + self.name = name + def perspective_foo(self, arg): + print "I am", self.name, "perspective_foo(",arg,") called on", self + +class MyRealm: + implements(portal.IRealm) + def requestAvatar(self, avatarId, mind, *interfaces): + if pb.IPerspective not in interfaces: + raise NotImplementedError + return pb.IPerspective, MyPerspective(avatarId), lambda:None + +p = portal.Portal(MyRealm()) +p.registerChecker( + checkers.InMemoryUsernamePasswordDatabaseDontUse(user1="pass1")) +reactor.listenTCP(8800, pb.PBServerFactory(p)) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6client1.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6client1.py new file mode 100755 index 000000000000..eed4a98764c9 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6client1.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor +from twisted.cred import credentials + +def main(): + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + def1 = factory.login(credentials.UsernamePassword("user1", "pass1")) + def1.addCallback(connected) + reactor.run() + +def connected(perspective): + print "got perspective1 ref:", perspective + print "asking it to foo(13)" + perspective.callRemote("foo", 13) + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6client2.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6client2.py new file mode 100755 index 000000000000..02e61cd3d782 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6client2.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor + +from twisted.spread import pb +from twisted.internet import reactor +from twisted.cred import credentials + +def main(): + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + def1 = factory.login(credentials.UsernamePassword("user2", "pass2")) + def1.addCallback(connected) + reactor.run() + +def connected(perspective): + print "got perspective2 ref:", perspective + print "asking it to foo(14)" + perspective.callRemote("foo", 14) + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6server.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6server.py new file mode 100755 index 000000000000..375ec199d18a --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb6server.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +from zope.interface import implements + +from twisted.spread import pb +from twisted.cred import checkers, portal +from twisted.internet import reactor + +class MyPerspective(pb.Avatar): + def __init__(self, name): + self.name = name + def perspective_foo(self, arg): + print "I am", self.name, "perspective_foo(",arg,") called on", self + +class MyRealm: + implements(portal.IRealm) + def requestAvatar(self, avatarId, mind, *interfaces): + if pb.IPerspective not in interfaces: + raise NotImplementedError + return pb.IPerspective, MyPerspective(avatarId), lambda:None + +p = portal.Portal(MyRealm()) +c = checkers.InMemoryUsernamePasswordDatabaseDontUse(user1="pass1", + user2="pass2") +p.registerChecker(c) +reactor.listenTCP(8800, pb.PBServerFactory(p)) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb7client.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb7client.py new file mode 100755 index 000000000000..8b2823fa9e82 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pb7client.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb +from twisted.internet import reactor + +def one(port, user, pw, service, perspective, number): + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", port, factory) + def1 = factory.getPerspective( + user, pw, service, perspective) + def1.addCallback(connected, number) + +def connected(perspective, number): + print "got perspective ref:", perspective + print "asking it to foo(%d)" % number + perspective.callRemote("foo", number) + +def main(): + one(8800, "user1", "pass1", "service1", "perspective1.1", 10) + one(8800, "user1", "pass1", "service2", "perspective2.1", 11) + one(8800, "user2", "pass2", "service1", "perspective1.2", 12) + one(8800, "user2", "pass2", "service2", "perspective2.2", 13) + one(8801, "user3", "pass3", "service3", "perspective3.3", 14) + reactor.run() + +main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pbAnonClient.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pbAnonClient.py new file mode 100755 index 000000000000..a9f5b52a6e2e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pbAnonClient.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python + +# Copyright (c) 2007-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Client which will talk to the server run by pbAnonServer.py, logging in +either anonymously or with username/password credentials. +""" + +from sys import stdout + +from twisted.python.log import err, startLogging +from twisted.cred.credentials import Anonymous, UsernamePassword +from twisted.internet import reactor +from twisted.internet.defer import gatherResults +from twisted.spread.pb import PBClientFactory + + +def error(why, msg): + """ + Catch-all errback which simply logs the failure. This isn't expected to + be invoked in the normal case for this example. + """ + err(why, msg) + + +def connected(perspective): + """ + Login callback which invokes the remote "foo" method on the perspective + which the server returned. + """ + print "got perspective1 ref:", perspective + print "asking it to foo(13)" + return perspective.callRemote("foo", 13) + + +def finished(ignored): + """ + Callback invoked when both logins and method calls have finished to shut + down the reactor so the example exits. 
+ """ + reactor.stop() + + +def main(): + """ + Connect to a PB server running on port 8800 on localhost and log in to + it, both anonymously and using a username/password it will recognize. + """ + startLogging(stdout) + factory = PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + + anonymousLogin = factory.login(Anonymous()) + anonymousLogin.addCallback(connected) + anonymousLogin.addErrback(error, "Anonymous login failed") + + usernameLogin = factory.login(UsernamePassword("user1", "pass1")) + usernameLogin.addCallback(connected) + usernameLogin.addErrback(error, "Username/password login failed") + + bothDeferreds = gatherResults([anonymousLogin, usernameLogin]) + bothDeferreds.addCallback(finished) + + reactor.run() + + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pbAnonServer.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pbAnonServer.py new file mode 100755 index 000000000000..dcdae23d218e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/pbAnonServer.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python + +# Copyright (c) 2007-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Implement the realm for and run on port 8800 a PB service which allows both +anonymous and username/password based access. + +Successful username/password-based login requests given an instance of +MyPerspective with a name which matches the username with which they +authenticated. Success anonymous login requests are given an instance of +MyPerspective with the name "Anonymous". +""" + +from sys import stdout + +from zope.interface import implements + +from twisted.python.log import startLogging +from twisted.cred.checkers import ANONYMOUS, AllowAnonymousAccess +from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse +from twisted.cred.portal import IRealm, Portal +from twisted.internet import reactor +from twisted.spread.pb import Avatar, IPerspective, PBServerFactory + + +class MyPerspective(Avatar): + """ + Trivial avatar exposing a single remote method for demonstrative + purposes. All successful login attempts in this example will result in + an avatar which is an instance of this class. + + @type name: C{str} + @ivar name: The username which was used during login or C{"Anonymous"} + if the login was anonymous (a real service might want to avoid the + collision this introduces between anonoymous users and authenticated + users named "Anonymous"). + """ + def __init__(self, name): + self.name = name + + + def perspective_foo(self, arg): + """ + Print a simple message which gives the argument this method was + called with and this avatar's name. + """ + print "I am %s. perspective_foo(%s) called on %s." % ( + self.name, arg, self) + + + +class MyRealm(object): + """ + Trivial realm which supports anonymous and named users by creating + avatars which are instances of MyPerspective for either. + """ + implements(IRealm) + + def requestAvatar(self, avatarId, mind, *interfaces): + if IPerspective not in interfaces: + raise NotImplementedError("MyRealm only handles IPerspective") + if avatarId is ANONYMOUS: + avatarId = "Anonymous" + return IPerspective, MyPerspective(avatarId), lambda: None + + + +def main(): + """ + Create a PB server using MyRealm and run it on port 8800. + """ + startLogging(stdout) + + p = Portal(MyRealm()) + + # Here the username/password checker is registered. 
+ c1 = InMemoryUsernamePasswordDatabaseDontUse(user1="pass1", user2="pass2") + p.registerChecker(c1) + + # Here the anonymous checker is registered. + c2 = AllowAnonymousAccess() + p.registerChecker(c2) + + reactor.listenTCP(8800, PBServerFactory(p)) + reactor.run() + + +if __name__ == '__main__': + main() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/trap_client.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/trap_client.py new file mode 100755 index 000000000000..6edb094bb49f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/trap_client.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb, jelly +from twisted.python import log +from twisted.internet import reactor + +class MyException(pb.Error): pass +class MyOtherException(pb.Error): pass + +class ScaryObject: + # not safe for serialization + pass + +def worksLike(obj): + # the callback/errback sequence in class One works just like an + # asynchronous version of the following: + try: + response = obj.callMethod(name, arg) + except pb.DeadReferenceError: + print " stale reference: the client disconnected or crashed" + except jelly.InsecureJelly: + print " InsecureJelly: you tried to send something unsafe to them" + except (MyException, MyOtherException): + print " remote raised a MyException" # or MyOtherException + except: + print " something else happened" + else: + print " method successful, response:", response + +class One: + def worked(self, response): + print " method successful, response:", response + def check_InsecureJelly(self, failure): + failure.trap(jelly.InsecureJelly) + print " InsecureJelly: you tried to send something unsafe to them" + return None + def check_MyException(self, failure): + which = failure.trap(MyException, MyOtherException) + if which == MyException: + print " remote raised a MyException" + else: + print " remote raised a MyOtherException" + return None + def catch_everythingElse(self, failure): + print " something else happened" + log.err(failure) + return None + + def doCall(self, explanation, arg): + print explanation + try: + deferred = self.remote.callRemote("fooMethod", arg) + deferred.addCallback(self.worked) + deferred.addErrback(self.check_InsecureJelly) + deferred.addErrback(self.check_MyException) + deferred.addErrback(self.catch_everythingElse) + except pb.DeadReferenceError: + print " stale reference: the client disconnected or crashed" + + def callOne(self): + self.doCall("callOne: call with safe object", "safe string") + def callTwo(self): + self.doCall("callTwo: call with dangerous object", ScaryObject()) + def callThree(self): + self.doCall("callThree: call that raises remote exception", "panic!") + def callShutdown(self): + print "telling them to shut down" + self.remote.callRemote("shutdown") + def callFour(self): + self.doCall("callFour: call on stale reference", "dummy") + + def got_obj(self, obj): + self.remote = obj + reactor.callLater(1, self.callOne) + reactor.callLater(2, self.callTwo) + reactor.callLater(3, self.callThree) + reactor.callLater(4, self.callShutdown) + reactor.callLater(5, self.callFour) + reactor.callLater(6, reactor.stop) + +factory = pb.PBClientFactory() +reactor.connectTCP("localhost", 8800, factory) +deferred = factory.getRootObject() +deferred.addCallback(One().got_obj) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/trap_server.py 
b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/trap_server.py new file mode 100755 index 000000000000..ef705811f1f6 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/pb/trap_server.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import reactor +from twisted.spread import pb + +class MyException(pb.Error): + pass + +class One(pb.Root): + def remote_fooMethod(self, arg): + if arg == "panic!": + raise MyException + return "response" + def remote_shutdown(self): + reactor.stop() + +reactor.listenTCP(8800, pb.PBServerFactory(One())) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/process/process.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/process/process.py new file mode 100755 index 000000000000..60a5b29dcbfb --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/process/process.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +# Copyright (c) 2009-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import protocol +from twisted.internet import reactor +import re + +class MyPP(protocol.ProcessProtocol): + def __init__(self, verses): + self.verses = verses + self.data = "" + def connectionMade(self): + print "connectionMade!" + for i in range(self.verses): + self.transport.write("Aleph-null bottles of beer on the wall,\n" + + "Aleph-null bottles of beer,\n" + + "Take one down and pass it around,\n" + + "Aleph-null bottles of beer on the wall.\n") + self.transport.closeStdin() # tell them we're done + def outReceived(self, data): + print "outReceived! with %d bytes!" % len(data) + self.data = self.data + data + def errReceived(self, data): + print "errReceived! with %d bytes!" % len(data) + def inConnectionLost(self): + print "inConnectionLost! stdin is closed! (we probably did it)" + def outConnectionLost(self): + print "outConnectionLost! The child closed their stdout!" + # now is the time to examine what they wrote + #print "I saw them write:", self.data + (dummy, lines, words, chars, file) = re.split(r'\s+', self.data) + print "I saw %s lines" % lines + def errConnectionLost(self): + print "errConnectionLost! The child closed their stderr." 
+ def processExited(self, reason): + print "processExited, status %d" % (reason.value.exitCode,) + def processEnded(self, reason): + print "processEnded, status %d" % (reason.value.exitCode,) + print "quitting" + reactor.stop() + +pp = MyPP(10) +reactor.spawnProcess(pp, "wc", ["wc"], {}) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/process/quotes.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/process/quotes.py new file mode 100644 index 000000000000..c0efeafaf97e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/process/quotes.py @@ -0,0 +1,25 @@ +from twisted.internet import protocol, utils, reactor +from twisted.python import failure +from cStringIO import StringIO + +class FortuneQuoter(protocol.Protocol): + + fortune = '/usr/games/fortune' + + def connectionMade(self): + output = utils.getProcessOutput(self.fortune) + output.addCallbacks(self.writeResponse, self.noResponse) + + def writeResponse(self, resp): + self.transport.write(resp) + self.transport.loseConnection() + + def noResponse(self, err): + self.transport.loseConnection() + + +if __name__ == '__main__': + f = protocol.Factory() + f.protocol = FortuneQuoter + reactor.listenTCP(10999, f) + reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/process/trueandfalse.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/process/trueandfalse.py new file mode 100644 index 000000000000..4962c93dbaa8 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/process/trueandfalse.py @@ -0,0 +1,14 @@ +from twisted.internet import utils, reactor + +def printTrueValue(val): + print "/bin/true exits with rc=%d" % val + output = utils.getProcessValue('/bin/false') + output.addCallback(printFalseValue) + +def printFalseValue(val): + print "/bin/false exits with rc=%d" % val + reactor.stop() + +output = utils.getProcessValue('/bin/true') +output.addCallback(printTrueValue) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/udp/MulticastClient.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/udp/MulticastClient.py new file mode 100644 index 000000000000..ec48ac0cc595 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/udp/MulticastClient.py @@ -0,0 +1,13 @@ +from twisted.internet.protocol import DatagramProtocol +from twisted.internet import reactor +from twisted.application.internet import MulticastServer + +class MulticastClientUDP(DatagramProtocol): + + def datagramReceived(self, datagram, address): + print "Received:" + repr(datagram) + +# Send multicast on 224.0.0.1:8005, on our dynamically allocated port +reactor.listenUDP(0, MulticastClientUDP()).write('UniqueID', + ('224.0.0.1', 8005)) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/listings/udp/MulticastServer.py b/vendor/Twisted-10.0.0/doc/core/howto/listings/udp/MulticastServer.py new file mode 100644 index 000000000000..9e70bdd32796 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/listings/udp/MulticastServer.py @@ -0,0 +1,25 @@ +from twisted.internet.protocol import DatagramProtocol +from twisted.internet import reactor +from twisted.application.internet import MulticastServer + +class MulticastServerUDP(DatagramProtocol): + def startProtocol(self): + print 'Started Listening' + # Join a specific multicast group, which is the IP we will respond to + self.transport.joinGroup('224.0.0.1') + + def datagramReceived(self, datagram, address): + # The uniqueID check is to ensure we only service requests from + # ourselves + if datagram == 'UniqueID': + 
print "Server Received:" + repr(datagram) + self.transport.write("data", address) + +# Note that the join function is picky about having a unique object +# on which to call join. To avoid using startProtocol, the following is +# sufficient: +#reactor.listenMulticast(8005, MulticastServerUDP()).join('224.0.0.1') + +# Listen for multicast on 224.0.0.1:8005 +reactor.listenMulticast(8005, MulticastServerUDP()) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/logging.html b/vendor/Twisted-10.0.0/doc/core/howto/logging.html new file mode 100644 index 000000000000..e524d1a92414 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/logging.html @@ -0,0 +1,181 @@ + + +Twisted Documentation: Logging with twisted.python.log + + + + +

Logging with twisted.python.log

Basic usage

Twisted provides a simple and flexible logging system in the twisted.python.log module. It has three commonly used functions:

msg
    Logs a new message. For example:

        from twisted.python import log
        log.msg('Hello, world.')

err
    Writes a failure to the log, including traceback information (if any). You can pass it a Failure or Exception instance, or nothing. If you pass something else, it will be converted to a string with repr and logged. If you pass nothing, it will construct a Failure from the currently active exception, which makes it convenient to use in an except clause:

        try:
            x = 1 / 0
        except:
            log.err()   # will log the ZeroDivisionError

startLogging
    Starts logging to a given file-like object. For example:

        log.startLogging(open('/var/log/foo.log', 'w'))

    or:

        log.startLogging(sys.stdout)

    By default, startLogging will also redirect anything written to sys.stdout and sys.stderr to the log. You can disable this by passing setStdout=False to startLogging.

Before startLogging is called, log messages will be discarded and errors will be written to stderr.
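Putting the three functions together, a minimal self-contained script might look like this (an illustrative sketch added to this copy of the howto; the messages are arbitrary):

    import sys
    from twisted.python import log

    def main():
        # send all log events to stdout; stdout/stderr writes are captured too
        log.startLogging(sys.stdout)
        log.msg("starting up")
        try:
            1 / 0
        except:
            log.err()   # logs the ZeroDivisionError with a traceback
        log.msg("shutting down")

    if __name__ == '__main__':
        main()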

Logging and twistd

If you are using twistd to run your daemon, it will take care of calling startLogging for you, and will also rotate log files. See the "twistd and tac" howto and the twistd man page for details of using twistd.

Log files

The twisted.python.logfile module provides some standard classes suitable for use with startLogging, such as DailyLogFile, which will rotate the log to a new file once per day.
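For example, a daily-rotated log file can be handed straight to startLogging. The snippet below is an added illustration and assumes the DailyLogFile(name, directory) constructor; the file name and directory are placeholders:

    from twisted.python import log
    from twisted.python.logfile import DailyLogFile

    # rotate to a new file once per day
    logfile = DailyLogFile("myapp.log", "/tmp")
    log.startLogging(logfile)
    log.msg("this goes to /tmp/myapp.log")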

Using the Python logging module

If your application uses the standard logging module, or you want its ease of configuration but don't want to lose Twisted-produced messages, the PythonLoggingObserver observer should be useful to you.

You start it like any other observer:

    observer = log.PythonLoggingObserver()
    observer.start()

Then you just have to configure the standard logging module to do what you want; see the logging documentation.

This method allows you to customize the log level received by the logging module using the logLevel keyword:

    log.msg("This is important!", logLevel=logging.CRITICAL)
    log.msg("Don't mind", logLevel=logging.DEBUG)

Unless logLevel is provided, logging.INFO is used for log.msg and logging.ERROR is used for log.err.

Take special care when you use a custom configuration of the Python logging module: some handlers (e.g. SMTP, HTTP) use the network and so can block inside the reactor loop. Nothing in the bridge is done to prevent that.
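Putting the bridge together with a standard logging configuration might look like the following sketch (added here for illustration; the format string and levels are arbitrary choices):

    import logging
    from twisted.python import log

    # configure the stdlib logging module first
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')

    # then route Twisted log events into it
    observer = log.PythonLoggingObserver()
    observer.start()

    log.msg("hello via the stdlib logging module")
    log.msg("noisy detail", logLevel=logging.DEBUG)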

                    Writing log observers

                    + +

                    Log observers are the basis of the Twisted logging system. An example of + a log observer in Twisted is the FileLogObserver used by + startLogging that writes events to a log file. A log observer + is just a callable that accepts a dictionary as its only argument. You can + then register it to receive all log events (in addition to any other + observers):

                    + +

                    1 +

                    twisted.python.log.addObserver(yourCallable) +
                    + +

                    The dictionary will have at least two items:

                    + +
                    +
                    message
                    +
                    The message (a list, usually of strings) + for this log event, as passed to log.msg or the + message in the failure passed to log.err.
                    + +
                    isError
                    +
This is a boolean that will be true if this event came from a call to + log.err. If this is set, there may be a failure + item in the dictionary as well, with a Failure object in it.
                    +
                    + +

Other items the built-in logging functionality may add include:

                    + +
                    +
                    printed
                    +
                    This message was captured from sys.stdout, i.e. this + message came from a print statement. If + isError is also true, it came from + sys.stderr.
                    +
                    + +

                    You can pass additional items to the event dictionary by passing keyword + arguments to log.msg and log.err. The standard + log observers will ignore dictionary items they don't use.
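As an illustration, here is a minimal sketch of a custom observer that writes only error events to stderr; the function name is arbitrary:

import sys
from twisted.python import log

def errorObserver(eventDict):
    # Only report events that came from log.err
    if not eventDict['isError']:
        return
    if 'failure' in eventDict:
        text = eventDict['failure'].getTraceback()
    else:
        text = ' '.join(map(str, eventDict['message']))
    sys.stderr.write('ERROR: %s\n' % (text,))

log.addObserver(errorObserver)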

                    + +

                    Important notes:

                    + +
                      +
• Never block in a log observer, as it may run in the main Twisted thread. + This means you can't use the socket or syslog Python logging backends.
                    • + +
                    • The observer needs to be thread safe if you anticipate using threads + in your program.
                    • +
                    + +

                    Customizing twistd logging

                    +

                    + The behavior of the logging that twistd does can be customized + by setting the ILogObserver component on the application + object. See the Application document for + more information. +

                    + +
                    + +

                    Index

                    + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/options.html b/vendor/Twisted-10.0.0/doc/core/howto/options.html new file mode 100644 index 000000000000..e275895b28c1 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/options.html @@ -0,0 +1,533 @@ + + +Twisted Documentation: Parsing command-lines with usage.Options + + + + +

                    Parsing command-lines with usage.Options

                    + +
                    + + +

                    Introduction

                    + +

There is frequently a need for programs to parse a UNIX-like + command line: options preceded by - or + --, sometimes followed by a parameter, followed by + a list of arguments. The twisted.python.usage module provides a class, + Options, to facilitate such parsing.

                    + +

While Python has the getopt module for doing + this, it provides a very low level of abstraction for options. + Twisted has a higher level of abstraction, in the class twisted.python.usage.Options. It uses + Python's reflection facilities to provide an easy to use yet + flexible interface to the command line. While most command line + processors either force the application writer to write her own + loops, or have arbitrary limitations on the command line (the + most common one being not being able to have more than one + instance of a specific option, thus rendering the idiom + program -v -v -v impossible), Twisted allows the + programmer to decide how much control she wants.

                    + +

The Options class is used by subclassing. Since + much of the time it will be used in the twisted.tap package, where the local + conventions require the specific options parsing class to also + be called Options, it is usually imported with

                    +


                    from twisted.python import usage +
                    + +

                    Boolean Options

                    + +

                    For simple boolean options, define the attribute + optFlags like this:

                    +


                    class Options(usage.Options): + + optFlags = [["fast", "f", "Act quickly"], ["safe", "s", "Act safely"]] +
                    +

                    optFlags should be a list of 3-lists. The first element + is the long name, and will be used on the command line as + --fast. The second one is the short name, and will be used + on the command line as -f. The last element is a + description of the flag and will be used to generate the usage + information text. The long name also determines the name of the key + that will be set on the Options instance. Its value will be 1 if the + option was seen, 0 otherwise. Here is an example for usage:

                    +


                    class Options(usage.Options): + + optFlags = [ + ["fast", "f", "Act quickly"], + ["good", "g", "Act well"], + ["cheap", "c", "Act cheaply"] + ] + +command_line = ["-g", "--fast"] + +options = Options() +try: + options.parseOptions(command_line) +except usage.UsageError, errortext: + print '%s: %s' % (sys.argv[0], errortext) + print '%s: Try --help for usage details.' % (sys.argv[0]) + sys.exit(1) +if options['fast']: + print "fast", +if options['good']: + print "good", +if options['cheap']: + print "cheap", +print +
                    + +

                    The above will print fast good.

                    + +

                    Note here that Options fully supports the mapping interface. You can + access it mostly just like you can access any other dict. Options are stored + as mapping items in the Options instance: parameters as 'paramname': 'value' + and flags as 'flagname': 1 or 0.
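For instance, with the Options class from the previous example, a quick sketch of that mapping behaviour:

options = Options()
options.parseOptions(["--fast"])
print options['fast']    # 1, because the flag was seen
print options['good']    # 0, because it was not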

                    + +

                    Inheritance, Or: How I Learned to Stop Worrying and Love + the Superclass

                    + +

                    Sometimes there is a need for several option processors with + a unifying core. Perhaps you want all your commands to + understand -q/--quiet means to be + quiet, or something similar. On the face of it, this looks + impossible: in Python, the subclass's optFlags + would shadow the superclass's. However, + usage.Options uses special reflection code to get + all of the optFlags defined in the hierarchy. So + the following:

                    +


                    class BaseOptions(usage.Options): + + optFlags = [["quiet", "q", None]] + +class SpecificOptions(BaseOptions): + + optFlags = [ + ["fast", "f", None], ["good", "g", None], ["cheap", "c", None] + ] +
                    +

                    Is the same as:

                    +


                    class SpecificOptions(BaseOptions): + + optFlags = [ + ["quiet", "q", "Silence output"], + ["fast", "f", "Run quickly"], + ["good", "g", "Don't validate input"], + ["cheap", "c", "Use cheap resources"] + ] +
                    + +

                    Parameters

                    + +

                    Parameters are specified using the attribute + optParameters. They must be given a + default. If you want to make sure you got the parameter from + the command line, give a non-string default. Since the command + line only has strings, this is completely reliable.

                    + +

                    Here is an example:

                    +


                    from twisted.python import usage + +class Options(usage.Options): + + optFlags = [ + ["fast", "f", "Run quickly"], + ["good", "g", "Don't validate input"], + ["cheap", "c", "Use cheap resources"] + ] + optParameters = [["user", "u", None, "The user name"]] + +config = Options() +try: + config.parseOptions() # When given no argument, parses sys.argv[1:] +except usage.UsageError, errortext: + print '%s: %s' % (sys.argv[0], errortext) + print '%s: Try --help for usage details.' % (sys.argv[0]) + sys.exit(1) + +if config['user'] is not None: + print "Hello", config['user'] +print "So, you want it:" + +if config['fast']: + print "fast", +if config['good']: + print "good", +if config['cheap']: + print "cheap", +print +
                    + +

                    Like optFlags, optParameters works + smoothly with inheritance.

                    + +

                    Option Subcommands

                    + +

It is useful, on occasion, to group a set of options together based + on the logical action to which they belong. For this, the + usage.Options class allows you to define a set of + subcommands, each of which can provide its own + usage.Options instance to handle its particular + options.

                    + +

Here is an example of an Options class that might parse + options like those the cvs program takes:

                    +


                    from twisted.python import usage + +class ImportOptions(usage.Options): + optParameters = [ + ['module', 'm', None, None], ['vendor', 'v', None, None], + ['release', 'r', None] + ] + +class CheckoutOptions(usage.Options): + optParameters = [['module', 'm', None, None], ['tag', 'r', None, None]] + +class Options(usage.Options): + subCommands = [['import', None, ImportOptions, "Do an Import"], + ['checkout', None, CheckoutOptions, "Do a Checkout"]] + + optParameters = [ + ['compression', 'z', 0, 'Use compression'], + ['repository', 'r', None, 'Specify an alternate repository'] + ] + +config = Options(); config.parseOptions() +if config.subCommand == 'import': + doImport(config.subOptions) +elif config.subCommand == 'checkout': + doCheckout(config.subOptions) +
                    + +

                    The subCommands attribute of Options + directs the parser to the two other Options subclasses + when the strings "import" or "checkout" are + present on the command + line. All options after the given command string are passed to the + specified Options subclass for further parsing. Only one subcommand + may be specified at a time. After parsing has completed, the Options + instance has two new attributes - subCommand and + subOptions - which hold the command string and the Options + instance used to parse the remaining options.
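For example, with the Options class above, a sketch of how a command line is split between the top-level parser and the subcommand parser (the values are only illustrative):

config = Options()
config.parseOptions(["-r", "/var/cvsroot", "checkout", "-m", "twisted"])
print config['repository']          # '/var/cvsroot'
print config.subCommand             # 'checkout'
print config.subOptions['module']   # 'twisted'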

                    + +

                    Generic Code For Options

                    + +

Sometimes, just setting an attribute on the basis of the + options is not flexible enough. In those cases, Twisted does + not even attempt to provide abstractions such as counts or + lists, but rather lets you call your own method, which will + be called whenever the option is encountered.

                    + +

Here is an example of counting verbosity:

                    +


                    from twisted.python import usage + +class Options(usage.Options): + + def __init__(self): + usage.Options.__init__(self) + self['verbosity'] = 0 # default + + def opt_verbose(self): + self['verbosity'] = self['verbosity']+1 + + def opt_quiet(self): + self['verbosity'] = self['verbosity']-1 + + opt_v = opt_verbose + opt_q = opt_quiet +
                    + +

                    Command lines that look like + command -v -v -v -v will + increase verbosity to 4, while + command -q -q -q will decrease + verbosity to -3. +
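A brief sketch of the resulting behaviour:

options = Options()
options.parseOptions(["-v", "-v", "-v"])
print options['verbosity']   # 3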

                    + +

The usage.Options + class knows that these are + parameter-less options, since the methods do not receive an + argument. Here is an example of a method with a parameter:

                    + +


                    from twisted.python import usage + +class Options(usage.Options): + + def __init__(self): + usage.Options.__init__(self) + self['symbols'] = [] + + def opt_define(self, symbol): + self['symbols'].append(symbol) + + opt_D = opt_define +
                    + +

                    This example is useful for the common idiom of having + command -DFOO -DBAR to define symbols.
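Parsing such a command line with the class above might then look like this (a small sketch):

options = Options()
options.parseOptions(["-DFOO", "-DBAR"])
print options['symbols']   # ['FOO', 'BAR']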

                    + +

                    Parsing Arguments

                    + +

usage.Options does not stop helping once the + last option has been parsed. All the remaining arguments are passed to a + method which should deal with them. Here is an example for a + cmp-like command.

                    +


                    from twisted.python import usage + +class Options(usage.Options): + + optParameters = [["max_differences", "d", 1, None]] + + def parseArgs(self, origin, changed): + self['origin'] = origin + self['changed'] = changed +
                    + +

                    The command should look like command origin + changed.

                    + +

                    If you want to have a variable number of left-over + arguments, just use def parseArgs(self, *args):. + This is useful for commands like the UNIX + cat(1).
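A minimal sketch of such a cat-like Options class (the class name is arbitrary):

from twisted.python import usage

class CatOptions(usage.Options):
    def parseArgs(self, *files):
        self['files'] = list(files)

options = CatOptions()
options.parseOptions(["a.txt", "b.txt"])
print options['files']   # ['a.txt', 'b.txt']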

                    + +

                    Post Processing

                    + +

                    Sometimes, you want to perform post processing of options to + patch up inconsistencies, and the like. Here is an example:

                    +


                    from twisted.python import usage + +class Options(usage.Options): + + optFlags = [ + ["fast", "f", "Run quickly"], + ["good", "g", "Don't validate input"], + ["cheap", "c", "Use cheap resources"] + ] + + def postOptions(self): + if self['fast'] and self['good'] and self['cheap']: + raise usage.UsageError, "can't have it all, brother" +
                    + +

                    Type enforcement

                    + +

By default, all options are handled as strings. You may want to + enforce the type of your option in some specific cases, the classic example + being a port number. Any callable can be specified as the fifth element of an + optParameters entry and will be called with the string value passed + on the command line. +

                    + +


from twisted.python import usage + +class Options(usage.Options): + optParameters = [ + ["shiny_integer", "s", 1, None, int], + ["dummy_float", "d", 3.14159, None, float] + ] +
                    + +

Note that default values are not coerced, so you should either declare + them with the right type (as above) or coerce them yourself when you use your + options.

                    + +

The coerce function may have a coerceDoc attribute, the content of which + will be printed after the documentation of the option. It's particularly + useful when reusing the same function in multiple places.

                    + +


                    def oneTwoThree(val): + val = int(val) + if val not in range(1, 4): + raise ValueError("Not in range") + return val +oneTwoThree.coerceDoc = "Must be 1, 2 or 3." + +from twisted.python import usage + +class Options(usage.Options): + optParameters = [["one_choice", "o", 1, None, oneTwoThree]] +
                    + +

                    This example code will print the following help when added to your program: +

                    + +
                    +$ python myprogram.py --help
                    +Usage: myprogram [options] 
                    +Options:
+  -o, --one_choice=           [default: 1]. Must be 1, 2 or 3.
                    +
                    +
                    + +

                    Index

                    + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/overview.html b/vendor/Twisted-10.0.0/doc/core/howto/overview.html new file mode 100644 index 000000000000..0aa7bf941716 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/overview.html @@ -0,0 +1,18 @@ + + +Twisted Documentation: High-Level Overview of Twisted + + + + +

                    High-Level Overview of Twisted

                    +
                      +
                      + + +
                      + +

                      Index

                      + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/pb-copyable.html b/vendor/Twisted-10.0.0/doc/core/howto/pb-copyable.html new file mode 100644 index 000000000000..9df3a3d93ca3 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/pb-copyable.html @@ -0,0 +1,1195 @@ + + +Twisted Documentation: PB Copyable: Passing Complex Types + + + + +

                      PB Copyable: Passing Complex Types

                      + +
                      + + +

                      Overview

                      + +

                      This chapter focuses on how to use PB to pass complex types (specifically +class instances) to and from a remote process. The first section is on +simply copying the contents of an object to a remote process (pb.Copyable). The second covers how +to copy those contents once, then update them later when they change (Cacheable).

                      + +

                      Motivation

                      + +

From the previous chapter, you've seen how to +pass basic types to a remote process, by using them in the arguments or +return values of a callRemote function. However, +if you've experimented with it, you may have discovered problems when trying +to pass anything more complicated than a primitive int/list/dict/string +type, or another pb.Referenceable object. At some point you want +to pass entire objects between processes, instead of having to reduce them +down to dictionaries on one end and then re-instantiate them on the +other.

                      + +

                      Passing Objects

                      + +

                      The most obvious and straightforward way to send an object to a remote +process is with something like the following code. It also happens that this +code doesn't work, as will be explained below.

                      + +


                      class LilyPond: + def __init__(self, frogs): + self.frogs = frogs + +pond = LilyPond(12) +ref.callRemote("sendPond", pond) +
                      + +

If you try to run this, you might hope that a suitable remote end which +implements the remote_sendPond method would see that method get +invoked with an instance of the LilyPond class. But instead, +you'll encounter the dreaded InsecureJelly exception. This is +Twisted's way of telling you that you've violated a security restriction, +and that the receiving end refuses to accept your object.

                      + +

                      Security Options

                      + +

                      What's the big deal? What's wrong with just copying a class into another +process' namespace?

                      + +

                      Reversing the question might make it easier to see the issue: what is the +problem with accepting a stranger's request to create an arbitrary object in +your local namespace? The real question is how much power you are granting +them: what actions can they convince you to take on the basis of the bytes +they are sending you over that remote connection.

                      + +

                      Objects generally represent more power than basic types like strings and +dictionaries because they also contain (or reference) code, which can modify +other data structures when executed. Once previously-trusted data is +subverted, the rest of the program is compromised.

                      + +

The built-in Python batteries included classes are relatively +tame, but you still wouldn't want to let a foreign program use them to +create arbitrary objects in your namespace or on your computer. Imagine a +protocol that involved sending a file-like object with a read() +method that was supposed to be used later to retrieve a document. Then imagine +what would happen if that object were created with +os.fdopen("~/.gnupg/secring.gpg"). Or an instance of +telnetlib.Telnet("localhost", "chargen").

                      + +

                      Classes you've written for your own program are likely to have far more +power. They may run code during __init__, or even have special +meaning simply because of their existence. A program might have +User objects to represent user accounts, and have a rule that +says all User objects in the system are referenced when +authorizing a login session. (In this system, User.__init__ +would probably add the object to a global list of known users). The simple +act of creating an object would give access to somebody. If you could be +tricked into creating a bad object, an unauthorized user would get +access.

                      + +

So object creation needs to be part of a system's security design. The +dotted line between trusted inside and untrusted outside needs +to describe what may be done in response to outside events. One of those +events is the receipt of an object through a PB remote procedure call, which +is a request to create an object in your inside namespace. The +question is what to do in response to it. For this reason, you must +explicitly specify what remote classes will be accepted, and how their +local representatives are to be created.

                      + +

                      What class to use?

                      + +

Another basic question to answer before we can do anything useful with an +incoming serialized object is: what class should we create? The simplistic +answer is to create the same kind of object that was serialized on the sender's +end of the wire, but this is not as easy or as straightforward as you might +think. Remember that the request is coming from a different program, using a +potentially different set of class libraries. In fact, since PB has also +been implemented in Java, Emacs-Lisp, and other languages, there's no +guarantee that the sender is even running Python! All we know on the +receiving end is a list of two things which describe the instance they are +trying to send us: the name of the class, and a representation of the +contents of the object.

                      + + +

                      PB lets you specify the mapping from remote class names to local classes +with the setUnjellyableForClass function1. + + +This function takes a remote/sender class reference (either the +fully-qualified name as used by the sending end, or a class object from +which the name can be extracted), and a local/recipient class (used to +create the local representation for incoming serialized objects). Whenever +the remote end sends an object, the class name that they transmit is looked +up in the table controlled by this function. If a matching class is found, +it is used to create the local object. If not, you get the +InsecureJelly exception.
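In skeleton form, the receiving side therefore contains a registration something like the following (the class names here are only placeholders; full, working examples appear below):

from twisted.spread import pb

class Pond(pb.Copyable):            # the class the sender transmits
    pass

class ReceiverPond(pb.RemoteCopy):  # the local representation
    pass

pb.setUnjellyableForClass(Pond, ReceiverPond)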

                      + +

                      In general you expect both ends to share the same codebase: either you +control the program that is running on both ends of the wire, or both +programs share some kind of common language that is implemented in code +which exists on both ends. You wouldn't expect them to send you an object of +the MyFooziWhatZit class unless you also had a definition for that class. So +it is reasonable for the Jelly layer to reject all incoming classes except +the ones that you have explicitly marked with +setUnjellyableForClass. But keep in mind that the sender's idea +of a User object might differ from the recipient's, either +through namespace collisions between unrelated packages, version skew +between nodes that haven't been updated at the same rate, or a malicious +intruder trying to cause your code to fail in some interesting or +potentially vulnerable way.

                      + + +

                      pb.Copyable

                      + +

                      Ok, enough of this theory. How do you send a fully-fledged object from +one side to the other?

                      + +


                      #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb, jelly +from twisted.python import log +from twisted.internet import reactor + +class LilyPond: + def setStuff(self, color, numFrogs): + self.color = color + self.numFrogs = numFrogs + def countFrogs(self): + print "%d frogs" % self.numFrogs + +class CopyPond(LilyPond, pb.Copyable): + pass + +class Sender: + def __init__(self, pond): + self.pond = pond + + def got_obj(self, remote): + self.remote = remote + d = remote.callRemote("takePond", self.pond) + d.addCallback(self.ok).addErrback(self.notOk) + + def ok(self, response): + print "pond arrived", response + reactor.stop() + def notOk(self, failure): + print "error during takePond:" + if failure.type == jelly.InsecureJelly: + print " InsecureJelly" + else: + print failure + reactor.stop() + return None + +def main(): + from copy_sender import CopyPond # so it's not __main__.CopyPond + pond = CopyPond() + pond.setStuff("green", 7) + pond.countFrogs() + # class name: + print ".".join([pond.__class__.__module__, pond.__class__.__name__]) + + sender = Sender(pond) + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + deferred = factory.getRootObject() + deferred.addCallback(sender.got_obj) + reactor.run() + +if __name__ == '__main__': + main() +
                      +


                      # Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +PB copy receiver example. + +This is a Twisted Application Configuration (tac) file. Run with e.g. + twistd -ny copy_receiver.tac + +See the twistd(1) man page or +http://twistedmatrix.com/documents/current/howto/application for details. +""" + +import sys +if __name__ == '__main__': + print __doc__ + sys.exit(1) + +from twisted.application import service, internet +from twisted.internet import reactor +from twisted.spread import pb +from copy_sender import LilyPond, CopyPond + +from twisted.python import log +#log.startLogging(sys.stdout) + +class ReceiverPond(pb.RemoteCopy, LilyPond): + pass +pb.setUnjellyableForClass(CopyPond, ReceiverPond) + +class Receiver(pb.Root): + def remote_takePond(self, pond): + print " got pond:", pond + pond.countFrogs() + return "safe and sound" # positive acknowledgement + def remote_shutdown(self): + reactor.stop() + +application = service.Application("copy_receiver") +internet.TCPServer(8800, pb.PBServerFactory(Receiver())).setServiceParent( + service.IServiceCollection(application)) +
                      + +

The sending side has a class called LilyPond. To make this +eligible for transport through callRemote (either as an +argument, a return value, or something referenced by either of those [like a +dictionary value]), it must inherit from one of the four Serializable classes. In this section, +we focus on Copyable. +The copyable subclass of LilyPond is called +CopyPond. We create an instance of it and send it through +callRemote as an argument to the receiver's +remote_takePond method. The Jelly layer will serialize +(jelly) that object as an instance with a class name of +copy_sender.CopyPond and some chunk of data that represents the +object's state. pond.__class__.__module__ and +pond.__class__.__name__ are used to derive the class name +string. The object's getStateToCopy method is +used to get the state: this is provided by pb.Copyable, and the default just retrieves +self.__dict__. This works just like the optional +__getstate__ method used by pickle. The pair of +name and state are sent over the wire to the receiver.

                      + +

                      The receiving end defines a local class named ReceiverPond +to represent incoming LilyPond instances. This class derives +from the sender's LilyPond class (with a fully-qualified name +of copy_sender.LilyPond), which specifies how we expect it to +behave. We trust that this is the same LilyPond class as the +sender used. (At the very least, we hope ours will be able to accept a state +created by theirs). It also inherits from pb.RemoteCopy, which is a requirement for all +classes that act in this local-representative role (those which are given to +the second argument of setUnjellyableForClass). +RemoteCopy provides the methods that tell the Jelly layer how +to create the local object from the incoming serialized state.

                      + +

                      Then setUnjellyableForClass is used to register the two +classes. This has two effects: instances of the remote class (the first +argument) will be allowed in through the security layer, and instances of +the local class (the second argument) will be used to contain the state that +is transmitted when the sender serializes the remote object.

                      + +

                      When the receiver unserializes (unjellies) the object, it will +create an instance of the local ReceiverPond class, and hand +the transmitted state (usually in the form of a dictionary) to that object's +setCopyableState method. +This acts just like the __setstate__ method that +pickle uses when unserializing an object. +getStateToCopy/setCopyableState are distinct from +__getstate__/__setstate__ to allow objects to be +persisted (across time) differently than they are transmitted (across +[memory]space).

                      + +

                      When this is run, it produces the following output:

                      + +
                      +[-] twisted.spread.pb.PBServerFactory starting on 8800
                      +[-] Starting factory <twisted.spread.pb.PBServerFactory instance at
                      +0x406159cc>
                      +[Broker,0,127.0.0.1]  got pond: <__builtin__.ReceiverPond instance at
                      +0x406ec5ec>
                      +[Broker,0,127.0.0.1] 7 frogs
                      +
                      + +
                      +% ./copy_sender.py
                      +7 frogs
                      +copy_sender.CopyPond
                      +pond arrived safe and sound
                      +Main loop terminated.
                      +%
                      +
                      + + + +

                      Controlling the Copied State

                      + +

By overriding getStateToCopy and +setCopyableState, you can control how the object is transmitted +over the wire. For example, you might want to perform some data-reduction: +pre-compute some results instead of sending all the raw data over the wire. +Or you could replace references to a local object on the sender's side with +markers before sending, then upon receipt replace those markers with +references to a receiver-side proxy that could perform the same operations +against a local cache of data.

                      + +

Another good use for getStateToCopy is to implement +local-only attributes: data that is only accessible by the local +process, not by any remote user. For example, a .password +attribute could be removed from the object state before sending to a remote +system. Combined with the fact that Copyable objects return +unchanged from a round trip, this could be used to build a +challenge-response system (in fact PB does this with +pb.Referenceable objects to implement authorization as +described here).
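A minimal sketch of such a local-only attribute, using a hypothetical Account class whose .password should never leave the process:

from twisted.spread import pb

class Account(pb.Copyable):
    def __init__(self, name, password):
        self.name = name
        self.password = password      # local-only; stripped before sending
    def getStateToCopy(self):
        state = self.__dict__.copy()
        del state['password']         # never transmit the password
        return state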

                      + +

                      Whatever getStateToCopy returns from the sending object will +be serialized and sent over the wire; setCopyableState gets +whatever comes over the wire and is responsible for setting up the state of +the object it lives in.

                      + + +


                      #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb + +class FrogPond: + def __init__(self, numFrogs, numToads): + self.numFrogs = numFrogs + self.numToads = numToads + def count(self): + return self.numFrogs + self.numToads + +class SenderPond(FrogPond, pb.Copyable): + def getStateToCopy(self): + d = self.__dict__.copy() + d['frogsAndToads'] = d['numFrogs'] + d['numToads'] + del d['numFrogs'] + del d['numToads'] + return d + +class ReceiverPond(pb.RemoteCopy): + def setCopyableState(self, state): + self.__dict__ = state + def count(self): + return self.frogsAndToads + +pb.setUnjellyableForClass(SenderPond, ReceiverPond) +
                      +


                      #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb, jelly +from twisted.python import log +from twisted.internet import reactor +from copy2_classes import SenderPond + +class Sender: + def __init__(self, pond): + self.pond = pond + + def got_obj(self, obj): + d = obj.callRemote("takePond", self.pond) + d.addCallback(self.ok).addErrback(self.notOk) + + def ok(self, response): + print "pond arrived", response + reactor.stop() + def notOk(self, failure): + print "error during takePond:" + if failure.type == jelly.InsecureJelly: + print " InsecureJelly" + else: + print failure + reactor.stop() + return None + +def main(): + pond = SenderPond(3, 4) + print "count %d" % pond.count() + + sender = Sender(pond) + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + deferred = factory.getRootObject() + deferred.addCallback(sender.got_obj) + reactor.run() + +if __name__ == '__main__': + main() +
                      +


                      #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.application import service, internet +from twisted.internet import reactor +from twisted.spread import pb +import copy2_classes # needed to get ReceiverPond registered with Jelly + +class Receiver(pb.Root): + def remote_takePond(self, pond): + print " got pond:", pond + print " count %d" % pond.count() + return "safe and sound" # positive acknowledgement + def remote_shutdown(self): + reactor.stop() + +application = service.Application("copy_receiver") +internet.TCPServer(8800, pb.PBServerFactory(Receiver())).setServiceParent( + service.IServiceCollection(application)) +
                      + +

                      In this example, the classes are defined in a separate source file, which +also sets up the binding between them. The SenderPond and +ReceiverPond are unrelated save for this binding: they happen +to implement the same methods, but use different internal instance variables +to accomplish them.

                      + +

The code that receives the object doesn't even have to import the class +definition into its own namespace. It is sufficient that something in the receiving +process imports the module which defines the class (and thus executes the setUnjellyableForClass +statement). The Jelly layer remembers the class definition until a matching +object is received. The sender of the object needs the definition, of +course, to create the object in the first place.

                      + +

                      When run, the copy2 example emits the following:

                      + +
                      +% twistd -n -y copy2_receiver.py
                      +[-] twisted.spread.pb.PBServerFactory starting on 8800
                      +[-] Starting factory <twisted.spread.pb.PBServerFactory instance at
                      +0x40604b4c>
                      +[Broker,0,127.0.0.1]  got pond: <copy2_classes.ReceiverPond instance at
                      +0x406eb2ac>
                      +[Broker,0,127.0.0.1]  count 7
                      +
                      + +
                      +% ./copy2_sender.py
                      +count 7
                      +pond arrived safe and sound
                      +Main loop terminated.
                      +%
                      +
                      + + + +

                      Things To Watch Out For

                      + + + +

                      More Information

                      + + + + +

                      pb.Cacheable

                      + +

                      Sometimes the object you want to send to the remote process is big and +slow. big means it takes a lot of data (storage, network bandwidth, +processing) to represent its state. slow means that state doesn't +change very frequently. It may be more efficient to send the full state only +once, the first time it is needed, then afterwards only send the differences +or changes in state whenever it is modified. The pb.Cacheable class provides a framework to +implement this.

                      + +

                      pb.Cacheable is derived +from pb.Copyable, so it is +based upon the idea of an object's state being captured on the sending side, +and then turned into a new object on the receiving side. This is extended to +have an object publishing on the sending side (derived from pb.Cacheable), matched with one +observing on the receiving side (derived from pb.RemoteCache).

                      + +

                      To effectively use pb.Cacheable, you need to isolate changes +to your object into accessor functions (specifically setter +functions). Your object needs to get control every single time some +attribute is changed3.

                      + +

                      You derive your sender-side class from pb.Cacheable, and you +add two methods: getStateToCacheAndObserveFor +and stoppedObserving. The first +is called when a remote caching reference is first created, and retrieves +the data with which the cache is first filled. It also provides an +object called the observer4 + +that points at that receiver-side cache. Every time the state of the object +is changed, you give a message to the observer, informing them of the +change. The other method, stoppedObserving, is called when the +remote cache goes away, so that you can stop sending updates.

                      + +

On the receiver end, you make your cache class inherit from pb.RemoteCache, and implement the +setCopyableState method as you would for a pb.RemoteCopy +object. In addition, you must implement methods to receive the updates sent +to the observer by the pb.Cacheable: these methods should have +names that start with observe_, and match the +callRemote invocations from the sender side just as the usual +remote_* and perspective_* methods match normal +callRemote calls.

                      + +

                      The first time a reference to the pb.Cacheable object is +sent to any particular recipient, a sender-side Observer will be created for +it, and the getStateToCacheAndObserveFor method will be called +to get the current state and register the Observer. The state which that +returns is sent to the remote end and turned into a local representation +using setCopyableState just like pb.RemoteCopy, +described above (in fact it inherits from that class).

                      + +

                      After that, your setter functions on the sender side should call +callRemote on the Observer, which causes observe_* +methods to run on the receiver, which are then supposed to update the +receiver-local (cached) state.

                      + +

                      When the receiver stops following the cached object and the last +reference goes away, the pb.RemoteCache object can be freed. +Just before it dies, it tells the sender side it no longer cares about the +original object. When that reference count goes to zero, the +Observer goes away and the pb.Cacheable object can stop +announcing every change that takes place. The stoppedObserving method is +used to tell the pb.Cacheable that the Observer has gone +away.

                      + +

                      With the pb.Cacheable and pb.RemoteCache +classes in place, bound together by a call to +pb.setUnjellyableForClass, all that remains is to pass a +reference to your pb.Cacheable over the wire to the remote end. +The corresponding pb.RemoteCache object will automatically be +created, and the matching methods will be used to keep the receiver-side +slave object in sync with the sender-side master object.

                      + +

                      Example

                      + +

                      Here is a complete example, in which the MasterDuckPond is +controlled by the sending side, and the SlaveDuckPond is a +cache that tracks changes to the master:

                      + +


                      #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb + +class MasterDuckPond(pb.Cacheable): + def __init__(self, ducks): + self.observers = [] + self.ducks = ducks + def count(self): + print "I have [%d] ducks" % len(self.ducks) + def addDuck(self, duck): + self.ducks.append(duck) + for o in self.observers: o.callRemote('addDuck', duck) + def removeDuck(self, duck): + self.ducks.remove(duck) + for o in self.observers: o.callRemote('removeDuck', duck) + def getStateToCacheAndObserveFor(self, perspective, observer): + self.observers.append(observer) + # you should ignore pb.Cacheable-specific state, like self.observers + return self.ducks # in this case, just a list of ducks + def stoppedObserving(self, perspective, observer): + self.observers.remove(observer) + +class SlaveDuckPond(pb.RemoteCache): + # This is a cache of a remote MasterDuckPond + def count(self): + return len(self.cacheducks) + def getDucks(self): + return self.cacheducks + def setCopyableState(self, state): + print " cache - sitting, er, setting ducks" + self.cacheducks = state + def observe_addDuck(self, newDuck): + print " cache - addDuck" + self.cacheducks.append(newDuck) + def observe_removeDuck(self, deadDuck): + print " cache - removeDuck" + self.cacheducks.remove(deadDuck) + +pb.setUnjellyableForClass(MasterDuckPond, SlaveDuckPond) +
                      +


                      #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.spread import pb, jelly +from twisted.python import log +from twisted.internet import reactor +from cache_classes import MasterDuckPond + +class Sender: + def __init__(self, pond): + self.pond = pond + + def phase1(self, remote): + self.remote = remote + d = remote.callRemote("takePond", self.pond) + d.addCallback(self.phase2).addErrback(log.err) + def phase2(self, response): + self.pond.addDuck("ugly duckling") + self.pond.count() + reactor.callLater(1, self.phase3) + def phase3(self): + d = self.remote.callRemote("checkDucks") + d.addCallback(self.phase4).addErrback(log.err) + def phase4(self, dummy): + self.pond.removeDuck("one duck") + self.pond.count() + self.remote.callRemote("checkDucks") + d = self.remote.callRemote("ignorePond") + d.addCallback(self.phase5) + def phase5(self, dummy): + d = self.remote.callRemote("shutdown") + d.addCallback(self.phase6) + def phase6(self, dummy): + reactor.stop() + +def main(): + master = MasterDuckPond(["one duck", "two duck"]) + master.count() + + sender = Sender(master) + factory = pb.PBClientFactory() + reactor.connectTCP("localhost", 8800, factory) + deferred = factory.getRootObject() + deferred.addCallback(sender.phase1) + reactor.run() + +if __name__ == '__main__': + main() +
                      +


                      #!/usr/bin/env python + +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.application import service, internet +from twisted.internet import reactor +from twisted.spread import pb +import cache_classes + +class Receiver(pb.Root): + def remote_takePond(self, pond): + self.pond = pond + print "got pond:", pond # a DuckPondCache + self.remote_checkDucks() + def remote_checkDucks(self): + print "[%d] ducks: " % self.pond.count(), self.pond.getDucks() + def remote_ignorePond(self): + # stop watching the pond + print "dropping pond" + # gc causes __del__ causes 'decache' msg causes stoppedObserving + self.pond = None + def remote_shutdown(self): + reactor.stop() + +application = service.Application("copy_receiver") +internet.TCPServer(8800, pb.PBServerFactory(Receiver())).setServiceParent( + service.IServiceCollection(application)) +
                      +

                      When run, this example emits the following:

                      + +
                      +% twistd -n -y cache_receiver.py
                      +[-] twisted.spread.pb.PBServerFactory starting on 8800
                      +[-] Starting factory <twisted.spread.pb.PBServerFactory instance at
                      +0x40615acc>
                      +[Broker,0,127.0.0.1]  cache - sitting, er, setting ducks
                      +[Broker,0,127.0.0.1] got pond: <cache_classes.SlaveDuckPond instance at
                      +0x406eb5ec>
                      +[Broker,0,127.0.0.1] [2] ducks:  ['one duck', 'two duck']
                      +[Broker,0,127.0.0.1]  cache - addDuck
                      +[Broker,0,127.0.0.1] [3] ducks:  ['one duck', 'two duck', 'ugly duckling']
                      +[Broker,0,127.0.0.1]  cache - removeDuck
                      +[Broker,0,127.0.0.1] [2] ducks:  ['two duck', 'ugly duckling']
                      +[Broker,0,127.0.0.1] dropping pond
                      +%
                      +
                      + +
                      +% ./cache_sender.py
                      +I have [2] ducks
                      +I have [3] ducks
                      +I have [2] ducks
                      +Main loop terminated.
                      +%
                      +
                      + + +

                      Points to notice:

                      + +
                        +
                      • There is one Observer for each remote program that holds + an active reference. Multiple references inside the same program don't + matter: the serialization layer notices the duplicates and does the + appropriate reference counting5. +
                      • + +
• Multiple Observers need to be kept in a list, and all of them need to + be updated when something changes. By sending the initial state at the + same time as you add the observer to the list, in a single atomic action + that cannot be interrupted by a state change, you ensure that you can send + the same status update to all the observers.
                      • + +
                      • The observer.callRemote calls can still fail. If the + remote side has disconnected very recently and + stoppedObserving has not yet been called, you may get a + DeadReferenceError. It is a good idea to add an errback to + those callRemotes to throw away such an error. This is a + useful idiom: + +


                        observer.callRemote('foo', arg).addErrback(lambda f: None) +
                        +
                      • + + +
• getStateToCacheAndObserveFor must return some object + that represents the current state of the object. This may simply be the + object's __dict__ attribute. It is a good idea to remove the + pb.Cacheable-specific members of it before sending it to the + remote end. The list of Observers, in particular, should be left out, to + avoid dizzying recursive Cacheable references. The mind boggles as to the + potential consequences of leaving in such an item.
                      • + +
                      • A perspective argument is available to + getStateToCacheAndObserveFor, as well as + stoppedObserving. I think the purpose of this is to allow + viewer-specific changes to the way the cache is updated. If all remote + viewers are supposed to see the same data, it can be ignored.
                      • + +
                      + + + + +

                      More Information

                      + + + + + +

                      Footnotes

1. Note that, in this context, unjelly is +a verb with the opposite meaning of jelly. The verb to jelly +means to serialize an object or data structure into a sequence of bytes (or +other primitive transmittable/storable representation), while to +unjelly means to unserialize the bytestream into a live object in the +receiver's memory space. Unjellyable is a noun (not an +adjective), referring to the class that serves as a destination or +recipient of the unjellying process. A is unjellyable into B means +that a serialized representation A (of some remote object) can be +unserialized into a local object of type B. It is these objects B +that are the Unjellyable second argument of the +setUnjellyableForClass function.

                        + +

                        In particular, unjellyable does not mean cannot be +jellied. Unpersistable means not +persistable, but unjelly, unserialize, and unpickle +mean to reverse the operations of jellying, serializing, and +pickling.

2. pb.RemoteCopy is actually defined + as twisted.spread.flavors.RemoteCopy, but + pb.RemoteCopy is the preferred way to access it
                      3. of course you could be clever and +add a hook to __setattr__, along with magical change-announcing +subclasses of the usual builtin types, to detect changes that result from +normal = set operations. The semi-magical property attributes +that were introduced in Python-2.2 could be useful too. The result might be +hard to maintain or extend, though.
                      4. this is actually a RemoteCacheObserver, but it isn't very +useful to subclass or modify, so simply treat it as a little demon that sits +in your pb.Cacheable class and helps you distribute change +notifications. The only useful thing to do with it is to run its +callRemote method, which acts just like a normal +pb.Referenceable's method of the same name.
                      5. this applies to + multiple references through the same Broker. If you've managed to make multiple + TCP connections to the same program, you deserve whatever you get.
                      + +

                      Index

                      + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/pb-cred.html b/vendor/Twisted-10.0.0/doc/core/howto/pb-cred.html new file mode 100644 index 000000000000..9cd425ef3ed7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/pb-cred.html @@ -0,0 +1,1723 @@ + + +Twisted Documentation: Authentication with Perspective Broker + + + + +

                      Authentication with Perspective Broker

                      + +
                      + + +

                      Overview

                      + +

                      The examples shown in Using Perspective +Broker demonstrate how to do basic remote method calls, but provided no +facilities for authentication. In this context, authentication is about who +gets which remote references, and how to restrict access to the right +set of people or programs.

                      + +

As soon as you have a program which offers services to multiple users, +where those users should not be allowed to interfere with each other, you +need to think about authentication. Many services use the idea of an +account, and rely upon the fact that each user has access to only one +account. Twisted uses a system called cred to +handle authentication issues, and Perspective Broker has code to make it +easy to implement the most common use cases.

                      + +

                      Compartmentalizing Services

                      + +

                      Imagine how you would write a chat server using PB. The first step might +be a ChatServer object which had a bunch of +pb.RemoteReferences that point at user clients. Pretend that +those clients offered a remote_print method which lets the +server print a message on the user's console. In that case, the server might +look something like this:

                      + +


                      class ChatServer(pb.Referenceable): + + def __init__(self): + self.groups = {} # indexed by name + self.users = {} # indexed by name + def remote_joinGroup(self, username, groupname): + if not self.groups.has_key(groupname): + self.groups[groupname] = [] + self.groups[groupname].append(self.users[username]) + def remote_sendMessage(self, from_username, groupname, message): + group = self.groups[groupname] + if group: + # send the message to all members of the group + for user in group: + user.callRemote("print", + "<%s> says: %s" % (from_username, + message)) +
                      + +

                      For now, assume that all clients have somehow acquired a +pb.RemoteReference to this ChatServer object, +perhaps using pb.Root and getRootObject as +described in the previous chapter. In this +scheme, when a user sends a message to the group, their client runs +something like the following:

                      + +


                      remotegroup.callRemote("sendMessage", "alice", "Hi, my name is alice.") +
                      + + +

                      Incorrect Arguments

                      + +

                      You've probably seen the first problem: users can trivially spoof each +other. We depend upon the user to pass a correct value in their +username argument, and have no way to tell if they're lying or not. +There is nothing to prevent Alice from modifying her client to do:

                      + +


                      remotegroup.callRemote("sendMessage", "bob", "i like pork") +
                      + +

                      much to the horror of Bob's vegetarian friends.1

                      + +

(In general, learn to get suspicious if you see any argument of a +remotely-invokable method described as must be X.)

                      + +

                      The best way to fix this is to keep track of the user's name locally, +rather than asking them to send it to the server with each message. The best +place to keep state is in an object, so this suggests we need a per-user +object. Rather than choosing an obvious name2, let's call this the +User class. +

                      + +


                      class User(pb.Referenceable): + def __init__(self, username, server, clientref): + self.name = username + self.server = server + self.remote = clientref + def remote_joinGroup(self, groupname): + self.server.joinGroup(groupname, self) + def remote_sendMessage(self, groupname, message): + self.server.sendMessage(self.name, groupname, message) + def send(self, message): + self.remote.callRemote("print", message) + +class ChatServer: + def __init__(self): + self.groups = {} # indexed by name + def joinGroup(self, groupname, user): + if not self.groups.has_key(groupname): + self.groups[groupname] = [] + self.groups[groupname].append(user) + def sendMessage(self, from_username, groupname, message): + group = self.groups[groupname] + if group: + # send the message to all members of the group + for user in group: + user.send("<%s> says: %s" % (from_username, message)) +
Again, assume that each remote client gets access to a single User object, which is created with the proper username.

Note how the ChatServer object has no remote access: it isn't even pb.Referenceable anymore. This means that all access to it must be mediated through other objects, with code that is under your control.

As long as Alice only has access to her own User object, she can no longer spoof Bob. The only way for her to invoke ChatServer.sendMessage is to call her User object's remote_sendMessage method, and that method uses its own state to provide the from_username argument. It doesn't give her any way to change that state.

This restriction is important. The User object is able to maintain its own integrity because there is a wall between the object and the client: the client cannot inspect or modify internal state, like the .name attribute. The only way through this wall is via remote method invocations, and the only control Alice has over those invocations is when they get invoked and what arguments they are given.

Note:

No object can maintain its integrity against local threats: by design, Python offers no mechanism for class instances to hide their attributes, and once an intruder has a copy of self.__dict__, they can do everything the original object was able to do.

Unforgeable References

Now suppose you wanted to implement group parameters, for example a mode in which nobody was allowed to talk about mattresses, because some users are sensitive and calming them down after someone says "mattress" is a hassle best avoided altogether. Again, per-group state implies a per-group object. We'll go out on a limb and call this the Group object:

class User(pb.Referenceable):
    def __init__(self, username, server, clientref):
        self.name = username
        self.server = server
        self.remote = clientref
    def remote_joinGroup(self, groupname, allowMattress=True):
        # pass the group options through to the server
        return self.server.joinGroup(groupname, self, allowMattress)
    def send(self, message):
        self.remote.callRemote("print", message)

class Group(pb.Referenceable):
    def __init__(self, groupname, allowMattress):
        self.name = groupname
        self.allowMattress = allowMattress
        self.users = []
    def remote_send(self, from_user, message):
        if not self.allowMattress and message.find("mattress") != -1:
            raise ValueError, "Don't say that word"
        for user in self.users:
            user.send("<%s> says: %s" % (from_user.name, message))
    def addUser(self, user):
        self.users.append(user)

class ChatServer:
    def __init__(self):
        self.groups = {} # indexed by name
    def joinGroup(self, groupname, user, allowMattress):
        if not self.groups.has_key(groupname):
            self.groups[groupname] = Group(groupname, allowMattress)
        self.groups[groupname].addUser(user)
        return self.groups[groupname]
This example takes advantage of the fact that pb.Referenceable objects sent over a wire can be returned to you, and they will be turned into references to the same object that you originally sent. The client cannot modify the object in any way: all they can do is point at it and invoke its remote_* methods. Thus, you can be sure that the .name attribute remains the same as you left it. In this case, the client code would look something like this:

class ClientThing(pb.Referenceable):
    def remote_print(self, message):
        print message
    def join(self):
        d = self.remoteUser.callRemote("joinGroup", "#twisted",
                                       allowMattress=False)
        d.addCallback(self.gotGroup)
    def gotGroup(self, group):
        group.callRemote("send", self.remoteUser, "hi everybody")
The User object is sent from the server side, and is turned into a pb.RemoteReference when it arrives at the client. The client sends it back to Group.remote_send, and PB turns it back into a reference to the original User when it gets there. Group.remote_send can then use its .name attribute as the sender of the message.

Note:

Third party references (there aren't any)

This technique also relies upon the fact that the pb.Referenceable reference can only come from someone who holds a corresponding pb.RemoteReference. The design of the serialization mechanism (implemented in twisted.spread.jelly: pb, jelly, spread.. get it? Look for banana, too. What other networking framework can claim API names based on sandwich ingredients?) makes it impossible for a client to obtain a reference that they weren't explicitly given. References passed over the wire are given id numbers and recorded in a per-connection dictionary. If you didn't give them the reference, the id number won't be in the dict, and no amount of guessing by a malicious client will give them anything else. The dict goes away when the connection is dropped, further limiting the scope of those references.

Furthermore, it is not possible for Bob to send his User reference to Alice (perhaps over some other PB channel just between the two of them). Outside the context of Bob's connection to the server, that reference is just a meaningless number. To prevent confusion, PB will tell you if you try to give it away: when you try to hand a pb.RemoteReference to a third party, you'll get an exception (implemented with an assert in pb.py:364 RemoteReference.jellyFor).

This helps the security model somewhat: only the client you gave the reference to can cause any damage with it. Of course, the client might be a brainless zombie, simply doing anything some third party wants. When it's not proxying callRemote invocations, it's probably terrorizing the living and searching out human brains for sustenance. In short, if you don't trust them, don't give them that reference.

And remember that everything you've ever given them over that connection can come back to you. If you expect the client to invoke your method with some object A that you sent to them earlier, and instead they send you object B (that you also sent to them earlier), and you don't check it somehow, then you've just opened up a security hole (we'll see an example of this shortly). It may be better to keep such objects in a dictionary on the server side, and have the client send you an index string instead. Doing it that way makes it obvious that they can send you anything they want, and improves the chances that you'll remember to implement the right checks. (This is exactly what PB is doing underneath, with a per-connection dictionary of Referenceable objects, indexed by a number.)
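A rough sketch of that dictionary-plus-index-string approach, reusing the User/ChatServer names from the chat examples above (the remote_sendToGroup method is invented for illustration, not part of the original example):

class User(pb.Referenceable):
    # ... __init__ as before ...
    def remote_sendToGroup(self, groupname, message):
        # the client only sends a string; we look up the real Group object
        # ourselves and refuse names we never handed out
        try:
            group = self.server.groups[groupname]
        except KeyError:
            raise ValueError("no such group: %s" % groupname)
        group.send(self.name, message)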

And, of course, you have to make sure you don't accidentally hand out a reference to the wrong object.

But again, note the vulnerability. If Alice holds a RemoteReference to any object on the server side that has a .name attribute, she can use that name as a spoofed "from" parameter. As a simple example, what if her client code looked like:

class ClientThing(pb.Referenceable):
    def join(self):
        d = self.remoteUser.callRemote("joinGroup", "#twisted")
        d.addCallback(self.gotGroup)
    def gotGroup(self, group):
        # pass the Group reference itself as the from_user argument
        group.callRemote("send", group, "hi everybody")
This would let her send a message that appeared to come from #twisted rather than Alice. If she joined a group that happened to be named bob (perhaps it is the How To Be Bob channel, populated by Alice and countless others, a place where they can share stories about their best impersonating-Bob moments), then she would be able to emit a message that looked like "<bob> says: hi there", and she has accomplished her lifelong goal.

Argument Typechecking

There are two techniques to close this hole. The first is to have your remotely-invokable methods do type-checking on their arguments: if Group.remote_send asserted isinstance(from_user, User) then Alice couldn't use non-User objects to do her spoofing, and hopefully the rest of the system is designed well enough to prevent her from obtaining access to somebody else's User object.
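A minimal sketch of that check, using the Group and User classes from the earlier example:

class Group(pb.Referenceable):
    # ... __init__ and addUser as before ...
    def remote_send(self, from_user, message):
        # refuse anything that isn't one of our own User objects
        if not isinstance(from_user, User):
            raise ValueError("from_user must be a User")
        for user in self.users:
            user.send("<%s> says: %s" % (from_user.name, message))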

Objects as Capabilities

The second technique is to avoid having the client send you the objects altogether. If they don't send you anything, there is nothing to verify. In this case, you would have to have a per-user-per-group object, in which the remote_send method would only take a single message argument. The UserGroup object is created with references to the only User and Group objects that it will ever use, so no lookups are needed:

class UserGroup(pb.Referenceable):
    def __init__(self, user, group):
        self.user = user
        self.group = group
    def remote_send(self, message):
        self.group.send(self.user.name, message)

class Group:
    def __init__(self, groupname, allowMattress):
        self.name = groupname
        self.allowMattress = allowMattress
        self.users = []
    def send(self, from_username, message):
        if not self.allowMattress and message.find("mattress") != -1:
            raise ValueError, "Don't say that word"
        for user in self.users:
            user.send("<%s> says: %s" % (from_username, message))
    def addUser(self, user):
        self.users.append(user)
The only message-sending method Alice has left is UserGroup.remote_send, and it only accepts a message: there are no remaining ways to influence the "from" name.

In this model, each remotely-accessible object represents a very small set of capabilities. Security is achieved by only granting a minimal set of abilities to each remote user.

PB provides a shortcut which makes this technique easier to use. The Viewable class will be discussed below.

Avatars and Perspectives

In Twisted's cred system, an Avatar is an object that lives on the server side (defined here as the side farthest from the human who is trying to get something done) which lets the remote user get something done. The avatar isn't really a particular class, it's more like a description of a role that some object plays, as in "the Foo object here is acting as the user's avatar for this particular service". Generally, the remote user has some way of getting their avatar to run some code. The avatar object may enforce some security checks, and provide additional data, then call other methods which get things done.

The two pieces in the cred puzzle (for any protocol, not just PB) are: "what serves as the Avatar?" and "how does the user get access to it?".

For PB, the first question is easy. The Avatar is a remotely-accessible object which can run code: this is a perfect description of pb.Referenceable and its subclasses. We shall defer the second question until the next section.

In the example above, you can think of the ChatServer and Group objects as a service. The User object is the user's server-side representative: everything the user is capable of doing is done by running one of its methods. Anything that the server wants to do to the user (change their group membership, change their name, delete their pet cat, whatever) is done by manipulating the User object.

There are multiple User objects living in peace and harmony around the ChatServer. Each has a different point of view on the services provided by the ChatServer and the Groups: each may belong to different groups, some might have more permissions than others (like the ability to create groups). These different points of view are called Perspectives. This is the origin of the term "Perspective" in "Perspective Broker": PB provides and controls (i.e. brokers) access to Perspectives.

Once upon a time, these local-representative objects were actually called pb.Perspective. But this has changed with the advent of the rewritten cred system, and now the more generic term for a local representative object is an Avatar. But you will still see references to Perspective in the code, the docs, and the module names3. Just remember that perspectives and avatars are basically the same thing.

Despite all we've been telling you about how Avatars are more of a concept than an actual class, the base class from which you can create your server-side avatar-ish objects is, in fact, named pb.Avatar4. These objects behave very much like pb.Referenceable. The only difference is that instead of offering remote_FOO methods, they offer perspective_FOO methods.

The other way in which pb.Avatar differs from pb.Referenceable is that the avatar objects are designed to be the first thing retrieved by a cred-using remote client. Just as PBClientFactory.getRootObject gives the client access to a pb.Root object (which can then provide access to all kinds of other objects), PBClientFactory.login gives the client access to a pb.Avatar object (which can return other references).

So, the first half of using cred in your PB application is to create an Avatar object which implements perspective_ methods and is careful to do useful things for the remote user while remaining vigilant against being tricked with unexpected argument values. It must also be careful to never give access to objects that the user should not have access to, whether by returning them directly, returning objects which contain them, or returning objects which can be asked (remotely) to provide them.

The second half is how the user gets a pb.RemoteReference to your Avatar. As explained elsewhere, Avatars are obtained from a Realm. The Realm doesn't deal with authentication at all (usernames, passwords, public keys, challenge-response systems, retinal scanners, real-time DNA sequencers, etc). It simply takes an avatarID (which is effectively a username) and returns an Avatar object. The Portal and its Checkers deal with authenticating the user: by the time they are done, the remote user has proved their right to access the avatarID that is given to the Realm, so the Realm can return a remotely-controllable object that has whatever powers you wish to grant to this particular user.

For PB, the realm is expected to return a pb.Avatar (or anything which implements pb.IPerspective, really, but there's no reason not to return a pb.Avatar subclass). This object will be given to the client just like a pb.Root would be without cred, and the user can get access to other objects through it (if you let them).

The basic idea is that there is a separate IPerspective-implementing object (i.e. the Avatar subclass, i.e. the perspective) for each user, and only the authorized user gets a remote reference to that object. You can store whatever permissions or capabilities the user possesses in that object, and then use them when the user invokes a remote method. You give the user access to the perspective object instead of the objects that do the real work.

Perspective Examples

Here is a brief example of using a pb.Avatar. Most of the support code is "magic" for now: we'll explain it later.

One Client

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor
from twisted.cred import credentials

def main():
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    def1 = factory.login(credentials.UsernamePassword("user1", "pass1"))
    def1.addCallback(connected)
    reactor.run()

def connected(perspective):
    print "got perspective ref:", perspective
    print "asking it to foo(12)"
    perspective.callRemote("foo", 12)

main()
                      Source listing - listings/pb/pb5client.py
Ok, so that wasn't really very exciting. It doesn't accomplish much more than the first PB example, and used a lot more code to do it. Let's try it again with two users this time.

Note:

When the client runs login to request the Perspective, they can provide it with an optional client argument (which must be a pb.Referenceable object). If they do, then a reference to that object will be handed to the realm's requestAvatar in the mind argument.

The server-side Perspective can use it to invoke remote methods on something in the client, so that the client doesn't always have to drive the interaction. In a chat server, the client object would be the one to which display text messages were sent. In a board game server, this would provide a way to tell the clients that someone has made a move, so they can update their game boards.

Two Clients

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor
from twisted.cred import credentials

def main():
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    def1 = factory.login(credentials.UsernamePassword("user1", "pass1"))
    def1.addCallback(connected)
    reactor.run()

def connected(perspective):
    print "got perspective1 ref:", perspective
    print "asking it to foo(13)"
    perspective.callRemote("foo", 13)

main()
                      Source listing - listings/pb/pb6client1.py
                      +


#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor
from twisted.cred import credentials

def main():
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    def1 = factory.login(credentials.UsernamePassword("user2", "pass2"))
    def1.addCallback(connected)
    reactor.run()

def connected(perspective):
    print "got perspective2 ref:", perspective
    print "asking it to foo(14)"
    perspective.callRemote("foo", 14)

main()
                      Source listing - listings/pb/pb6client2.py
While pb6server.py is running, try starting pb6client1, then pb6client2. Compare the argument passed by the .callRemote() in each client. You can see how each client gets connected to a different Perspective.

How that example worked

Let's walk through the previous example and see what was going on.

First, we created a pb.Avatar subclass called MyPerspective which is our server-side Avatar. It implements a perspective_foo method that is exposed to the remote client.

Second, we created a realm (an object which implements IRealm, and therefore implements requestAvatar). This realm manufactures MyPerspective objects. It makes as many as we want, and names each one with the avatarID (a username) that comes out of the checkers. This MyRealm object returns two other objects as well, which we will describe later.

Third, we created a portal to hold this realm. The portal's job is to dispatch incoming clients to the credential checkers, and then to request Avatars for any which survive the authentication process.

Fourth, we made a simple checker (an object which implements IChecker) to hold valid user/password pairs. The checker gets registered with the portal, so it knows whom to ask when new clients connect. We use a checker named InMemoryUsernamePasswordDatabaseDontUse, which suggests that 1: all the username/password pairs are kept in memory instead of being saved to a database or something, and 2: you shouldn't use it. The admonition against using it is because there are better schemes: keeping everything in memory will not work when you have thousands or millions of users to keep track of, the passwords will be stored in the .tap file when the application shuts down (possibly a security risk), and finally it is a nuisance to add or remove users after the checker is constructed.
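As one example of a less toy-like scheme, twisted.cred.checkers also provides FilePasswordDB, which reads username:password pairs from a text file. Swapping it in is just a matter of registering a different checker with the portal; the filename and variable names below are illustrative assumptions, not part of the example listings:

from twisted.cred.checkers import FilePasswordDB

# passwords.txt holds one "username:password" pair per line
checker = FilePasswordDB("passwords.txt")
p.registerChecker(checker)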

Fifth, we create a pb.PBServerFactory to listen on a TCP port. This factory knows how to connect the remote client to the Portal, so incoming connections will be handed to the authentication process. Other protocols (non-PB) would do something similar: the factory that creates Protocol objects will give those objects access to the Portal so authentication can take place.

On the client side, a pb.PBClientFactory is created (as before) and attached to a TCP connection. When the connection completes, the factory will be asked to produce a Protocol, and it will create a PB object. Unlike the previous chapter, where we used .getRootObject, here we use factory.login to initiate the cred authentication process. We provide a credentials object, which is the client-side agent for doing our half of the authentication process. This process may involve several messages: challenges, responses, encrypted passwords, secure hashes, etc. We give our credentials object everything it will need to respond correctly (in this case, a username and password, but you could write a credential that used public-key encryption or even fancier techniques).

login returns a Deferred which, when it fires, will return a pb.RemoteReference to the remote avatar. We can then do callRemote to invoke a perspective_foo method on that Avatar.
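The server listing for this example (pb6server.py) is not reproduced in this excerpt; a minimal sketch of a server along the lines just described, with details assumed to match the walkthrough, could look like this:

from zope.interface import implements

from twisted.spread import pb
from twisted.cred import checkers, portal
from twisted.internet import reactor

class MyPerspective(pb.Avatar):
    def __init__(self, name):
        self.name = name
    def perspective_foo(self, arg):
        print "I am %s. perspective_foo(%s) called." % (self.name, arg)

class MyRealm:
    implements(portal.IRealm)
    def requestAvatar(self, avatarID, mind, *interfaces):
        if pb.IPerspective not in interfaces:
            raise NotImplementedError
        # one freshly made avatar per login, named after the avatarID
        return pb.IPerspective, MyPerspective(avatarID), lambda: None

p = portal.Portal(MyRealm())
c = checkers.InMemoryUsernamePasswordDatabaseDontUse(user1="pass1",
                                                     user2="pass2")
p.registerChecker(c)
reactor.listenTCP(8800, pb.PBServerFactory(p))
reactor.run()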

                      + + +

                      Anonymous Clients

                      + +


#!/usr/bin/env python

# Copyright (c) 2007-2009 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Implement the realm for and run on port 8800 a PB service which allows both
anonymous and username/password based access.

Successful username/password-based login requests are given an instance of
MyPerspective with a name which matches the username with which they
authenticated.  Successful anonymous login requests are given an instance of
MyPerspective with the name "Anonymous".
"""

from sys import stdout

from zope.interface import implements

from twisted.python.log import startLogging
from twisted.cred.checkers import ANONYMOUS, AllowAnonymousAccess
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.portal import IRealm, Portal
from twisted.internet import reactor
from twisted.spread.pb import Avatar, IPerspective, PBServerFactory


class MyPerspective(Avatar):
    """
    Trivial avatar exposing a single remote method for demonstrative
    purposes.  All successful login attempts in this example will result in
    an avatar which is an instance of this class.

    @type name: C{str}
    @ivar name: The username which was used during login or C{"Anonymous"}
        if the login was anonymous (a real service might want to avoid the
        collision this introduces between anonymous users and authenticated
        users named "Anonymous").
    """
    def __init__(self, name):
        self.name = name


    def perspective_foo(self, arg):
        """
        Print a simple message which gives the argument this method was
        called with and this avatar's name.
        """
        print "I am %s. perspective_foo(%s) called on %s." % (
            self.name, arg, self)



class MyRealm(object):
    """
    Trivial realm which supports anonymous and named users by creating
    avatars which are instances of MyPerspective for either.
    """
    implements(IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        if IPerspective not in interfaces:
            raise NotImplementedError("MyRealm only handles IPerspective")
        if avatarId is ANONYMOUS:
            avatarId = "Anonymous"
        return IPerspective, MyPerspective(avatarId), lambda: None



def main():
    """
    Create a PB server using MyRealm and run it on port 8800.
    """
    startLogging(stdout)

    p = Portal(MyRealm())

    # Here the username/password checker is registered.
    c1 = InMemoryUsernamePasswordDatabaseDontUse(user1="pass1", user2="pass2")
    p.registerChecker(c1)

    # Here the anonymous checker is registered.
    c2 = AllowAnonymousAccess()
    p.registerChecker(c2)

    reactor.listenTCP(8800, PBServerFactory(p))
    reactor.run()


if __name__ == '__main__':
    main()
                      +


#!/usr/bin/env python

# Copyright (c) 2007-2009 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Client which will talk to the server run by pbAnonServer.py, logging in
either anonymously or with username/password credentials.
"""

from sys import stdout

from twisted.python.log import err, startLogging
from twisted.cred.credentials import Anonymous, UsernamePassword
from twisted.internet import reactor
from twisted.internet.defer import gatherResults
from twisted.spread.pb import PBClientFactory


def error(why, msg):
    """
    Catch-all errback which simply logs the failure.  This isn't expected to
    be invoked in the normal case for this example.
    """
    err(why, msg)


def connected(perspective):
    """
    Login callback which invokes the remote "foo" method on the perspective
    which the server returned.
    """
    print "got perspective1 ref:", perspective
    print "asking it to foo(13)"
    return perspective.callRemote("foo", 13)


def finished(ignored):
    """
    Callback invoked when both logins and method calls have finished to shut
    down the reactor so the example exits.
    """
    reactor.stop()


def main():
    """
    Connect to a PB server running on port 8800 on localhost and log in to
    it, both anonymously and using a username/password it will recognize.
    """
    startLogging(stdout)
    factory = PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)

    anonymousLogin = factory.login(Anonymous())
    anonymousLogin.addCallback(connected)
    anonymousLogin.addErrback(error, "Anonymous login failed")

    usernameLogin = factory.login(UsernamePassword("user1", "pass1"))
    usernameLogin.addCallback(connected)
    usernameLogin.addErrback(error, "Username/password login failed")

    bothDeferreds = gatherResults([anonymousLogin, usernameLogin])
    bothDeferreds.addCallback(finished)

    reactor.run()


if __name__ == '__main__':
    main()
                      + +

pbAnonServer.py implements a server based on pb6server.py, extending it to permit anonymous logins in addition to authenticated logins. An AllowAnonymousAccess checker and an InMemoryUsernamePasswordDatabaseDontUse checker are registered, and the client's choice of credentials object determines which is used to authenticate the login. In either case, the realm will be called on to create an avatar for the login. AllowAnonymousAccess always produces an avatarId of ANONYMOUS.

On the client side, the only change is the use of an instance of Anonymous when calling PBClientFactory.login.

Using Avatars

Avatar Interfaces

The first element of the 3-tuple returned by requestAvatar indicates which Interface this Avatar implements. For PB avatars, it will always be pb.IPerspective, because that's the only interface these avatars implement.

This element is present because requestAvatar is actually presented with a list of possible Interfaces. The question being posed to the Realm is: "do you have an avatar for (avatarID) that can implement one of the following set of Interfaces?". Some portals and checkers might give a list of Interfaces and the Realm could pick; the PB code only knows how to do one, so we cannot take advantage of this feature.

Logging Out

The third element of the 3-tuple is a zero-argument callable, which will be invoked by the protocol when the connection has been lost. We can use this to notify the Avatar when the client has lost its connection. This will be described in more detail below.

Making Avatars

In the example above, we create Avatars upon request, during requestAvatar. Depending upon the service, these Avatars might already exist before the connection is received, and might outlive the connection. The Avatars might also accept multiple connections.

Another possibility is that the Avatars might exist ahead of time, but in a different form (frozen in a pickle and/or saved in a database). In that case, requestAvatar may need to perform a database lookup and then do something with the result before it can provide an avatar. It would then probably return a Deferred so it could provide the real Avatar later, once the lookup had completed.

Here are some possible implementations of MyRealm.requestAvatar:

    # pre-existing, static avatars
    def requestAvatar(self, avatarID, mind, *interfaces):
        assert pb.IPerspective in interfaces
        avatar = self.avatars[avatarID]
        return pb.IPerspective, avatar, lambda: None

    # database lookup and unpickling
    def requestAvatar(self, avatarID, mind, *interfaces):
        assert pb.IPerspective in interfaces
        d = self.database.fetchAvatar(avatarID)
        d.addCallback(self.doUnpickle)
        return pb.IPerspective, d, lambda: None
    def doUnpickle(self, pickled):
        avatar = pickle.loads(pickled)
        return avatar

    # everybody shares the same Avatar
    def requestAvatar(self, avatarID, mind, *interfaces):
        assert pb.IPerspective in interfaces
        return pb.IPerspective, self.theOneAvatar, lambda: None

    # anonymous users share one Avatar, named users each get their own
    def requestAvatar(self, avatarID, mind, *interfaces):
        assert pb.IPerspective in interfaces
        if avatarID == checkers.ANONYMOUS:
            return pb.IPerspective, self.anonAvatar, lambda: None
        else:
            return pb.IPerspective, self.avatars[avatarID], lambda: None

    # anonymous users get independent (but temporary) Avatars
    # named users get their own persistent one
    def requestAvatar(self, avatarID, mind, *interfaces):
        assert pb.IPerspective in interfaces
        if avatarID == checkers.ANONYMOUS:
            return pb.IPerspective, MyAvatar(), lambda: None
        else:
            return pb.IPerspective, self.avatars[avatarID], lambda: None
In the last example, note that the new MyAvatar instance is not saved anywhere: it will vanish when the connection is dropped. By contrast, the avatars that live in the self.avatars dictionary will probably get persisted into the .tap file along with the Realm, the Portal, and anything else that is referenced by the top-level Application object. This is an easy way to manage saved user profiles.

Connecting and Disconnecting

It may be useful for your Avatars to be told when remote clients gain (and lose) access to them. For example, an Avatar might be updated by something in the server, and if there are clients attached, it should update them (through the mind argument, which lets the Avatar do callRemote on the client).

One common idiom which accomplishes this is to have the Realm tell the avatar that a remote client has just attached. The Realm can also ask the protocol to let it know when the connection goes away, so it can then inform the Avatar that the client has detached. The third member of the requestAvatar return tuple is a callable which will be invoked when the connection is lost.

                      + +


class MyPerspective(pb.Avatar):
    def __init__(self):
        self.clients = []
    def attached(self, mind):
        self.clients.append(mind)
        print "attached to", mind
    def detached(self, mind):
        self.clients.remove(mind)
        print "detached from", mind
    def update(self, message):
        for c in self.clients:
            c.callRemote("update", message)

class MyRealm:
    def requestAvatar(self, avatarID, mind, *interfaces):
        assert pb.IPerspective in interfaces
        avatar = self.avatars[avatarID]
        avatar.attached(mind)
        return pb.IPerspective, avatar, lambda a=avatar: a.detached(mind)
Viewable

Once you have IPerspective objects (i.e. the Avatar) to represent users, the Viewable class can come into play. This class behaves a lot like Referenceable: it turns into a RemoteReference when sent over the wire, and certain methods can be invoked by the holder of that reference. However, the methods that can be called have names that start with view_ instead of remote_, and those methods are always called with an extra perspective argument that points to the Avatar through which the reference was sent:

                      + +


class Foo(pb.Viewable):
    def view_doFoo(self, perspective, arg1, arg2):
        pass
This is useful if you want to let multiple clients share a reference to the same object. The view_ methods can use the perspective argument to figure out which client is calling them. This gives them a way to do additional permission checks, do per-user accounting, etc.
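For instance, a single object shared by every client could keep per-user call counts keyed on the avatar. The class below is an invented illustration (names and quota values are assumptions), not part of the example application:

class SharedCounter(pb.Viewable):
    def __init__(self, limit=100):
        self.limit = limit
        self.calls = {}    # per-user accounting, indexed by avatar name
    def view_increment(self, perspective, amount):
        # 'perspective' is the calling user's Avatar, inserted by PB
        used = self.calls.get(perspective.name, 0)
        if used >= self.limit:
            raise ValueError("quota exceeded for %s" % perspective.name)
        self.calls[perspective.name] = used + 1
        return amount + 1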

This is the shortcut which makes per-user-per-group capability objects much easier to use. Instead of creating such per-(user,group) objects, you just have per-group objects which inherit from pb.Viewable, and give the user references to them. The local pb.Avatar object will automatically show up as the perspective argument in the view_* method calls, giving you a chance to involve the Avatar in the process.

Chat Server with Avatars

Combining all the above techniques, here is an example chat server which uses a fixed set of identities (say, for the three members of your bridge club, who hang out in #NeedAFourth hoping that someone will discover your server, guess somebody's password, break in, join the group, and also be available for a game next Saturday afternoon).

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from zope.interface import implements

from twisted.cred import portal, checkers
from twisted.spread import pb
from twisted.internet import reactor

class ChatServer:
    def __init__(self):
        self.groups = {} # indexed by name

    def joinGroup(self, groupname, user, allowMattress):
        if not self.groups.has_key(groupname):
            self.groups[groupname] = Group(groupname, allowMattress)
        self.groups[groupname].addUser(user)
        return self.groups[groupname]

class ChatRealm:
    implements(portal.IRealm)
    def requestAvatar(self, avatarID, mind, *interfaces):
        assert pb.IPerspective in interfaces
        avatar = User(avatarID)
        avatar.server = self.server
        avatar.attached(mind)
        return pb.IPerspective, avatar, lambda a=avatar: a.detached(mind)

class User(pb.Avatar):
    def __init__(self, name):
        self.name = name
    def attached(self, mind):
        self.remote = mind
    def detached(self, mind):
        self.remote = None
    def perspective_joinGroup(self, groupname, allowMattress=True):
        return self.server.joinGroup(groupname, self, allowMattress)
    def send(self, message):
        self.remote.callRemote("print", message)

class Group(pb.Viewable):
    def __init__(self, groupname, allowMattress):
        self.name = groupname
        self.allowMattress = allowMattress
        self.users = []
    def addUser(self, user):
        self.users.append(user)
    def view_send(self, from_user, message):
        if not self.allowMattress and message.find("mattress") != -1:
            raise ValueError, "Don't say that word"
        for user in self.users:
            user.send("<%s> says: %s" % (from_user.name, message))

realm = ChatRealm()
realm.server = ChatServer()
checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
checker.addUser("alice", "1234")
checker.addUser("bob", "secret")
checker.addUser("carol", "fido")
p = portal.Portal(realm, [checker])

reactor.listenTCP(8800, pb.PBServerFactory(p))
reactor.run()
Notice that the client uses perspective_joinGroup to both join a group and retrieve a RemoteReference to the Group object. However, the reference they get is actually to a special intermediate object called a pb.ViewPoint. When they do group.callRemote("send", "message"), their avatar is inserted into the argument list that Group.view_send actually sees. This lets the group get their username out of the Avatar without giving the client an opportunity to spoof someone else.

The client side code that joins a group and sends a message would look like this:

                      + +


#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor
from twisted.cred import credentials

class Client(pb.Referenceable):

    def remote_print(self, message):
        print message

    def connect(self):
        factory = pb.PBClientFactory()
        reactor.connectTCP("localhost", 8800, factory)
        def1 = factory.login(credentials.UsernamePassword("alice", "1234"),
                             client=self)
        def1.addCallback(self.connected)
        reactor.run()

    def connected(self, perspective):
        print "connected, joining group #lookingForFourth"
        # this perspective is a reference to our User object
        d = perspective.callRemote("joinGroup", "#lookingForFourth")
        d.addCallback(self.gotGroup)

    def gotGroup(self, group):
        print "joined group, now sending a message to all members"
        # 'group' is a reference to the Group object (through a ViewPoint)
        d = group.callRemote("send", "You can call me Al.")
        d.addCallback(self.shutdown)

    def shutdown(self, result):
        reactor.stop()


Client().connect()
                      Source listing - listings/pb/chatclient.py
                      + + +

                      Footnotes

1. Apparently Alice is one of those weirdos who has nothing better to do than to try and impersonate Bob. She will lie to her chat client, send incorrect objects to remote methods, even rewrite her local client code entirely to accomplish this juvenile prank. Given this adversarial relationship, one must wonder why she and Bob seem to spend so much time together: their adventures are clearly documented by the cryptographic literature.
2. The obvious name is clearly ServerSidePerUserObjectWhichNobodyElseHasAccessTo, but because Python makes everything else so easy to read, it only seems fair to make your audience work for something.
3. We could just go ahead and rename Perspective Broker to be Avatar Broker, but 1) that would cause massive compatibility problems, and 2) AB doesn't fit into the whole sandwich-themed naming scheme nearly as well as PB does. If we changed it to AB, we'd probably have to change Banana to be CD (CoderDecoder), and Jelly to be EF (EncapsulatorFragmentor). twisted.spread would then have to be renamed twisted.alphabetsoup, and then the whole food-pun thing would start all over again.
4. The avatar-ish class is named pb.Avatar because pb.Perspective was already taken, by the (now obsolete) oldcred perspective-ish class. It is a pity, but it simply wasn't possible to both replace pb.Perspective in-place and maintain a reasonable level of backwards-compatibility.
                      + +

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/pb-intro.html b/vendor/Twisted-10.0.0/doc/core/howto/pb-intro.html
new file mode 100644
index 000000000000..4b848641aff8
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/pb-intro.html
@@ -0,0 +1,320 @@

Twisted Documentation: Introduction to Perspective Broker

                      Introduction to Perspective Broker

                      + +
                      + + +

                      Introduction

                      + +

Suppose you find yourself in control of both ends of the wire: you have two programs that need to talk to each other, and you get to use any protocol you want. If you can think of your problem in terms of objects that need to make method calls on each other, then chances are good that you can use Twisted's Perspective Broker protocol rather than trying to shoehorn your needs into something like HTTP, or implementing yet another RPC mechanism1.

                      + +

                      The Perspective Broker system (abbreviated PB, spawning numerous +sandwich-related puns) is based upon a few central concepts:

                      + +
                        + +
                      • serialization: taking fairly arbitrary objects and types, + turning them into a chunk of bytes, sending them over a wire, then + reconstituting them on the other end. By keeping careful track of object + ids, the serialized objects can contain references to other objects and + the remote copy will still be useful.
                      • + +
                      • remote method calls: doing something to a local object and + causing a method to get run on a distant one. The local object is called a + RemoteReference, and you + do something by running its .callRemote method. +
                      • + +
                      + +

                      This document will contain several examples that will (hopefully) appear +redundant and verbose once you've figured out what's going on. To begin +with, much of the code will just be labelled magic: don't worry about how +these parts work yet. It will be explained more fully later.

                      + +

                      Object Roadmap

                      + +

                      To start with, here are the major classes, interfaces, and +functions involved in PB, with links to the file where they are +defined (all of which are under twisted/, of course). Don't worry +about understanding what they all do yet: it's easier to figure them +out through their interaction than explaining them one at a time.

                      + +
                      + +

                      Other classes that are involved at some point:

                      + +
                        + +
                      • RemoteReference + : spread/pb.py
                      • + +
                      • pb.Root + : spread/pb.py, actually defined as + twisted.spread.flavors.Root + in spread/flavors.py
                      • + +
                      • pb.Referenceable + : spread/pb.py, actually defined as + twisted.spread.flavors.Referenceable + in spread/flavors.py
                      • + +
                      + +

                      Classes and interfaces that get involved when you start to care +about authorization and security:

                      + +
                        +
                      • Portal + : cred/portal.py
                      • + +
                      • IRealm + : cred/portal.py
                      • + +
                      • IPerspective + : spread/pb.py, which you will usually be interacting + with via pb.Avatar (a basic implementor of the interface).
                      • +
                      + +

                      Subclassing and Implementing

                      + +

                      Technically you can subclass anything you want, but technically you +could also write a whole new framework, which would just waste a lot +of time. Knowing which classes are useful to subclass or which +interfaces to implement is one of the bits of knowledge that's crucial +to using PB (and all of Twisted) successfully. Here are some hints to +get started:

                      + +
                        + +
                      • pb.Root, pb.Referenceable: you'll + subclass these to make remotely-referenceable objects (i.e., objects + which you can call methods on remotely) using PB. You don't need to + change any of the existing behavior, just inherit all of it and add + the remotely-accessible methods that you want to export.
                      • + +
                      • pb.Avatar: You'll + be subclassing this when you get into PB programming with + authorization. This is an implementor of IPerspective.
                      • + +
                      • ICredentialsChecker: Implement this if + you want to authenticate your users against some sort of data store: + i.e., an LDAP database, an RDBMS, etc. There are already a few + implementations of this for various back-ends in + twisted.cred.checkers.
                      • + +
                      + + + +

                      Things you can Call Remotely

                      + +

                      At this writing, there are three flavors of objects that can +be accessed remotely through RemoteReference objects. Each of these +flavors has a rule for how the callRemote +message is transformed into a local method call on the server. In +order to use one of these flavors, subclass them and name your +published methods with the appropriate prefix. + +

                        +
                      • twisted.spread.pb.IPerspective implementors + +

This is the first interface we deal with. It is a perspective onto your PB application. Perspectives are slightly special because they are usually the first object that a given user can access in your application (after they log on). A user should only receive a reference to their own perspective. PB works hard to verify, as best it can, that any method that can be called on a perspective directly is being called on behalf of the user who is represented by that perspective. (Services with unusual requirements for "on behalf of", such as simulations with the ability to possess another player's avatar, are accomplished by providing indirected access to another user's perspective.)

                        + +

                        Perspectives are not usually serialized as remote references, so + do not return an IPerspective-implementor directly.

                        + +

                        The way most people will want to implement IPerspective is by + subclassing pb.Avatar. Remotely accessible methods on pb.Avatar + instances are named with the perspective_ prefix.

                        + +
                      • + +
                      • twisted.spread.pb.Referenceable + +

                        Referenceable objects are the simplest kind of PB object. You can call + methods on them and return them from methods to provide access to other + objects' methods.

                        + +

                        However, when a method is called on a Referenceable, it's not possible to + tell who called it.

                        + +

                        Remotely accessible methods on Referenceables are named with the + remote_ prefix.

                        + +
                      • + +
                      • twisted.spread.pb.Viewable + +

                        Viewable objects are remotely referenceable objects which have the + additional requirement that it must be possible to tell who is calling them. + The argument list to a Viewable's remote methods is modified in order to + include the Perspective representing the calling user.

                        + +

                        Remotely accessible methods on Viewables are named with the + view_ prefix.
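To make the three naming conventions above concrete, here is a tiny combined sketch; the class names and method bodies are invented for illustration only:

from twisted.spread import pb

class Echoer(pb.Referenceable):
    def remote_echo(self, text):        # callable by any holder of a reference
        return text

class UserAvatar(pb.Avatar):
    def __init__(self, name):
        self.name = name
    def perspective_whoami(self):       # called on behalf of the logged-in user
        return self.name

class Whiteboard(pb.Viewable):
    def view_draw(self, perspective, shape):   # PB inserts the caller's avatar
        print "%s drew %s" % (perspective.name, shape)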

                        + +
                      • + +
                      + +

                      + +

                      Things you can Copy Remotely

                      + +

                      In addition to returning objects that you can call remote methods on, you +can return structured copies of local objects.

                      + +

                      There are 2 basic flavors that allow for copying objects remotely. Again, +you can use these by subclassing them. In order to specify what state you want +to have copied when these are serialized, you can either use the Python default +__getstate__ or specialized method calls for that +flavor.

                      + +

                      +

                        +
                      • twisted.spread.pb.Copyable + +

                        This is the simpler kind of object that can be copied. Every time this + object is returned from a method or passed as an argument, it is serialized + and unserialized.

                        + +

Copyable provides a method you can override, getStateToCopyFor(perspective), which allows you to decide what an object will look like for the perspective who is requesting it. The perspective argument will be the perspective which is either passing an argument or returning a result which is an instance of your Copyable class.

                        + +

                        For security reasons, in order to allow a particular Copyable class to + actually be copied, you must declare a RemoteCopy + handler for + that Copyable subclass. The easiest way to do this is to declare both in the + same module, like so: + +


from twisted.spread import flavors
class Foo(flavors.Copyable):
    pass
class RemoteFoo(flavors.RemoteCopy):
    pass
flavors.setUnjellyableForClass(Foo, RemoteFoo)
                        + + In this case, each time a Foo is copied between peers, a RemoteFoo will be + instantiated and populated with the Foo's state. If you do not do this, PB + will complain that there have been security violations, and it may close the + connection. +

                        + +
                      • + +
                      • twisted.spread.pb.Cacheable + +

                        Let me preface this with a warning: Cacheable may be hard to understand. + The motivation for it may be unclear if you don't have some experience with + real-world applications that use remote method calling of some kind. Once + you understand why you need it, what it does will likely seem simple and + obvious, but if you get confused by this, forget about it and come back + later. It's possible to use PB without understanding Cacheable at all. +

                        + +

                        Cacheable is a flavor which is designed to be copied only when necessary, + and updated on the fly as changes are made to it. When passed as an argument + or a return value, if a Cacheable exists on the side of the connection it is + being copied to, it will be referred to by ID and not copied.

                        + +

                        Cacheable is designed to minimize errors involved in replicating an object + between multiple servers, especially those related to having stale + information. In order to do this, Cacheable automatically registers + observers and queries state atomically, together. You can override the + method getStateToCacheAndObserveFor(self, + perspective, observer) in order to specify how your observers will be + stored and updated. +

                        + +

                        Similar to + getStateToCopyFor, + getStateToCacheAndObserveFor gets passed a + perspective. It also gets passed an + observer, which is a remote reference to a + secret fourth referenceable flavor: + RemoteCache.

                        + +

                        A RemoteCache is simply + the object that represents your + Cacheable on the other side + of the connection. It is registered using the same method as + RemoteCopy, above. + RemoteCache is different, however, in that it will be referenced by its peer. + It acts as a Referenceable, where all methods prefixed with + observe_ will be callable remotely. It is + recommended that your object maintain a list (note: library support for this + is forthcoming!) of observers, and update them using + callRemote when the Cacheable changes in a way + that should be noticeable to its clients.

                        + +

                        Finally, when all references to a + Cacheable from a given + perspective are lost, + stoppedObserving(perspective, observer) + will be called on the + Cacheable, with the same + perspective/observer pair that getStateToCacheAndObserveFor was + originally called with. Any cleanup remote calls can be made there, as well + as removing the observer object from any lists which it was previously in. + Any further calls to this observer object will be invalid.
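Putting the pieces together, a minimal sketch of this life cycle might look like the following. The class names and the score attribute are illustrative; only the overridden methods, the observe_ prefix, and the registration call come from the text above.

from twisted.spread import pb

class Scoreboard(pb.Cacheable):
    def __init__(self):
        self.score = 0
        self.observers = []
    def getStateToCacheAndObserveFor(self, perspective, observer):
        # register the observer and hand back the initial state together
        self.observers.append(observer)
        return {'score': self.score}
    def setScore(self, score):
        # a local change is pushed to every RemoteCache watching us
        self.score = score
        for obs in self.observers:
            obs.callRemote('setScore', score)
    def stoppedObserving(self, perspective, observer):
        self.observers.remove(observer)

class RemoteScoreboard(pb.RemoteCache):
    def setCopyableState(self, state):
        self.score = state['score']
    def observe_setScore(self, score):
        self.score = score

pb.setUnjellyableForClass(Scoreboard, RemoteScoreboard)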

                        +
                      • +
                      +

                      + +

                      Footnotes

                      1. Most of Twisted is like this. Hell, most of +unix is like this: if you think it would be useful, someone else has +probably thought that way in the past, and acted on it, and you can take +advantage of the tool they created to solve the same problem you're facing +now.
                      + +

                      Index

Version: 10.0.0
\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/pb-usage.html b/vendor/Twisted-10.0.0/doc/core/howto/pb-usage.html
new file mode 100644
index 000000000000..e6d1b0d6ed68
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/pb-usage.html
@@ -0,0 +1,1158 @@

Twisted Documentation: Using Perspective Broker

                      Using Perspective Broker

                      + +
                      + + +

                      Basic Example

                      + +

                      The first example to look at is a complete (although somewhat trivial) +application. It uses PBServerFactory() on the server side, and +PBClientFactory() on the client side.

                      + +
                      +

from twisted.spread import pb
from twisted.internet import reactor
from twisted.python import util

factory = pb.PBClientFactory()
reactor.connectTCP("localhost", 8789, factory)
d = factory.getRootObject()
d.addCallback(lambda object: object.callRemote("echo", "hello network"))
d.addCallback(lambda echo: 'server echoed: '+echo)
d.addErrback(lambda reason: 'error: '+str(reason.value))
d.addCallback(util.println)
d.addCallback(lambda _: reactor.stop())
reactor.run()
                      + +

                      First we look at the server. This defines an Echoer class (derived from +pb.Root), with a method called +remote_echo(). +pb.Root objects (because of +their inheritance of +pb.Referenceable, described +later) can define methods with names of the form remote_*; a +client which obtains a remote reference to that +pb.Root object will be able to +invoke those methods.

                      + +

                      The pb.Root-ish object is +given to a pb.PBServerFactory(). This is a +Factory object like +any other: the Protocol objects it creates for new +connections know how to speak the PB protocol. The object you give to +pb.PBServerFactory() becomes the root object, which +simply makes it available for the client to retrieve. The client may only +request references to the objects you want to provide it: this helps you +implement your security model. Because it is so common to export just a +single object (and because a remote_* method on that one can +return a reference to any other object you might want to give out), the +simplest example is one where the PBServerFactory is given the root object, and +the client retrieves it.
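The server-side listing is not reproduced in this excerpt. A minimal sketch matching the description above (an Echoer root object with a remote_echo method, listening on the same port 8789 that the client connects to) might look like this:

from twisted.spread import pb
from twisted.internet import reactor

class Echoer(pb.Root):
    def remote_echo(self, st):
        # simply hand the string back to the caller
        print 'echoing:', st
        return st

if __name__ == '__main__':
    reactor.listenTCP(8789, pb.PBServerFactory(Echoer()))
    reactor.run()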

                      + +

                      The client side uses +pb.PBClientFactory to make a +connection to a given port. This is a two-step process involving opening +a TCP connection to a given host and port and requesting the root object +using .getRootObject().

                      + +

                      Because .getRootObject() has to wait until a network +connection has been made and exchange some data, it may take a while, +so it returns a Deferred, to which the gotObject() callback is +attached. (See the documentation on Deferring +Execution for a complete explanation of Deferreds). If and when the +connection succeeds and a reference to the remote root object is +obtained, this callback is run. The first argument passed to the +callback is a remote reference to the distant root object. (you can +give other arguments to the callback too, see the other parameters for +.addCallback() and .addCallbacks()).

                      + +

                      The callback does:

                      + +

object.callRemote("echo", "hello network")
                      + +

                      which causes the server's .remote_echo() method to be invoked. +(running .callRemote("boom") would cause +.remote_boom() to be run, etc). Again because of the delay +involved, callRemote() returns a +Deferred. Assuming the +remote method was run without causing an exception (including an attempt to +invoke an unknown method), the callback attached to that +Deferred will be +invoked with any objects that were returned by the remote method call.

                      + +

                      In this example, the server's Echoer object has a method +invoked, exactly as if some code on the server side had done:

                      + +

echoer_object.remote_echo("hello network")
                      + +

                      and from the definition of remote_echo() we see that this just +returns the same string it was given: hello network.

                      + +

                      From the client's point of view, the remote call gets another Deferred object instead of +that string. callRemote() always returns a Deferred. This is why PB is +described as a system for translucent remote method calls instead of +transparent ones: you cannot pretend that the remote object is really +local. Trying to do so (as some other RPC mechanisms do, coughCORBAcough) +breaks down when faced with the asynchronous nature of the network. Using +Deferreds turns out to be a very clean way to deal with the whole thing.

                      + +

The remote reference object (the one given to getRootObject()'s success callback) is an instance of the RemoteReference class. This means you can use it to invoke methods on the remote object that it refers to. Only instances of RemoteReference are eligible for .callRemote(). The RemoteReference object is the one that lives on the remote side (the client, in this case), not the local side (where the actual object is defined).

                      + +

                      In our example, the local object is that Echoer() instance, +which inherits from pb.Root, +which inherits from +pb.Referenceable. It is that +Referenceable class that makes the object eligible to be available +for remote method calls1. If you have +an object that is Referenceable, then any client that manages to get a +reference to it can invoke any remote_* methods they please.

                      + +
                      Note: +

                      The only thing they can do is invoke those +methods. In particular, they cannot access attributes. From a security point +of view, you control what they can do by limiting what the +remote_* methods can do.

                      + +

                      Also note: the other classes like +Referenceable allow access to +other methods, in particular perspective_* and view_* +may be accessed. Don't write local-only methods with these names, because then +remote callers will be able to do more than you intended.

                      + +

                      Also also note: the other classes like +pb.Copyable do allow +access to attributes, but you control which ones they can see.

                      +
                      + +

                      You don't have to be a +pb.Root to be remotely callable, +but you do have to be +pb.Referenceable. (Objects that +inherit from pb.Referenceable +but not from pb.Root can be +remotely called, but only +pb.Root-ish objects can be given +to the PBServerFactory.)

                      + +

                      Complete Example

                      + +

                      Here is an example client and server which uses pb.Referenceable as a root object and as the +result of a remotely exposed method. In each context, methods can be invoked +on the exposed Referenceable +instance. In this example, the initial root object has a method that returns a +reference to the second object.

                      + +

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb

class Two(pb.Referenceable):
    def remote_three(self, arg):
        print "Two.three was given", arg

class One(pb.Root):
    def remote_getTwo(self):
        two = Two()
        print "returning a Two called", two
        return two

from twisted.internet import reactor
reactor.listenTCP(8800, pb.PBServerFactory(One()))
reactor.run()
                      Source listing - listings/pb/pb1server.py
                      +

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor

def main():
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    def1 = factory.getRootObject()
    def1.addCallbacks(got_obj1, err_obj1)
    reactor.run()

def err_obj1(reason):
    print "error getting first object", reason
    reactor.stop()

def got_obj1(obj1):
    print "got first object:", obj1
    print "asking it to getTwo"
    def2 = obj1.callRemote("getTwo")
    def2.addCallbacks(got_obj2)

def got_obj2(obj2):
    print "got second object:", obj2
    print "telling it to do three(12)"
    obj2.callRemote("three", 12)

main()
                      Source listing - listings/pb/pb1client.py
                      + +

                      pb.PBClientFactory.getRootObject will +handle all the details of waiting for the creation of a connection. +It returns a Deferred, which will have its +callback called when the reactor connects to the remote server and +pb.PBClientFactory gets the +root, and have its errback called when the +object-connection fails for any reason, whether it was host lookup +failure, connection refusal, or some server-side error. +

                      + +

                      The root object has a method called remote_getTwo, which +returns the Two() instance. On the client end, the callback gets +a RemoteReference to that +instance. The client can then invoke two's .remote_three() +method.

                      + +

                      RemoteReference +objects have one method which is their purpose for being: callRemote. This method allows you to call a +remote method on the object being referred to by the Reference. RemoteReference.callRemote, like pb.PBClientFactory.getRootObject, returns +a Deferred. +When a response to the method-call being sent arrives, the Deferred's callback or errback +will be made, depending on whether an error occurred in processing the +method call.

                      + +

                      You can use this technique to provide access to arbitrary sets of objects. +Just remember that any object that might get passed over the wire must +inherit from Referenceable +(or one of the other flavors). If you try to pass a non-Referenceable object +(say, by returning one from a remote_* method), you'll get an +InsecureJelly +exception2.

                      + + +

                      References can come back to you

                      + +

                      If your server gives a reference to a client, and then that client gives +the reference back to the server, the server will wind up with the same +object it gave out originally. The serialization layer watches for returning +reference identifiers and turns them into actual objects. You need to stay +aware of where the object lives: if it is on your side, you do actual method +calls. If it is on the other side, you do +.callRemote()3.

                      + +

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor

class Two(pb.Referenceable):
    def remote_print(self, arg):
        print "two.print was given", arg

class One(pb.Root):
    def __init__(self, two):
        #pb.Root.__init__(self)   # pb.Root doesn't implement __init__
        self.two = two
    def remote_getTwo(self):
        print "One.getTwo(), returning my two called", self.two
        return self.two
    def remote_checkTwo(self, newtwo):
        print "One.checkTwo(): comparing my two", self.two
        print "One.checkTwo(): against your two", newtwo
        if self.two == newtwo:
            print "One.checkTwo(): our twos are the same"


two = Two()
root_obj = One(two)
reactor.listenTCP(8800, pb.PBServerFactory(root_obj))
reactor.run()
                      Source listing - listings/pb/pb2server.py
                      +

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor

def main():
    foo = Foo()
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    factory.getRootObject().addCallback(foo.step1)
    reactor.run()

# keeping globals around is starting to get ugly, so we use a simple class
# instead. Instead of hooking one function to the next, we hook one method
# to the next.

class Foo:
    def __init__(self):
        self.oneRef = None

    def step1(self, obj):
        print "got one object:", obj
        self.oneRef = obj
        print "asking it to getTwo"
        self.oneRef.callRemote("getTwo").addCallback(self.step2)

    def step2(self, two):
        print "got two object:", two
        print "giving it back to one"
        print "one is", self.oneRef
        self.oneRef.callRemote("checkTwo", two)

main()
                      Source listing - listings/pb/pb2client.py
                      + +

                      The server gives a Two() instance to the client, who then +returns the reference back to the server. The server compares the two +given with the two received and shows that they are the same, and that +both are real objects instead of remote references.

                      + +

A few other techniques are demonstrated in pb2client.py. One is that the callbacks are added with .addCallback instead of .addCallbacks. As you can tell from the Deferred documentation, .addCallback is a simplified form which only adds a success callback. The other is that to keep track of state from one callback to the next (the remote reference to the main One() object), we create a simple class, store the reference in an instance thereof, and point the callbacks at a sequence of bound methods. This is a convenient way to encapsulate a state machine. Each response kicks off the next method, and any data that needs to be carried from one state to the next can simply be saved as an attribute of the object.

                      + +

                      Remember that the client can give you back any remote reference you've +given them. Don't base your zillion-dollar stock-trading clearinghouse +server on the idea that you trust the client to give you back the right +reference. The security model inherent in PB means that they can only +give you back a reference that you've given them for the current connection +(not one you've given to someone else instead, nor one you gave them last +time before the TCP session went down, nor one you haven't yet given to the +client), but just like with URLs and HTTP cookies, the particular reference +they give you is entirely under their control.

                      + + +

                      References to client-side objects

                      + +

                      Anything that's Referenceable can get passed across the wire, in +either direction. The client can give a reference to the +server, and then the server can use .callRemote() to invoke methods on +the client end. This fuzzes the distinction between client and +server: the only real difference is who initiates the original TCP +connection; after that it's all symmetric.

                      + +
                      +

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor

class Two(pb.Referenceable):
    def remote_print(self, arg):
        print "Two.print() called with", arg

def main():
    two = Two()
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    def1 = factory.getRootObject()
    def1.addCallback(got_obj, two) # hands our 'two' to the callback
    reactor.run()

def got_obj(obj, two):
    print "got One:", obj
    print "giving it our two"
    obj.callRemote("takeTwo", two)

main()
                      Source listing - listings/pb/pb3client.py
                      + +

                      In this example, the client gives a reference to its own object to the +server. The server then invokes a remote method on the client-side +object.
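The matching server listing is not shown in this excerpt. A sketch of what it might look like, given that the client above calls takeTwo and that Two exposes remote_print, is:

from twisted.spread import pb
from twisted.internet import reactor

class One(pb.Root):
    def remote_takeTwo(self, two):
        # 'two' arrives as a RemoteReference to the client's object, so the
        # server talks back to it with callRemote
        print "received a Two called", two
        print "telling it to print(12)"
        two.callRemote("print", 12)

reactor.listenTCP(8800, pb.PBServerFactory(One()))
reactor.run()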

                      + + +

                      Raising Remote Exceptions

                      + +

                      Everything so far has covered what happens when things go right. What +about when they go wrong? The Python Way is to raise an exception of some +sort. The Twisted Way is the same.

                      + +

                      The only special thing you do is to define your Exception +subclass by deriving it from pb.Error. When any remotely-invokable method +(like remote_* or perspective_*) raises a +pb.Error-derived exception, a serialized form of that Exception +object will be sent back over the wire4. The other side (which +did callRemote) will have the errback +callback run with a Failure object that contains a copy of +the exception object. This Failure object can be queried to +retrieve the error message and a stack traceback.

                      + +

Failure is a special class, defined in twisted/python/failure.py, created to make it easier to handle asynchronous exceptions. Just as exception handlers can be nested, errback functions can be chained. If one errback can't handle the particular type of failure, it can be passed along to an errback handler further down the chain.

                      + +

                      For simple purposes, think of the Failure as just a container +for remotely-thrown Exception objects. To extract the string that +was put into the exception, use its .getErrorMessage() method. +To get the type of the exception (as a string), look at its +.type attribute. The stack traceback is available too. The +intent is to let the errback function get just as much information about the +exception as Python's normal try: clauses do, even though the +exception occurred in somebody else's memory space at some unknown time in +the past.

                      + +

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor

class MyError(pb.Error):
    """This is an Expected Exception. Something bad happened."""
    pass

class MyError2(Exception):
    """This is an Unexpected Exception. Something really bad happened."""
    pass

class One(pb.Root):
    def remote_broken(self):
        msg = "fall down go boom"
        print "raising a MyError exception with data '%s'" % msg
        raise MyError(msg)
    def remote_broken2(self):
        msg = "hadda owie"
        print "raising a MyError2 exception with data '%s'" % msg
        raise MyError2(msg)

def main():
    reactor.listenTCP(8800, pb.PBServerFactory(One()))
    reactor.run()

if __name__ == '__main__':
    main()
                      Source listing - listings/pb/exc_server.py
                      +

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb
from twisted.internet import reactor

def main():
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    d = factory.getRootObject()
    d.addCallbacks(got_obj)
    reactor.run()

def got_obj(obj):
    # change "broken" into "broken2" to demonstrate an unhandled exception
    d2 = obj.callRemote("broken")
    d2.addCallback(working)
    d2.addErrback(broken)

def working(result):
    # callbacks always receive the remote result as their first argument
    print "erm, it wasn't *supposed* to work.."

def broken(reason):
    print "got remote Exception"
    # reason should be a Failure (or subclass) holding the MyError exception
    print " .__class__ =", reason.__class__
    print " .getErrorMessage() =", reason.getErrorMessage()
    print " .type =", reason.type
    reactor.stop()

main()
                      Source listing - listings/pb/exc_client.py
                      + +
                      +% ./exc_client.py 
                      +got remote Exception
                      + .__class__ = twisted.spread.pb.CopiedFailure
                      + .getErrorMessage() = fall down go boom
                      + .type = __main__.MyError
                      +Main loop terminated.
                      +
                      + +

Oh, and what happens if you raise some other kind of exception? Something that isn't subclassed from pb.Error? Well, those are called unexpected exceptions, which make Twisted think that something has really gone wrong. These will raise an exception on the server side. This won't break the connection (the exception is trapped, just like most exceptions that occur in response to network traffic), but it will print out an unsightly stack trace on the server's stderr with a message that says Peer Will Receive PB Traceback, just as if the exception had happened outside a remotely-invokable method. (This message will go to the current log target, if log.startLogging was used to redirect it). The client will get the same Failure object in either case, but subclassing your exception from pb.Error is the way to tell Twisted that you expect this sort of exception, and that it is ok to just let the client handle it instead of also asking the server to complain. Look at exc_client.py and change it to invoke broken2() instead of broken() to see the change in the server's behavior.

                      + +

                      If you don't add an errback function to the Deferred, then a remote +exception will still send a Failure object back over, but it +will get lodged in the Deferred with nowhere to go. When that +Deferred finally goes out of scope, the side that did +callRemote will emit a message about an Unhandled error in +Deferred, along with an ugly stack trace. It can't raise an exception at +that point (after all, the callRemote that triggered the +problem is long gone), but it will emit a traceback. So be a good programmer +and always add errback handlers, even if they are just +calls to log.err.
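For instance, the cheapest well-behaved thing to do is to hang log.err directly on the Deferred. In this fragment, remote, handleResult, and someMethod are placeholders for your own reference, callback, and remote method name:

from twisted.python import log

d = remote.callRemote("someMethod")
d.addCallback(handleResult)
d.addErrback(log.err)   # a remote Failure is at least logged, never silently dropped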

                      + +

                      Try/Except blocks and Failure.trap

                      + +

                      To implement the equivalent of the Python try/except blocks (which can +trap particular kinds of exceptions and pass others up to +higher-level try/except blocks), you can use the +.trap() method in conjunction with multiple +errback handlers on the Deferred. Re-raising an +exception in an errback handler serves to pass that new +exception to the next handler in the chain. The trap method is +given a list of exceptions to look for, and will re-raise anything that +isn't on the list. Instead of passing unhandled exceptions up to an +enclosing try block, this has the effect of passing the +exception off to later errback handlers on the same +Deferred. The trap calls are used in chained +errbacks to test for each kind of exception in sequence.

                      + +
                      +

#!/usr/bin/env python

# Copyright (c) 2009 Twisted Matrix Laboratories.
# See LICENSE for details.

from twisted.spread import pb, jelly
from twisted.python import log
from twisted.internet import reactor

class MyException(pb.Error): pass
class MyOtherException(pb.Error): pass

class ScaryObject:
    # not safe for serialization
    pass

def worksLike(obj):
    # the callback/errback sequence in class One works just like an
    # asynchronous version of the following:
    try:
        response = obj.callMethod(name, arg)
    except pb.DeadReferenceError:
        print " stale reference: the client disconnected or crashed"
    except jelly.InsecureJelly:
        print " InsecureJelly: you tried to send something unsafe to them"
    except (MyException, MyOtherException):
        print " remote raised a MyException" # or MyOtherException
    except:
        print " something else happened"
    else:
        print " method successful, response:", response

class One:
    def worked(self, response):
        print " method successful, response:", response
    def check_InsecureJelly(self, failure):
        failure.trap(jelly.InsecureJelly)
        print " InsecureJelly: you tried to send something unsafe to them"
        return None
    def check_MyException(self, failure):
        which = failure.trap(MyException, MyOtherException)
        if which == MyException:
            print " remote raised a MyException"
        else:
            print " remote raised a MyOtherException"
        return None
    def catch_everythingElse(self, failure):
        print " something else happened"
        log.err(failure)
        return None

    def doCall(self, explanation, arg):
        print explanation
        try:
            deferred = self.remote.callRemote("fooMethod", arg)
            deferred.addCallback(self.worked)
            deferred.addErrback(self.check_InsecureJelly)
            deferred.addErrback(self.check_MyException)
            deferred.addErrback(self.catch_everythingElse)
        except pb.DeadReferenceError:
            print " stale reference: the client disconnected or crashed"

    def callOne(self):
        self.doCall("callOne: call with safe object", "safe string")
    def callTwo(self):
        self.doCall("callTwo: call with dangerous object", ScaryObject())
    def callThree(self):
        self.doCall("callThree: call that raises remote exception", "panic!")
    def callShutdown(self):
        print "telling them to shut down"
        self.remote.callRemote("shutdown")
    def callFour(self):
        self.doCall("callFour: call on stale reference", "dummy")

    def got_obj(self, obj):
        self.remote = obj
        reactor.callLater(1, self.callOne)
        reactor.callLater(2, self.callTwo)
        reactor.callLater(3, self.callThree)
        reactor.callLater(4, self.callShutdown)
        reactor.callLater(5, self.callFour)
        reactor.callLater(6, reactor.stop)

factory = pb.PBClientFactory()
reactor.connectTCP("localhost", 8800, factory)
deferred = factory.getRootObject()
deferred.addCallback(One().got_obj)
reactor.run()
                      + +
                      +% ./trap_client.py 
                      +callOne: call with safe object
                      + method successful, response: response
                      +callTwo: call with dangerous object
                      + InsecureJelly: you tried to send something unsafe to them
                      +callThree: call that raises remote exception
                      + remote raised a MyException
                      +telling them to shut down
                      +callFour: call on stale reference
                      + stale reference: the client disconnected or crashed
                      +% 
                      +
                      + + +

                      In this example, callTwo tries to send an instance of a +locally-defined class through callRemote. The default security +model implemented by pb.Jelly +on the remote end will not allow unknown classes to be unserialized (i.e. +taken off the wire as a stream of bytes and turned back into an object: a +living, breathing instance of some class): one reason is that it does not +know which local class ought to be used to create an instance that +corresponds to the remote object5. + +The receiving end of the connection gets to decide what to accept and what +to reject. It indicates its disapproval by raising a pb.InsecureJelly exception. Because it occurs +at the remote end, the exception is returned to the caller asynchronously, +so an errback handler for the associated Deferred +is run. That errback receives a Failure which wraps the +InsecureJelly.

                      + + +

                      Remember that trap re-raises exceptions that it wasn't asked +to look for. You can only check for one set of exceptions per errback +handler: all others must be checked in a subsequent handler. +check_MyException shows how multiple kinds of exceptions can be +checked in a single errback: give a list of exception types to +trap, and it will return the matching member. In this case, the +kinds of exceptions we are checking for (MyException and +MyOtherException) may be raised by the remote end: they inherit +from pb.Error.

                      + +

                      The handler can return None to terminate processing of the +errback chain (to be precise, it switches to the callback that follows the +errback; if there is no callback then processing terminates). It is a good +idea to put an errback that will catch everything (no trap +tests, no possible chance of raising more exceptions, always returns +None) at the end of the chain. Just as with regular try: +except: handlers, you need to think carefully about ways in which +your errback handlers could themselves raise exceptions. The extra +importance in an asynchronous environment is that an exception that falls +off the end of the Deferred will not be signalled until that +Deferred goes out of scope, and at that point may only cause a +log message (which could even be thrown away if log.startLogging is not used to point it at +stdout or a log file). In contrast, a synchronous exception that is not +handled by any other except: block will very visibly terminate +the program immediately with a noisy stack trace.

                      + +

                      callFour shows another kind of exception that can occur +while using callRemote: pb.DeadReferenceError. This one occurs when the +remote end has disconnected or crashed, leaving the local side with a stale +reference. This kind of exception happens to be reported right away (XXX: is +this guaranteed? probably not), so must be caught in a traditional +synchronous try: except pb.DeadReferenceError block.
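In other words, unlike the other failures discussed here, this one is raised by callRemote itself rather than delivered through the Deferred, so the call site looks like ordinary synchronous exception handling. This fragment mirrors doCall in the listing above; remote, arg, and fooMethod are placeholders:

from twisted.spread import pb
from twisted.python import log

try:
    d = remote.callRemote("fooMethod", arg)
except pb.DeadReferenceError:
    print " stale reference: the client disconnected or crashed"
else:
    d.addErrback(log.err)   # asynchronous failures still arrive via the Deferred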

                      + +

                      Yet another kind that can occur is a pb.PBConnectionLost exception. This occurs +(asynchronously) if the connection was lost while you were waiting for a +callRemote call to complete. When the line goes dead, all +pending requests are terminated with this exception. Note that you have no +way of knowing whether the request made it to the other end or not, nor how +far along in processing it they had managed before the connection was +lost. XXX: explain transaction semantics, find a decent reference.

                      + +

                      Footnotes

                      1. There are a few other classes +that can bestow this ability, but pb.Referenceable is the easiest to +understand; see 'flavors' below for details on the others.
                      2. This can be overridden, by subclassing one of +the Serializable flavors and defining custom serialization code for your +class. See Passing Complex Types for +details.
                      3. The binary nature of this +local vs. remote scheme works because you cannot give RemoteReferences to a +third party. If you could, then your object A could go to B, B could give it to +C, C might give it back to you, and you would be hard pressed to tell if the +object lived in C's memory space, in B's, or if it was really your own object, +tarnished and sullied after being handed down like a really ugly picture that +your great aunt owned and which nobody wants but which nobody can bear to throw +out. Ok, not really like that, but you get the idea.
                      4. To be precise, +the Failure will be sent if any exception is raised, not just +pb.Error-derived ones. But the server will print ugly error messages if you +raise ones that aren't derived from pb.Error.
                      5. The naive approach of simply doing import +SomeClass to match a remote caller who claims to have an object of +type SomeClass could have nasty consequences for some modules that do +significant operations in their __init__ methods (think +telnetlib.Telnet(host='localhost', port='chargen'), or even +more powerful classes that you have available in your server program). +Allowing a remote entity to create arbitrary classes in your namespace is +nearly equivalent to allowing them to run arbitrary code.

                        + +

                        The pb.InsecureJelly +exception arises because the class being sent over the wire has not been +registered with the serialization layer (known as jelly). The easiest way to make it possible to +copy entire class instances over the wire is to have them inherit from pb.Copyable, and then to use +setUnjellyableForClass(remoteClass, localClass) on the +receiving side. See Passing Complex Types +for an example.

                      + +

                      Index

Version: 10.0.0
\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/pb.html b/vendor/Twisted-10.0.0/doc/core/howto/pb.html
new file mode 100644
index 000000000000..92d2f9279571
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/pb.html
@@ -0,0 +1,52 @@

Twisted Documentation: Overview of Twisted Spread

                      Overview of Twisted Spread

                      + +
                      + + +

                      Perspective Broker (affectionately known as PB) is an +asynchronous, symmetric1 network protocol for secure, +remote method calls and transferring of objects. PB is translucent, not +transparent, meaning that it is very visible and obvious to see the +difference between local method calls and potentially remote method calls, +but remote method calls are still extremely convenient to make, and it is +easy to emulate them to have objects which work both locally and +remotely.

                      + +

                      PB supports user-defined serialized data in return values, which can be +either copied each time the value is returned, or cached: only copied +once and updated by notifications.

                      + +

                      PB gets its name from the fact that access to objects is through a +perspective. This means that when you are responding to a remote +method call, you can establish who is making the call.

                      + +

                      Rationale

                      + +

                      No other currently existing protocols have all the properties of PB at the +same time. The particularly interesting combination of attributes, though, is +that PB is flexible and lightweight, allowing for rapid development, while +still powerful enough to do two-way method calls and user-defined data +types.

                      + +

It is important to have these attributes in order to allow for a protocol which is extensible. One of the facets of this flexibility is that an arbitrary number of services can be aggregated over a single PB connection, and new methods can be published and called on existing objects without restarting the server or client.

                      + +

                      Footnotes

                      1. There is a negotiation phase +for banana with particular roles for listener and initiator, so it's not +completely symmetric, but after the connection is fully established, +the protocol is completely symmetrical.
                      + +

                      Index

Version: 10.0.0
\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/pclients.html b/vendor/Twisted-10.0.0/doc/core/howto/pclients.html
new file mode 100644
index 000000000000..0d95bc4b8701
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/pclients.html
@@ -0,0 +1,364 @@

Twisted Documentation: Managing Clients of Perspectives

                      Managing Clients of Perspectives

                      + +
                      + + +

                      Overview

                      + +

In all the IPerspective implementations we have shown so far, we ignored the mind argument and created a new Avatar for every connection. This is usually an easy design choice, and it works well for simple cases.

                      + +

                      In more complicated cases, for example an Avatar that +represents a player object which is persistent in the game universe, +we will want connections from the same player to use the same +Avatar.

                      + +

Another thing which is necessary in more complicated scenarios is notifying a player asynchronously. While it is possible, of course, to allow a player to call perspective_remoteListener(referenceable), that would mean both duplication of code and a higher latency in logging in, both bad.

                      + +

                      In previous sections all realms looked to be identical. +In this one we will show the usefulness of realms in accomplishing +those two objectives.

                      + +

                      Managing Avatars

                      + +

                      The simplest way to manage persistent avatars is to use a straight-forward +caching mechanism:

                      + +

from zope.interface import implements
from twisted.spread import pb
from twisted.cred import portal

class SimpleAvatar(pb.Avatar):
    greetings = 0
    def __init__(self, name):
        self.name = name
    def perspective_greet(self):
        self.greetings += 1
        return "<%d>hello %s" % (self.greetings, self.name)

class CachingRealm:
    implements(portal.IRealm)

    def __init__(self):
        self.avatars = {}

    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective not in interfaces: raise NotImplementedError
        if avatarId in self.avatars:
            p = self.avatars[avatarId]
        else:
            p = self.avatars[avatarId] = SimpleAvatar(avatarId)
        return pb.IPerspective, p, lambda:None
                      + +

This gives us a perspective which counts the number of greetings it sent its client. Implementing a caching strategy, as opposed to generating a realm with the correct avatars already in it, is usually easier. This makes adding new checkers to the portal, or adding new users to a checker database, transparent. Otherwise, careful synchronization would be needed between the checker and the avatar cache (much like the synchronization between UNIX's /etc/shadow and /etc/passwd).

                      + +

                      Sometimes, however, an avatar will need enough per-connection state +that it would be easier to generate a new avatar and cache something +else. Here is an example of that:

                      + +

from zope.interface import implements
from twisted.spread import pb
from twisted.cred import portal

class Greeter:
    greetings = 0
    def hello(self):
        self.greetings += 1
        return "<%d>hello" % self.greetings

class SimpleAvatar(pb.Avatar):
    greetings = 0
    def __init__(self, name, greeter):
        self.name = name
        self.greeter = greeter
    def perspective_greet(self):
        return self.greeter.hello()+' '+self.name

class CachingRealm:
    implements(portal.IRealm)

    def __init__(self):
        self.greeters = {}

    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective not in interfaces: raise NotImplementedError
        if avatarId in self.greeters:
            p = self.greeters[avatarId]
        else:
            p = self.greeters[avatarId] = Greeter()
        return pb.IPerspective, SimpleAvatar(avatarId, p), lambda:None
                      + +

                      It might seem tempting to use this pattern to have an avatar which +is notified of new connections. However, the problems here are twofold: +it would lead to a thin class which needs to forward all of its methods, +and it would be impossible to know when disconnections occur. Luckily, +there is a better pattern:

                      + +

from zope.interface import implements
from twisted.spread import pb
from twisted.cred import portal

class SimpleAvatar(pb.Avatar):
    greetings = 0
    connections = 0
    def __init__(self, name):
        self.name = name
    def connect(self):
        self.connections += 1
    def disconnect(self):
        self.connections -= 1
    def perspective_greet(self):
        self.greetings += 1
        return "<%d>hello %s" % (self.greetings, self.name)

class CachingRealm:
    implements(portal.IRealm)

    def __init__(self):
        self.avatars = {}

    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective not in interfaces: raise NotImplementedError
        if avatarId in self.avatars:
            p = self.avatars[avatarId]
        else:
            p = self.avatars[avatarId] = SimpleAvatar(avatarId)
        p.connect()
        return pb.IPerspective, p, p.disconnect
                      + +

                      It is possible to use such a pattern to define an arbitrary limit for +the number of concurrent connections:

                      + +

from zope.interface import implements
from twisted.spread import pb
from twisted.cred import portal

class SimpleAvatar(pb.Avatar):
    greetings = 0
    connections = 0
    def __init__(self, name):
        self.name = name
    def connect(self):
        self.connections += 1
    def disconnect(self):
        self.connections -= 1
    def perspective_greet(self):
        self.greetings += 1
        return "<%d>hello %s" % (self.greetings, self.name)

class CachingRealm:
    implements(portal.IRealm)

    def __init__(self, max=1):
        self.avatars = {}
        self.max = max

    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective not in interfaces: raise NotImplementedError
        if avatarId in self.avatars:
            p = self.avatars[avatarId]
        else:
            p = self.avatars[avatarId] = SimpleAvatar(avatarId)
        if p.connections >= self.max:
            raise ValueError("too many connections")
        p.connect()
        return pb.IPerspective, p, p.disconnect
                      + +

                      Managing Clients

                      + +

                      So far, all our realms have ignored the mind argument. +In the case of PB, the mind is an object supplied by +the remote login method -- usually, when it passes over the wire, +it becomes a pb.RemoteReference. This object allows +sending messages to the client as soon as the connection is established +and authenticated.

                      + +

                      Here is a simple remote-clock application which shows the usefulness +of the mind argument:

                      + +

import time

from zope.interface import implements
from twisted.spread import pb
from twisted.cred import portal
from twisted.application import internet

class SimpleAvatar(pb.Avatar):
    def __init__(self, client):
        self.s = internet.TimerService(1, self.telltime)
        self.s.startService()
        self.client = client
    def telltime(self):
        self.client.callRemote("notifyTime", time.time())
    def perspective_setperiod(self, period):
        self.s.stopService()
        self.s = internet.TimerService(period, self.telltime)
        self.s.startService()
    def logout(self):
        self.s.stopService()

class Realm:
    implements(portal.IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective not in interfaces: raise NotImplementedError
        p = SimpleAvatar(mind)
        return pb.IPerspective, p, p.logout
                      + +

                      In more complicated situations, you might want to cache the avatars +and give each one a set of current clients or something similar.
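A sketch of that combination, caching avatars while letting each one track its currently connected client minds, might look like the following. SimpleAvatar and CachingRealm follow the naming of the earlier examples; the notify remote method and the attached/detached helpers are illustrative assumptions, not part of this document.

from zope.interface import implements
from twisted.spread import pb
from twisted.cred import portal

class SimpleAvatar(pb.Avatar):
    def __init__(self, name):
        self.name = name
        self.clients = []          # live minds (pb.RemoteReference instances)
    def attached(self, mind):
        self.clients.append(mind)
    def detached(self, mind):
        self.clients.remove(mind)
    def broadcast(self, message):
        # push a message to every currently connected client of this avatar
        for client in self.clients:
            client.callRemote("notify", message)

class CachingRealm:
    implements(portal.IRealm)

    def __init__(self):
        self.avatars = {}

    def requestAvatar(self, avatarId, mind, *interfaces):
        if pb.IPerspective not in interfaces: raise NotImplementedError
        if avatarId in self.avatars:
            p = self.avatars[avatarId]
        else:
            p = self.avatars[avatarId] = SimpleAvatar(avatarId)
        p.attached(mind)
        return pb.IPerspective, p, lambda: p.detached(mind)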

                      + +
                      + +

                      Index

Version: 10.0.0
\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/plugin.html b/vendor/Twisted-10.0.0/doc/core/howto/plugin.html
new file mode 100644
index 000000000000..e902edf531d7
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/plugin.html
@@ -0,0 +1,292 @@

Twisted Documentation: The Twisted Plugin System

                      The Twisted Plugin System

                      + +
                      + + +

                      The purpose of this guide is to describe the preferred way to + write extensible Twisted applications (and consequently, also to + describe how to extend applications written in such a way). This + extensibility is achieved through the definition of one or more + APIs and a mechanism for collecting code plugins which + implement this API to provide some additional functionality. + At the base of this system is the twisted.plugin module.

                      + +

                      Making an application extensible using the plugin system has + several strong advantages over other techniques:

                      + +
                        +
                      • It allows third-party developers to easily enhance your + software in a way that is loosely coupled: only the plugin API + is required to remain stable.
                      • + +
                      • It allows new plugins to be discovered flexibly. For + example, plugins can be loaded and saved when a program is first + run, or re-discovered each time the program starts up, or they + can be polled for repeatedly at runtime (allowing the discovery + of new plugins installed after the program has started).
                      • +
                      + +

                      Writing Extensible Programs

                      + +

                      Taking advantage of twisted.plugin is + a two step process:

                      + +
                        +
                      1. +

                        + Define an interface which plugins will be required to implement. + This is done using the zope.interface package in the same way one + would define an interface for any other purpose. +

                        + +

A convention for defining interfaces is to do so in a file named like ProjectName/projectname/iprojectname.py. The rest of this document will follow that convention: consider the following interface definition to be in Matsim/matsim/imatsim.py, an interface definition module for a hypothetical material simulation package.

                        +
                      2. + +
                      3. + At one or more places in your program, invoke twisted.plugin.getPlugins and iterate over its + result. +
                      4. +
                      + +

                      + As an example of the first step, consider the following interface + definition for a physical modelling system. +

                      + +

from zope.interface import Interface, Attribute

class IMaterial(Interface):
    """
    An object with specific physical properties
    """
    def yieldStress(temperature):
        """
        Returns the pressure this material can support without
        fracturing at the given temperature.

        @type temperature: C{float}
        @param temperature: Kelvins

        @rtype: C{float}
        @return: Pascals
        """

    dielectricConstant = Attribute("""
        @type dielectricConstant: C{complex}
        @ivar dielectricConstant: The relative permittivity, with the
        real part giving reflective surface properties and the
        imaginary part giving the radio absorption coefficient.
        """)
                      + +

                      In another module, we might have a function that operates on + objects providing the IMaterial interface:

                      + +

def displayMaterial(m):
    print 'A material with yield stress %s at 500 K' % (m.yieldStress(500),)
    print 'Also a dielectric constant of %s.' % (m.dielectricConstant,)
                      + +

                      The last piece of required code is that which collects + IMaterial providers and passes them to the + displayMaterial function.

                      + +

from twisted.plugin import getPlugins
from matsim import imatsim

def displayAllKnownMaterials():
    for material in getPlugins(imatsim.IMaterial):
        displayMaterial(material)
                      + +

                      Third party developers may now contribute different materials + to be used by this modelling system by implementing one or more + plugins for the IMaterial interface.

                      + +

                      Extending an Existing Program

                      + +

                      The above code demonstrates how an extensible program might be + written using Twisted's plugin system. How do we write plugins + for it, though? Essentially, we create objects which provide the + required interface and then make them available at a particular + location. Consider the following example.

                      + +

from zope.interface import implements
from twisted.plugin import IPlugin
from matsim import imatsim

class SimpleMaterial(object):
    implements(IPlugin, imatsim.IMaterial)

    def __init__(self, yieldStressFactor, dielectricConstant):
        self._yieldStressFactor = yieldStressFactor
        self.dielectricConstant = dielectricConstant

    def yieldStress(self, temperature):
        return self._yieldStressFactor * temperature

steelPlate = SimpleMaterial(2.06842719e11, 2.7 + 0.2j)
brassPlate = SimpleMaterial(1.03421359e11, 1.4 + 0.5j)
                      + +

                      steelPlate and brassPlate now provide both + IPlugin and IMaterial. + All that remains is to make this module available at an appropriate + location. For this, there are two options. The first of these is + primarily useful during development: if a directory which + has been added to sys.path (typically by adding it to the + PYTHONPATH environment variable) contains a + directory named twisted/plugins/, + each .py file in that directory will be loaded + as a source of plugins. This directory must not be a Python + package: including __init__.py will cause the + directory to be skipped and no plugins loaded from it. Second, each + module in the installed version of Twisted's + twisted.plugins package will also be loaded as a source of + plugins.

                      + +

                      Once this plugin is installed in one of these two ways, + displayAllKnownMaterials can be run and we will see + two pairs of output: one for a steel plate and one for a brass + plate.

                      + +

                      Alternate Plugin Packages

                      + +

                      getPlugins takes one + additional argument not mentioned above. If passed in, the 2nd argument + should be a module or package to be used instead of + twisted.plugins as the plugin meta-package. If you + are writing a plugin for a Twisted interface, you should never + need to pass this argument. However, if you have developed an + interface of your own, you may want to mandate that plugins for it + are installed in your own plugins package, rather than in + Twisted's. In this case, you probably also want to support yourproject/plugins/ directories for ease of + development. To do so, you should make the __init__.py for that package contain at least + the following lines.

                      + +

from twisted.plugin import pluginPackagePaths
__path__.extend(pluginPackagePaths(__name__))
__all__ = []
                      + +

The key behavior here is that interfaces are essentially paired with a particular plugin package. If plugins are installed in a different package than the one expected by the code which relies on the interface they provide, they will not be found when the application goes to load them.

                      + +

                      Plugin Caching

                      + +

                      In the course of using the Twisted plugin system, you may + notice dropin.cache files appearing at + various locations. These files are used to cache information + about what plugins are present in the directory which contains + them. At times, this cached information may become out of date. + Twisted uses the mtimes of various files involved in the plugin + system to determine when this cache may have become invalid. + Twisted will try to re-write the cache each time it tries to use + it but finds it out of date.

                      + +

For a site-wide install, it may not (indeed, should not) be possible for applications running as normal users to rewrite the cache file. While these applications will still run and find correct plugin information, they may run more slowly than they would if the cache was up to date, and they may also report exceptions if certain plugins have been removed but are still referenced by the cache. For these reasons, when installing or removing software which provides Twisted plugins, the site administrator should be sure the cache is regenerated. Well-behaved package managers for such software should take this task upon themselves, since it is trivially automatable. The canonical way to regenerate the cache is to run the following Python code:

                      + +

from twisted.plugin import IPlugin, getPlugins
list(getPlugins(IPlugin))
                      + +

                      As mentioned, it is normal for exceptions to be raised + once here if plugins have been removed.

                      + +

                      Further Reading

                      + + + +
                      + +

                      Index

Version: 10.0.0
\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/process.html b/vendor/Twisted-10.0.0/doc/core/howto/process.html
new file mode 100644
index 000000000000..a5c714668062
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/process.html
@@ -0,0 +1,725 @@

Twisted Documentation: Using Processes

                      Using Processes

                      + +
                      + + +

                      Overview

                      + +

Along with connecting to servers across the internet, Twisted also connects to local processes with much the same API. The API is described in more detail in the documentation of:

                      +

                      + +

                      Running Another Process

                      + +

                      Processes are run through the reactor, +using reactor.spawnProcess. Pipes are created to the child process, +and added to the reactor core so that the application will not block while +sending data into or pulling data out of the new +process. reactor.spawnProcess requires two arguments, +processProtocol and executable, and optionally takes several more: arguments, +environment, path, userID, groupID, usePTY, and childFDs. Not all of these are +available on Windows.

                      + + +


from twisted.internet import reactor

processProtocol = MyProcessProtocol()
reactor.spawnProcess(processProtocol, executable, args=[program, arg1, arg2],
                     env={'HOME': os.environ['HOME']}, path=path,
                     uid=uid, gid=gid, usePTY=usePTY, childFDs=childFDs)
                      + +
                        + +
                      • processProtocol should be an instance of a subclass of + twisted.internet.protocol.ProcessProtocol. The + interface is described below.
                      • + +
                      • executable is the full path of the program to run. It + will be connected to processProtocol.
                      • + +
                      • args is a list of command line arguments to be passed to + the process. args[0] should be the name of the process.
                      • + +
                      • env is a dictionary containing the environment to pass + through to the process.
                      • + +
                      • path is the directory to run the process in. The child + will switch to the given directory just before starting the new program. + The default is to stay in the current directory.
                      • + +
                      • uid and gid are the user ID and group ID to + run the subprocess as. Of course, changing identities will be more likely + to succeed if you start as root.
                      • + +
                      • usePTY specifies whether the child process should be run + with a pty, or if it should just get a pair of pipes. Whether a program + needs to be run with a PTY or not depends on the particulars of that + program. Often, programs which primarily interact with users via a terminal + do need a PTY.
                      • + +
                      • childFDs lets you specify how the child's file + descriptors should be set up. Each key is a file descriptor number (an + integer) as seen by the child. 0, 1, and 2 are usually stdin, stdout, and + stderr, but some programs may be instructed to use additional fds through + command-line arguments or environment variables. Each value is either an + integer specifying one of the parent's current file descriptors, the + string r which creates a pipe that the parent can read from, or the + string w which creates a pipe that the parent can write to. If + childFDs is not provided, a default is used which creates the + usual stdin-writer, stdout-reader, and stderr-reader pipes.
                      • + +
                      + +

                      args and env have empty default values, but +many programs depend upon them to be set correctly. At the very least, +args[0] should probably be the same as executable. +If you just provide os.environ for env, the child +program will inherit the environment from the current process, which is +usually the civilized thing to do (unless you want to explicitly clean the +environment as a security precaution). The default is to give an empty +env to the child.
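For instance, a minimal sketch of spawning a child that inherits the parent's environment and gets a sensible args[0] might look like this (the program path and the trivial protocol here are illustrative, not part of the example above):

import os
from twisted.internet import protocol, reactor

class EchoOutput(protocol.ProcessProtocol):
    # Illustrative protocol: print whatever the child writes, then stop.
    def outReceived(self, data):
        print data,

    def processEnded(self, reason):
        reactor.stop()

executable = "/bin/ls"                          # any program will do here
reactor.spawnProcess(EchoOutput(), executable,
                     args=[executable, "-l"],   # args[0] matches the executable
                     env=os.environ)            # child inherits our environment
reactor.run()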

                      + +

                      reactor.spawnProcess returns an instance that +implements +IProcessTransport.

                      + +

                      Writing a ProcessProtocol

                      + +

                      The ProcessProtocol you pass to spawnProcess is your interaction with the +process. It has a very similar signature to a regular Protocol, but it has +several extra methods to deal with events specific to a process. In our +example, we will interface with 'wc' to create a word count of user-given +text. First, we'll start by importing the required modules, and writing the +initialization for our ProcessProtocol.

                      + +


from twisted.internet import protocol

class WCProcessProtocol(protocol.ProcessProtocol):

    def __init__(self, text):
        self.text = text
                      + +

When the connection to the process is made, the connectionMade method is called. In our protocol, we will write our text to the standard input of our process and then close standard input, to let the process know we are done writing to it.

                      + +


    def connectionMade(self):
        self.transport.write(self.text)
        self.transport.closeStdin()
                      + +

At this point, the process has received the data, and it's time for us to read the results. Instead of being received in dataReceived, data from standard output is received in outReceived. This is to distinguish it from data on standard error.

                      + +


    def outReceived(self, data):
        fieldLength = len(data) / 3
        lines = int(data[:fieldLength])
        words = int(data[fieldLength:fieldLength*2])
        chars = int(data[fieldLength*2:])
        self.transport.loseConnection()
        self.receiveCounts(lines, words, chars)
                      + +

At this point, we have parsed the output and ended the connection to the process. The results are then passed on to the final method, receiveCounts. This is for users of the class to override, so as to do other things with the data. For our demonstration, we will just print the results.

                      + +


    def receiveCounts(self, lines, words, chars):
        print 'Received counts from wc.'
        print 'Lines:', lines
        print 'Words:', words
        print 'Characters:', chars
                      + +

                      We're done! To use our WCProcessProtocol, we create an instance, and pass +it to spawnProcess.

                      + +


from twisted.internet import reactor
wcProcess = WCProcessProtocol("accessing protocols through Twisted is fun!\n")
reactor.spawnProcess(wcProcess, 'wc', ['wc'])
reactor.run()
                      + + +

                      Things that can happen to your ProcessProtocol

                      + +

                      These are the methods that you can usefully override in your subclass of +ProcessProtocol:

                      + +
                      + +

                      The base-class definitions of most of these functions are no-ops. This will +result in all stdout and stderr being thrown away. Note that it is important +for data you don't care about to be thrown away: if the pipe were not read, +the child process would eventually block as it tried to write to a full +pipe.

                      + + +

                      Things you can do from your ProcessProtocol

                      + +

                      The following are the basic ways to control the child process:

                      + +
                        + +
                      • self.transport.write(data): Stuff some data in the stdin + pipe. Note that this write method will queue any data that can't + be written immediately. Writing will resume in the future when the pipe + becomes writable again.
                      • + +
                      • self.transport.closeStdin: Close the stdin pipe. Programs + which act as filters (reading from stdin, modifying the data, writing to + stdout) usually take this as a sign that they should finish their job and + terminate. For these programs, it is important to close stdin when you're + done with it, otherwise the child process will never quit.
                      • + +
                      • self.transport.closeStdout: Not usually called, since you're + putting the process into a state where any attempt to write to stdout will + cause a SIGPIPE error. This isn't a nice thing to do to the poor + process.
                      • + +
                      • self.transport.closeStderr: Not usually called, same reason + as closeStdout.
                      • + +
                      • self.transport.loseConnection: Close all three pipes.
                      • + +
• self.transport.signalProcess('KILL'): Kill the child process. This will eventually result in processEnded being called. (A sketch combining this with a timeout follows this list.)
                      • + +
                      + + +
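As a sketch of how these calls can be combined, the following hypothetical protocol kills a child that runs for longer than a given number of seconds (the program and the timeout are arbitrary):

from twisted.internet import protocol, reactor

class TimeoutProtocol(protocol.ProcessProtocol):
    # Hypothetical sketch: send SIGKILL if the child outlives the timeout.
    def __init__(self, timeout=10):
        self.timeout = timeout

    def connectionMade(self):
        self.timer = reactor.callLater(self.timeout,
                                       self.transport.signalProcess, 'KILL')

    def processEnded(self, reason):
        # The child is gone, either by itself or because we killed it.
        if self.timer.active():
            self.timer.cancel()
        reactor.stop()

reactor.spawnProcess(TimeoutProtocol(5), '/bin/sleep', ['sleep', '60'])
reactor.run()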

                      Verbose Example

                      + +

                      Here is an example that is rather verbose about exactly when all the +methods are called. It writes a number of lines into the wc +program and then parses the output.

                      + +


                      #!/usr/bin/env python + +# Copyright (c) 2009-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import protocol +from twisted.internet import reactor +import re + +class MyPP(protocol.ProcessProtocol): + def __init__(self, verses): + self.verses = verses + self.data = "" + def connectionMade(self): + print "connectionMade!" + for i in range(self.verses): + self.transport.write("Aleph-null bottles of beer on the wall,\n" + + "Aleph-null bottles of beer,\n" + + "Take one down and pass it around,\n" + + "Aleph-null bottles of beer on the wall.\n") + self.transport.closeStdin() # tell them we're done + def outReceived(self, data): + print "outReceived! with %d bytes!" % len(data) + self.data = self.data + data + def errReceived(self, data): + print "errReceived! with %d bytes!" % len(data) + def inConnectionLost(self): + print "inConnectionLost! stdin is closed! (we probably did it)" + def outConnectionLost(self): + print "outConnectionLost! The child closed their stdout!" + # now is the time to examine what they wrote + #print "I saw them write:", self.data + (dummy, lines, words, chars, file) = re.split(r'\s+', self.data) + print "I saw %s lines" % lines + def errConnectionLost(self): + print "errConnectionLost! The child closed their stderr." + def processExited(self, reason): + print "processExited, status %d" % (reason.value.exitCode,) + def processEnded(self, reason): + print "processEnded, status %d" % (reason.value.exitCode,) + print "quitting" + reactor.stop() + +pp = MyPP(10) +reactor.spawnProcess(pp, "wc", ["wc"], {}) +reactor.run() +
                      + +

                      The exact output of this program depends upon the relative timing of some +un-synchronized events. In particular, the program may observe the child +process close its stderr pipe before or after it reads data from the stdout +pipe. One possible transcript would look like this:

                      + +
                      +% ./process.py
                      +connectionMade!
                      +inConnectionLost! stdin is closed! (we probably did it)
                      +errConnectionLost! The child closed their stderr.
                      +outReceived! with 24 bytes!
                      +outConnectionLost! The child closed their stdout!
                      +I saw 40 lines
                      +processEnded, status 0
                      +quitting
                      +Main loop terminated.
                      +%
                      +
                      + +

                      Doing it the Easy Way

                      + +

                      Frequently, one just needs a simple way to get all the output from a +program. In the blocking world, you might use commands.getoutput from the standard library, but +using that in an event-driven program will cause everything else to stall +until the command finishes. (in addition, the SIGCHLD handler used by that +function does not play well with Twisted's own signal handling). For these +cases, the twisted.internet.utils.getProcessOutput +function can be used. Here is a simple example:

                      + +


                      from twisted.internet import protocol, utils, reactor +from twisted.python import failure +from cStringIO import StringIO + +class FortuneQuoter(protocol.Protocol): + + fortune = '/usr/games/fortune' + + def connectionMade(self): + output = utils.getProcessOutput(self.fortune) + output.addCallbacks(self.writeResponse, self.noResponse) + + def writeResponse(self, resp): + self.transport.write(resp) + self.transport.loseConnection() + + def noResponse(self, err): + self.transport.loseConnection() + + +if __name__ == '__main__': + f = protocol.Factory() + f.protocol = FortuneQuoter + reactor.listenTCP(10999, f) + reactor.run() +
                      + +

                      If you only need the final exit code (like commands.getstatusoutput(cmd)[0]), the twisted.internet.utils.getProcessValue function is +useful. Here is an example:

                      + +


from twisted.internet import utils, reactor

def printTrueValue(val):
    print "/bin/true exits with rc=%d" % val
    output = utils.getProcessValue('/bin/false')
    output.addCallback(printFalseValue)

def printFalseValue(val):
    print "/bin/false exits with rc=%d" % val
    reactor.stop()

output = utils.getProcessValue('/bin/true')
output.addCallback(printTrueValue)
reactor.run()
                      + +

                      Mapping File Descriptors

                      + +

                      stdin, stdout, and stderr are just conventions. +Programs which operate as filters generally accept input on fd0, write their +output on fd1, and emit error messages on fd2. This is common enough that +the standard C library provides macros like stdin to mean fd0, and +shells interpret the pipe character | to mean redirect fd1 from +one command into fd0 of the next command.

                      + +

But these are just conventions, and programs are free to use additional file descriptors or even to ignore the standard three entirely. The childFDs argument allows you to specify exactly what kind of file descriptors the child process should be given.

                      + +

                      Each child FD can be put into one of three states:

                      + +
                        +
                      • Mapped to a parent FD: this causes the child's reads and writes to + come from or go to the same source/destination as the parent.
                      • + +
                      • Feeding into a pipe which can be read by the parent.
                      • + +
                      • Feeding from a pipe which the parent writes into.
                      • +
                      + +

                      Mapping the child FDs to the parent's is very commonly used to send the +child's stderr output to the same place as the parent's. When you run a +program from the shell, it will typically leave fds 0, 1, and 2 mapped to +the shell's 0, 1, and 2, allowing you to see the child program's output on +the same terminal you used to launch the child. Likewise, inetd will +typically map both stdin and stdout to the network socket, and may map +stderr to the same socket or to some kind of logging mechanism. This allows +the child program to be implemented with no knowledge of the network: it +merely speaks its protocol by doing reads on fd0 and writes on fd1.

                      + +

                      Feeding into a parent's read pipe is used to gather output from the +child, and is by far the most common way of interacting with child +processes.

                      + +

                      Feeding from a parent's write pipe allows the parent to control the +child. Programs like bc or ftp can be controlled this way, by +writing commands into their stdin stream.

                      + +

                      The childFDs dictionary maps file descriptor numbers (as will be +seen by the child process) to one of these three states. To map the fd to +one of the parent's fds, simply provide the fd number as the value. To map +it to a read pipe, use the string r as the value. To map it to a +write pipe, use the string w.

                      + +

                      For example, the default mapping sets up the standard stdin/stdout/stderr +pipes. It is implemented with the following dictionary:

                      + +


                      childFDs = { 0: "w", 1: "r", 2: "r" } +
                      + +

                      To launch a process which reads and writes to the same places that the +parent python program does, use this:

                      + +


                      childFDs = { 0: 0, 1: 1, 2: 2} +
                      + +

                      To write into an additional fd (say it is fd number 4), use this:

                      + +


                      childFDs = { 0: "w", 1: "r", 2: "r" , 4: "w"} +
                      + + + +

                      ProcessProtocols with extra file descriptors

                      + +

When you provide a childFDs dictionary with more than the normal three fds, you need additional methods to access those pipes. These methods are more generalized than the .outReceived ones described above. In fact, those methods (outReceived and errReceived) are actually just wrappers left in for compatibility with older code, written before this generalized fd mapping was implemented. The new list of things that can happen to your ProcessProtocol is as follows:

                      + +
                        + +
                      • .connectionMade: This is called when the program is + started.
                      • + +
• .childDataReceived(childFD, data): This is called with data that was received from one of the process' output pipes (i.e. where the childFDs value was r). The actual file number (from the point of view of the child process) is in childFD. For compatibility, the default implementation of .childDataReceived dispatches to .outReceived or .errReceived when childFD is 1 or 2.
                      • + +
                      • .childConnectionLost(childFD): This is called when the + reactor notices that one of the process' pipes has been closed. This + either means you have just closed down the parent's end of the pipe (with + .transport.closeChildFD), the child closed the pipe + explicitly (sometimes to indicate EOF), or the child process has + terminated and the kernel has closed all of its pipes. The childFD + argument tells you which pipe was closed. Note that you can only find out + about file descriptors which were mapped to pipes: when they are mapped to + existing fds the parent has no way to notice when they've been closed. For + compatibility, the default implementation dispatches to + .inConnectionLost, .outConnectionLost, or + .errConnectionLost.
                      • + +
• .processEnded(status): This is called when the child process has been reaped, and all pipes have been closed. This ensures that all data written by the child prior to its death will be received before .processEnded is invoked.
                      • + +
                      + + +

                      In addition to those methods, there are other methods available to +influence the child process:

                      + +
                        + +
                      • self.transport.writeToChild(childFD, data): Stuff some + data into an input pipe. .write simply writes to + childFD=0.
                      • + +
                      • self.transport.closeChildFD(childFD): Close one of the + child's pipes. Closing an input pipe is a common way to indicate EOF to + the child process. Closing an output pipe is neither very friendly nor + very useful.
                      • +
                      + +

                      Examples

                      + +

                      GnuPG, the encryption program, can use additional file descriptors to +accept a passphrase and emit status output. These are distinct from stdin +(used to accept the crypttext), stdout (used to emit the plaintext), and +stderr (used to emit human-readable status/warning messages). The passphrase +FD reads until the pipe is closed and uses the resulting string to unlock +the secret key that performs the actual decryption. The status FD emits +machine-parseable status messages to indicate the validity of the signature, +which key the message was encrypted to, etc.

                      + +

                      gpg accepts command-line arguments to specify what these fds are, and +then assumes that they have been opened by the parent before the gpg process +is started. It simply performs reads and writes to these fd numbers.

                      + +

                      To invoke gpg in decryption/verification mode, you would do something +like the following:

                      + +


                      class GPGProtocol(ProcessProtocol): + def __init__(self, crypttext): + self.crypttext = crypttext + self.plaintext = "" + self.status = "" + def connectionMade(self): + self.transport.writeToChild(3, self.passphrase) + self.transport.closeChildFD(3) + self.transport.writeToChild(0, self.crypttext) + self.transport.closeChildFD(0) + def childDataReceived(self, childFD, data): + if childFD == 1: self.plaintext += data + if childFD == 4: self.status += data + def processEnded(self, status): + rc = status.value.exitCode + if rc == 0: + self.deferred.callback(self) + else: + self.deferred.errback(rc) + +def decrypt(crypttext): + gp = GPGProtocol(crypttext) + gp.deferred = Deferred() + cmd = ["gpg", "--decrypt", "--passphrase-fd", "3", "--status-fd", "4", + "--batch"] + p = reactor.spawnProcess(gp, cmd[0], cmd, env=None, + childFDs={0:"w", 1:"r", 2:2, 3:"w", 4:"r"}) + return gp.deferred +
                      + +

                      In this example, the status output could be parsed after the fact. It +could, of course, be parsed on the fly, as it is a simple line-oriented +protocol. Methods from LineReceiver could be mixed in to make this parsing +more convenient.
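For example, a hypothetical subclass of the GPGProtocol above could split the status stream into lines as it arrives instead of accumulating it (the status-line handling itself is left as a stub):

class GPGStatusLineProtocol(GPGProtocol):
    def __init__(self, crypttext):
        GPGProtocol.__init__(self, crypttext)
        self._statusBuffer = ""

    def childDataReceived(self, childFD, data):
        if childFD == 4:
            # Buffer the status fd and hand off complete lines as they arrive.
            self._statusBuffer += data
            while "\n" in self._statusBuffer:
                line, self._statusBuffer = self._statusBuffer.split("\n", 1)
                self.statusLineReceived(line)
        else:
            GPGProtocol.childDataReceived(self, childFD, data)

    def statusLineReceived(self, line):
        # Application-specific handling of one "[GNUPG:] ..." status line.
        print "gpg status:", line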

                      + +

The stderr mapping (2:2) used will cause any GPG errors to be emitted by the parent program, just as if those errors had occurred in the parent itself. This is sometimes desirable (it roughly corresponds to letting exceptions propagate upwards), especially if you do not expect to encounter errors in the child process and want them to be more visible to the end user. The alternative is to map stderr to a read-pipe and handle any such output from within the ProcessProtocol (roughly corresponding to catching the exception locally).

                      + +
                      + +

                      Index

Version: 10.0.0

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/producers.html b/vendor/Twisted-10.0.0/doc/core/howto/producers.html
new file mode 100644
index 000000000000..377379b6f0b3
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/producers.html
@@ -0,0 +1,88 @@

Twisted Documentation: Producers and Consumers: Efficient High-Volume Streaming

                      Producers and Consumers: Efficient High-Volume Streaming

                      + +
                      + + +

                      The purpose of this guide is to describe the Twisted producer and consumer system. The producer system allows applications to stream large amounts of data in a manner which is both memory and CPU efficient, and which does not introduce a source of unacceptable latency into the reactor.

                      + +

                      Readers should have at least a passing familiarity with the terminology associated with interfaces.

                      + +

                      Push Producers

                      + +

                      A push producer is one which will continue to generate data without external prompting until told to stop; a pull producer will generate one chunk of data at a time in response to an explicit request for more data.

                      + +

The push producer API is defined by the IPushProducer interface. It is best to create a push producer when data generation is closely tied to an event source. For example, a proxy which forwards incoming bytes from one socket to another outgoing socket might be implemented using a push producer: the dataReceived method takes the role of an event source from which the producer generates bytes, and requires no external intervention in order to do so.

                      + +

                      There are three methods which may be invoked on a push producer at various points in its lifetime: pauseProducing, resumeProducing, and stopProducing.

                      + +

                      pauseProducing()

                      + +

                      In order to avoid the possibility of using an unbounded amount of memory to buffer produced data which cannot be processed quickly enough, it is necessary to be able to tell a push producer to stop producing data for a while. This is done using the pauseProducing method. Implementers of a push producer should temporarily stop producing data when this method is invoked.

                      + +

                      resumeProducing()

                      + +

                      After a push producer has been paused for some time, the excess of data which it produced will have been processed and the producer may again begin producing data. When the time for this comes, the push producer will have resumeProducing invoked on it.

                      + +

                      stopProducing()

                      + +

                      Most producers will generate some finite (albeit, perhaps, unknown in advance) amount of data and then stop, having served their intended purpose. However, it is possible that before this happens an event will occur which renders the remaining, unproduced data irrelevant. In these cases, producing it anyway would be wasteful. The stopProducing method will be invoked on the push producer. The implementation should stop producing data and clean up any resources owned by the producer.
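As a concrete illustration, here is a minimal sketch of a push producer modelled on the proxy described above: incoming bytes on one connection are pushed to some consumer (for instance, another connection's transport). It is only a sketch, not a complete proxy.

from zope.interface import implements

from twisted.internet import interfaces, protocol

class ProxySource(protocol.Protocol):
    implements(interfaces.IPushProducer)

    def __init__(self, consumer):
        self.consumer = consumer

    def connectionMade(self):
        # True means this is a push (streaming) producer.
        self.consumer.registerProducer(self, True)

    def dataReceived(self, data):
        # The event source: forward whatever arrives on our connection.
        self.consumer.write(data)

    def pauseProducing(self):
        # The consumer is falling behind; stop reading from our own socket.
        self.transport.pauseProducing()

    def resumeProducing(self):
        self.transport.resumeProducing()

    def stopProducing(self):
        self.transport.loseConnection()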

                      + +

                      Pull Producers

                      + +

                      The pull producer API is defined by the IPullProducer interface. Pull producers are useful in cases where there is no clear event source involved with the generation of data. For example, if the data is the result of some algorithmic process that is bound only by CPU time, a pull producer is appropriate.

                      + +

                      Pull producers are defined in terms of only two methods: resumeProducing and stopProducing.

                      + +

                      resumeProducing()

                      + +

                      Unlike push producers, a pull producer is expected to only produce data in response to resumeProducing being called. This method will be called whenever more data is required. How much data to produce in response to this method call depends on various factors: too little data and runtime costs will be dominated by the back-and-forth event notification associated with a buffer becoming empty and requesting more data to process; too much data and memory usage will be driven higher than it needs to be and the latency associated with creating so much data will cause overall performance in the application to suffer. A good rule of thumb is to generate between 16 and 64 kilobytes of data at a time, but you should experiment with various values to determine what is best for your application.

                      + +

                      stopProducing()

                      + +

                      This method has the same meaning for pull producers as it does for push producers.
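A hypothetical pull producer might look like the following sketch, which writes one chunk to its consumer each time it is asked for more (the chunk contents are arbitrary):

from zope.interface import implements

from twisted.internet import interfaces

class CountdownProducer(object):
    implements(interfaces.IPullProducer)

    def __init__(self, consumer, count=1000):
        self.consumer = consumer
        self.remaining = count
        # False means this is a pull (non-streaming) producer.
        self.consumer.registerProducer(self, False)

    def resumeProducing(self):
        # Called by the consumer each time it wants one more chunk.
        if self.remaining:
            self.consumer.write("%d\r\n" % self.remaining)
            self.remaining -= 1
        else:
            self.consumer.unregisterProducer()

    def stopProducing(self):
        # Nothing to clean up in this sketch.
        pass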

                      + +

                      Consumers

                      + +

So far, I've discussed the various external APIs of the two kinds of producers supported by Twisted. However, I have not mentioned where the data a producer generates actually goes, nor what entity is responsible for invoking these APIs. Both of these roles are filled by consumers. Consumers are defined by the two interfaces IConsumer and IFinishableConsumer.

                      + +

                      The slightly simpler of these two interfaces, IConsumer, defines three methods: registerProducer, unregisterProducer, and write. IFinishableConsumer adds finish.

                      + +

                      registerProducer(producer, streaming)

                      + +

So that a consumer can invoke methods on a producer, the consumer needs to be told about the producer. This is done with the registerProducer method. The first argument is either an IPullProducer or IPushProducer provider; the second argument indicates which of these interfaces is provided: True for push producers, False for pull producers.

                      + +

                      unregisterProducer()

                      + +

Eventually a consumer will no longer be interested in a producer. This could be because the producer has finished generating all its data, or because the consumer is moving on to something else, or for any number of other reasons. In any case, this method reverses the effects of registerProducer.

                      + +

                      write(data)

                      + +

                      As you might guess, this is the method which a producer calls when it has generated some data. Push producers should call it as frequently as they like as long as they are not paused. Pull producers should call it once for each time resumeProducing is called on them.

                      + +

                      finish()

                      + +

This method of IFinishableConsumer gives producers a way to explicitly notify the consumer that they have generated all the data they will ever generate.

                      + +

                      Further Reading

                      + + + +
                      + +

                      Index

Version: 10.0.0

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/quotes.html b/vendor/Twisted-10.0.0/doc/core/howto/quotes.html
new file mode 100644
index 000000000000..c7a6d5f96c46
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/quotes.html
@@ -0,0 +1,214 @@

Twisted Documentation: Setting up the TwistedQuotes application

                      Setting up the TwistedQuotes application

                      + +
                      + + + +

                      Goal

                      + +

                      This document describes how to set up the TwistedQuotes application used in +a number of other documents, such as designing Twisted applications.

                      + +

                      Setting up the TwistedQuotes project directory

                      + +

                      In order to run the Twisted Quotes example, you will need to do the +following:

                      + +
                        +
                      1. Make a TwistedQuotes directory on your system
                      2. +
                      3. Place the following files in the TwistedQuotes directory: +
                          +
• __init__.py (this file marks it as a package, see this section of the Python tutorial for more on packages);
                        • +
•

                          from random import choice + +from zope.interface import implements + +from TwistedQuotes import quoteproto + + + +class StaticQuoter: + """ + Return a static quote. + """ + + implements(quoteproto.IQuoter) + + def __init__(self, quote): + self.quote = quote + + + def getQuote(self): + return self.quote + + + +class FortuneQuoter: + """ + Load quotes from a fortune-format file. + """ + implements(quoteproto.IQuoter) + + def __init__(self, filenames): + self.filenames = filenames + + + def getQuote(self): + quoteFile = file(choice(self.filenames)) + quotes = quoteFile.read().split('\n%\n') + quoteFile.close() + return choice(quotes) +
                          ;
                        • +
• quoteproto.py (the protocol module imported above):

                          from zope.interface import Interface + +from twisted.internet.protocol import Factory, Protocol + + + +class IQuoter(Interface): + """ + An object that returns quotes. + """ + def getQuote(): + """ + Return a quote. + """ + + + +class QOTD(Protocol): + def connectionMade(self): + self.transport.write(self.factory.quoter.getQuote()+'\r\n') + self.transport.loseConnection() + + + +class QOTDFactory(Factory): + """ + A factory for the Quote of the Day protocol. + + @type quoter: L{IQuoter} provider + @ivar quoter: An object which provides L{IQuoter} which will be used by + the L{QOTD} protocol to get quotes to emit. + """ + protocol = QOTD + + def __init__(self, quoter): + self.quoter = quoter +
                          ;
                        • +
                        +
                      4. +
5. Add the TwistedQuotes directory's parent to your Python path. For example, if the TwistedQuotes directory's path is /tmp/TwistedQuotes, add /tmp to your Python path. On UNIX this would be export PYTHONPATH=/tmp:$PYTHONPATH; on Microsoft Windows change the PYTHONPATH variable through the System Properties dialog to add /tmp at the beginning.
                      6. +
                      7. +Test your package by trying to import it in the Python interpreter: +
                        +Python 2.1.3 (#1, Apr 20 2002, 22:45:31) 
                        +[GCC 2.95.4 20011002 (Debian prerelease)] on linux2
                        +Type "copyright", "credits" or "license" for more information.
                        +>>> import TwistedQuotes
                        +>>> # No traceback means you're fine.
                        +
                        +
                      8. +
                      + +
                      + +

                      Index

Version: 10.0.0

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/rdbms.html b/vendor/Twisted-10.0.0/doc/core/howto/rdbms.html
new file mode 100644
index 000000000000..9a038743c9c6
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/rdbms.html
@@ -0,0 +1,228 @@

Twisted Documentation: twisted.enterprise.adbapi: Twisted RDBMS support

                      twisted.enterprise.adbapi: Twisted RDBMS support

                      + +
                      + + +

                      Abstract

                      + +

                      Twisted is an asynchronous networking framework, but most + database API implementations unfortunately have blocking + interfaces -- for this reason, twisted.enterprise.adbapi was created. It is + a non-blocking interface to the standardized DB-API 2.0 API, + which allows you to access a number of different RDBMSes.

                      + +

                      What you should already know

                      + + + +

                      Quick Overview

                      + +

                      Twisted is an asynchronous framework. This means standard + database modules cannot be used directly, as they typically + work something like:

                      +


# Create connection...
db = dbmodule.connect('mydb', 'andrew', 'password')
# ...which blocks for an unknown amount of time

# Create a cursor
cursor = db.cursor()

# Do a query...
resultset = cursor.query('SELECT * FROM table WHERE ...')
# ...which could take a long time, perhaps even minutes.
                      + +

                      Those delays are unacceptable when using an asynchronous + framework such as Twisted. For this reason, twisted provides + twisted.enterprise.adbapi, an + asynchronous wrapper for any + DB-API 2.0-compliant module.

                      + +

enterprise.adbapi will do blocking database operations in separate threads, which trigger callbacks in the originating thread when they complete. In the meantime, the original thread can continue doing normal work, like servicing other requests.

                      + +

                      How do I use adbapi?

                      + +

Rather than creating a database connection directly, use the adbapi.ConnectionPool class to manage connections for you. This allows enterprise.adbapi to use multiple connections, one per thread. This is easy:

                      +


                      # Using the "dbmodule" from the previous example, create a ConnectionPool +from twisted.enterprise import adbapi +dbpool = adbapi.ConnectionPool("dbmodule", 'mydb', 'andrew', 'password') +
                      + +

                      Things to note about doing this:

                      + +
                        +
                      • There is no need to import dbmodule directly. You just + pass the name to adbapi.ConnectionPool's constructor.
                      • + +
                      • The parameters you would pass to dbmodule.connect are + passed as extra arguments to adbapi.ConnectionPool's constructor. + Keyword parameters work as well.
                      • +
                      + +

                      Now we can do a database query:

                      + +


# equivalent of cursor.execute(statement), return cursor.fetchall():
def getAge(user):
    return dbpool.runQuery("SELECT age FROM users WHERE name = ?", user)

def printResult(l):
    if l:
        print l[0][0], "years old"
    else:
        print "No such user"

getAge("joe").addCallback(printResult)
                      + +

                      This is straightforward, except perhaps for the return value + of getAge. It returns a twisted.internet.defer.Deferred, which allows + arbitrary callbacks to be called upon completion (or upon + failure). More documentation on Deferred is available here.

                      + +

In addition to runQuery, there are also runOperation and runInteraction. runInteraction is called with a callable (e.g. a function); the callable will be invoked in a thread with a twisted.enterprise.adbapi.Transaction, which basically mimics a DB-API cursor. In all cases a database transaction will be committed after your database usage is finished, unless an exception is raised, in which case it will be rolled back.

                      + +


                      def _getAge(txn, user): + # this will run in a thread, we can use blocking calls + txn.execute("SELECT * FROM foo") + # ... other cursor commands called on txn ... + txn.execute("SELECT age FROM users WHERE name = ?", user) + result = txn.fetchall() + if result: + return result[0][0] + else: + return None + +def getAge(user): + return dbpool.runInteraction(_getAge, user) + +def printResult(age): + if age != None: + print age, "years old" + else: + print "No such user" + +getAge("joe").addCallback(printResult) +
                      + +
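For statements that return no rows, such as an INSERT or UPDATE, runOperation can be used instead; a minimal sketch reusing the dbpool from above:

def setAge(user, age):
    # runOperation returns a Deferred that fires with None once the
    # statement has been executed and committed.
    return dbpool.runOperation("UPDATE users SET age = ? WHERE name = ?",
                               age, user)

def onUpdated(ignored):
    print "age updated"

setAge("joe", 31).addCallback(onUpdated)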

Also worth noting is that these examples assume that dbmodule uses the qmark paramstyle (see the DB-API specification). If your dbmodule uses a different paramstyle (e.g. pyformat) then use that. Twisted doesn't attempt to offer any sort of magic parameter munging -- runQuery(query, params, ...) maps directly onto cursor.execute(query, params, ...).

                      + +

                      Examples of various database adapters

                      + +

                      Notice that the first argument is the module name you would + usually import and get connect(...) + from, and that following arguments are whatever arguments you'd + call connect(...) with.

                      + +


from twisted.enterprise import adbapi

# Gadfly
cp = adbapi.ConnectionPool("gadfly", "test", "/tmp/gadflyDB")

# PostgreSQL PyPgSQL
cp = adbapi.ConnectionPool("pyPgSQL.PgSQL", database="test")

# MySQL
cp = adbapi.ConnectionPool("MySQLdb", db="test")
                      + +

                      And that's it!

                      + +

                      That's all you need to know to use a database from within + Twisted. You probably should read the adbapi module's + documentation to get an idea of the other functions it has, but + hopefully this document presents the core ideas.

                      +
                      + +

                      Index

Version: 10.0.0

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/reactor-basics.html b/vendor/Twisted-10.0.0/doc/core/howto/reactor-basics.html
new file mode 100644
index 000000000000..d123f2be369c
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/reactor-basics.html
@@ -0,0 +1,92 @@

Twisted Documentation: Reactor Overview

                      Reactor Overview

                      + +
                      + + + +

                      + This HOWTO introduces the Twisted reactor, describes the basics of the + reactor and links to the various reactor interfaces. +

                      + +

                      Reactor Basics

                      + +

                      The reactor is the core of the event loop within Twisted -- the loop + which drives applications using Twisted. The event loop is a programming + construct that waits for and dispatches events or messages in a program. + It works by calling some internal or external "event provider", which + generally blocks until an event has arrived, and then calls the relevant + event handler ("dispatches the event"). The reactor provides basic + interfaces to a number of services, including network communications, + threading, and event dispatching. +
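A tiny sketch of the loop in action -- schedule a call, start the reactor, and let it dispatch the event when the time comes:

from twisted.internet import reactor

def hello():
    print "Hello from the reactor loop!"
    # Stopping the reactor makes reactor.run() below return.
    reactor.stop()

reactor.callLater(3, hello)   # schedule an event three seconds from now
reactor.run()                 # start the event loop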

                      + +

                      + For information about using the reactor and the Twisted event loop, see: +

                      + +
                      + +

                      There are multiple implementations of the reactor, each + modified to provide better support for specialized features + over the default implementation. More information about these + and how to use a particular implementation is available via + Choosing a Reactor.

                      + + +

                      + Twisted applications can use the interfaces in twisted.application.service to configure and run the + application instead of using + boilerplate reactor code. See Using Application for an introduction to + Application. +

                      + +

                      Using the reactor object

                      + +

                      You can get to the reactor object using the following code:

                      + +


                      from twisted.internet import reactor +
                      + +

                      The reactor usually implements a set of interfaces, but + depending on the chosen reactor and the platform, some of + the interfaces may not be implemented:

                      + + +
                      + +

                      Index

Version: 10.0.0

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/row.html b/vendor/Twisted-10.0.0/doc/core/howto/row.html
new file mode 100644
index 000000000000..bc12625e3794
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/row.html
@@ -0,0 +1,279 @@

Twisted Documentation: Twisted Enterprise Row Objects

                      Twisted Enterprise Row Objects

                      + +
                      + +
                      Note: +

                      +Due to lack of maintenance, twisted.enterprise.row +and twisted.enterprise.reflector have been deprecated since +Twisted 8.0. +

                      + +

                      +This documentation is maintained only for users with an existing +codebase. +

                      +
                      + + + + +

                      The twisted.enterprise.row module is a method of +interfacing simple python objects with rows in relational database +tables. It has two components: the RowObject class which +developers sub-class for each relational table that their code +interacts with, and the Reflector which is responsible +for updates, inserts, queries and deletes against the database.

                      + +

The row module is intended for applications such as on-line games and websites that require a back-end database interface. It is not a fully featured object-relational mapper for Python -- it deals best with simple data types structured in ways that can be easily represented in a relational database. It is well suited to building a Python interface to an existing relational database, and slightly less suited to adding database persistence to an existing Python application.

                      + +

                      If row does not fit your model, you will be best off using +the low-level database API directly, +or writing your own object/relational layer on top of it.

                      + +

                      Class Definitions

                      + +

To interface to relational database tables, the developer must create a class derived from the twisted.enterprise.row.RowObject class for each table. These derived classes must define a number of class attributes which contain information about the database table that the class corresponds to. The required class attributes are:

                      + +
                        +
                      • rowColumns - list of the column names and types in the table with + the correct case
                      • +
                      • rowKeyColumns - list of key columns in form: [(columnName, + typeName)]
                      • +
                      • rowTableName - the name of the database table
                      • +
                      + +

                      There are also two optional class attributes that can be specified:

                      + +
                        +
• rowForeignKeys - list of foreign keys to other database tables in the form: [(tableName, [(childColumnName, childColumnType), ...], [(parentColumnName, parentColumnType), ...], containerMethodName, autoLoad)]
                      • +
                      • rowFactoryMethod - a method that creates instances of this + class
                      • +
                      + +

                      For example:

                      + +


class RoomRow(row.RowObject):
    rowColumns = [("roomId", "int"),
                  ("town_id", "int"),
                  ("name", "varchar"),
                  ("owner", "varchar"),
                  ("posx", "int"),
                  ("posy", "int"),
                  ("width", "int"),
                  ("height", "int")]
    rowKeyColumns = [("roomId", "int4")]
    rowTableName = "testrooms"
    rowFactoryMethod = [testRoomFactory]
                      + +

                      The items in the rowColumns list will become data members of +classes of this type when they are created by the Reflector.

                      + +

                      Initialization

                      + +

                      The initialization phase builds the SQL for the database interactions. +It uses the system catalogs of the database to do this, but requires +some basic information to get started. The class attributes of +the classes derived from RowClass are used for this. Those classes +are passed to a Reflector when it is created.

                      + +

                      There are currently two available reflectors in Twisted Enterprise, +the SQL Reflector for relational databases which uses the python DB +API, and the XML Reflector which uses a file system containing XML +files. The XML reflector is currently extremely slow.

                      + +

                      An example class list for the RoomRow class we specified above using the SQLReflector:

                      + +


from twisted.enterprise.sqlreflector import SQLReflector

dbpool = adbapi.ConnectionPool("pyPgSQL.PgSQL")
reflector = SQLReflector(dbpool, [RoomRow])
                      + +

                      Creating Row Objects

                      + +

                      There are two methods of creating RowObjects - loading from +the database, and creating a new instance ready to be inserted.

                      + +

                      To load rows from the database and create RowObject instances +for each of the rows, use the loadObjectsFrom method of the Reflector. +This takes a tableName, an optional user data parameter, +and an optional where clause. The where clause may +be omitted which will retrieve all the rows from the table. For +example:

                      + +


def gotRooms(rooms):
    for room in rooms:
        print "Got room:", room.id

d = reflector.loadObjectsFrom("testrooms",
                              whereClause=[("id", reflector.EQUAL, 5)])
d.addCallback(gotRooms)
                      + +

                      For more advanced RowObject construction, loadObjectsFrom may +use a factoryMethod that was specified as a class attribute for +the RowClass derived class. This method will be called for each +of the rows with the class object, the userData parameter, and +a dictionary of data from the database keyed by column name. This +factory method should return a fully populated RowObject instance +and may be used to do pre-processing, lookups, and data transformations +before exposing the data to user code. An example factory method:

                      + +


def testRoomFactory(roomClass, userData, kw):
    newRoom = roomClass(userData)
    newRoom.__dict__.update(kw)
    return newRoom
                      + +

                      The last method of creating a row object is for new instances +that do not already exist in the database table. In this case, +create a new instance and assign its primary key attributes and +all of its member data attributes, then pass it to the insertRow +method of the Reflector. For example:

                      + +


newRoom = RoomRow()
newRoom.assignKeyAttr("roomId", 11)
newRoom.town_id = 20
newRoom.name = 'newRoom1'
newRoom.owner = 'fred'
newRoom.posx = 100
newRoom.posy = 100
newRoom.width = 15
newRoom.height = 20
reflector.insertRow(newRoom).addCallback(onInsert)
                      + +

                      This will insert a new row into the database table for this +new RowObject instance. Note that the assignKeyAttr +method must be used to set primary key attributes - regular attribute +assignment of a primary key attribute of a rowObject will raise +an exception. This prevents the database identity of RowObject +from being changed by mistake.

                      + + +

                      Relationships Between Tables

                      + +

                      Specifying a foreign key for a RowClass creates a relationship +between database tables. When loadObjectsFrom is called for a table, it will +automatically load all the children rows for the rows from the specified +table. The child rows will be put into a list member variable of the +rowObject instance with the name childRows or if a +containerMethod is specified for the foreign key relationship, +that method will be called on the parent row object for each row that is +being added to it as a child.

                      + +

                      The autoLoad member of the foreign key definition is a flag +that specifies whether child rows should be auto-loaded for that +relationship when a parent row is loaded.
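For instance, a hypothetical child table of the RoomRow class above might declare its foreign key like this (the table and column names are invented for illustration):

class FurnitureRow(row.RowObject):
    rowColumns     = [("furnId", "int"),
                      ("roomId", "int"),
                      ("name",   "varchar")]
    rowKeyColumns  = [("furnId", "int4")]
    rowTableName   = "furniture"
    # (tableName, child columns, parent columns, containerMethod, autoLoad)
    rowForeignKeys = [("testrooms",
                       [("roomId", "int4")],
                       [("roomId", "int4")],
                       None,        # use the default childRows list
                       1)]          # auto-load children with their parent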

                      + +

                      Duplicate Row Objects

                      + +

                      If a reflector tries to load an instance of a rowObject that +is already loaded, it will return a reference to the existing +rowObject rather than creating a new instance. The reflector maintains +a cache of weak references to all loaded row objects by their +unique keys for this purpose.

                      + +

                      Updating Row Objects

                      + +

                      RowObjects have a dirty member attribute that is +set to 1 when any of the member attributes of the instance that +map to database columns are changed. This dirty flag can be used +to tell when RowObjects need to be updated back to the database. +In addition, the setDirty method can be overridden +to provide more complex automated handling such as dirty lists +(be sure to call the base class setDirty though!).

                      + +

When it is determined that a RowObject instance is dirty and needs to have its state updated in the database, pass that object to the updateRow method of the Reflector. For example:

                      + +


                      reflector.updateRow(room).addCallback(onUpdated) +
                      + +

                      For more complex behavior, the reflector can generate the SQL +for the update but not perform the update. This can be useful +for batching up multiple updates into single requests. For example:

                      + +


                      updateSQL = reflector.updateRowSQL(room) +
                      + +

                      Deleting Row Objects

                      + +

To delete a row from a database, pass the RowObject instance for that row to the Reflector's deleteRow method. Deleting the Python RowObject instance does not automatically delete the row from the database. For example:

                      + +


                      reflector.deleteRow(room) +
                      + +
                      + +

                      Index

Version: 10.0.0

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/servers.html b/vendor/Twisted-10.0.0/doc/core/howto/servers.html
new file mode 100644
index 000000000000..567aeca300f4
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/servers.html
@@ -0,0 +1,429 @@

Twisted Documentation: Writing Servers

                      Writing Servers

                      + +
                      + + +

                      Overview

                      + +

Twisted is a framework designed to be very flexible and let you write powerful servers. The cost of this flexibility is a few extra layers between you and your server code.

                      + +

This document describes the Protocol layer, where you implement protocol parsing and handling. If you are implementing an application then you should read this document second, after first reading the top level overview of how to begin writing your Twisted application, in Writing Plug-Ins for Twisted. This document is only relevant to TCP, SSL and Unix socket servers; there is a separate document for UDP.

                      + +

                      Your protocol handling class will usually subclass twisted.internet.protocol.Protocol. Most + protocol handlers inherit either from this class or from one of + its convenience children. An instance of the protocol class + might be instantiated per-connection, on demand, and might go + away when the connection is finished. This means that + persistent configuration is not saved in the + Protocol.

                      + +

                      The persistent configuration is kept in a Factory class, + which usually inherits from twisted.internet.protocol.Factory. The + default factory class just instantiates each Protocol, and then + sets on it an attribute called factory which + points to itself. This lets every Protocol access, + and possibly modify, the persistent configuration.

                      + +

                      It is usually useful to be able to offer the same service on + multiple ports or network addresses. This is why the Factory + does not listen to connections, and in fact does not + know anything about the network. See twisted.internet.interfaces.IReactorTCP.listenTCP, + and the other IReactor*.listen* APIs for more + information.

                      + +

                      This document will explain each step of the way.

                      + +

                      Protocols

                      + +

                      As mentioned above, this, along with auxiliary classes and + functions, is where most of the code is. A Twisted protocol + handles data in an asynchronous manner. What this means is that + the protocol never waits for an event, but rather responds to + events as they arrive from the network.

                      + +

                      Here is a simple example:

                      +

from twisted.internet.protocol import Protocol

class Echo(Protocol):

    def dataReceived(self, data):
        self.transport.write(data)
                      + +

                      This is one of the simplest protocols. It simply writes back + whatever is written to it, and does not respond to all events. Here is an + example of a Protocol responding to another event:

                      +

from twisted.internet.protocol import Protocol

class QOTD(Protocol):

    def connectionMade(self):
        self.transport.write("An apple a day keeps the doctor away\r\n")
        self.transport.loseConnection()
                      + +

                      This protocol responds to the initial connection with a well + known quote, and then terminates the connection.

                      + +

                      The connectionMade event is usually where set up of the + connection object happens, as well as any initial greetings (as + in the QOTD protocol above, which is actually based on RFC + 865). The connectionLost event is where tearing down of any + connection-specific objects is done. Here is an example:

                      +

from twisted.internet.protocol import Protocol

class Echo(Protocol):

    def connectionMade(self):
        self.factory.numProtocols = self.factory.numProtocols + 1
        if self.factory.numProtocols > 100:
            self.transport.write("Too many connections, try later")
            self.transport.loseConnection()

    def connectionLost(self, reason):
        self.factory.numProtocols = self.factory.numProtocols - 1

    def dataReceived(self, data):
        self.transport.write(data)
                      + +

                      Here connectionMade and + connectionLost cooperate to keep a count of the + active protocols in the factory. connectionMade + immediately closes the connection if there are too many active + protocols.

                      + +

                      Using the Protocol

                      + +

                      In this section, I will explain how to test your protocol + easily. (In order to see how you should write a production-grade Twisted + server, though, you should read the Writing Plug-Ins + for Twisted HOWTO as well).

                      + +

Here is code that will run the QOTD server discussed earlier:

                      +

from twisted.internet.protocol import Protocol, Factory
from twisted.internet import reactor

class QOTD(Protocol):

    def connectionMade(self):
        self.transport.write("An apple a day keeps the doctor away\r\n")
        self.transport.loseConnection()

# Next lines are magic:
factory = Factory()
factory.protocol = QOTD

# 8007 is the port you want to run under. Choose something >1024
reactor.listenTCP(8007, factory)
reactor.run()
                      + +

                      Don't worry about the last 6 magic lines -- you will + understand what they do later in the document.

                      + +

                      Helper Protocols

                      + +

Many protocols build upon similar lower-level abstractions. The most popular one in internet protocols is being line-based. Lines are usually terminated with a CR-LF combination.

                      + +

                      However, quite a few protocols are mixed - they have + line-based sections and then raw data sections. Examples + include HTTP/1.1 and the Freenet protocol.

                      + +

                      For those cases, there is the LineReceiver + protocol. This protocol dispatches to two different event + handlers - lineReceived and + rawDataReceived. By default, only + lineReceived will be called, once for each line. + However, if setRawMode is called, the protocol + will call rawDataReceived until + setLineMode is called, which returns it to using + lineReceived.

                      + +

                      Here is an example for a simple use of the line + receiver:

                      +

from twisted.protocols.basic import LineReceiver

class Answer(LineReceiver):

    answers = {'How are you?': 'Fine', None: "I don't know what you mean"}

    def lineReceived(self, line):
        if self.answers.has_key(line):
            self.sendLine(self.answers[line])
        else:
            self.sendLine(self.answers[None])
                      + +

                      Note that the delimiter is not part of the line.
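As a hedged illustration of the raw-mode switch described above, here is a sketch of a protocol that reads a "LENGTH n" line and then n bytes of raw body; the protocol, its field names, and the framing are invented for this example:

from twisted.protocols.basic import LineReceiver

class MessageReceiver(LineReceiver):
    """Read a line such as 'LENGTH 42', then 42 bytes of raw body."""

    remaining = 0

    def lineReceived(self, line):
        if line.startswith("LENGTH "):
            self.remaining = int(line.split(" ", 1)[1])
            self.body = []
            self.setRawMode()

    def rawDataReceived(self, data):
        body, rest = data[:self.remaining], data[self.remaining:]
        self.body.append(body)
        self.remaining -= len(body)
        if self.remaining == 0:
            self.bodyReceived("".join(self.body))
            # Any leftover bytes are handed back to line mode.
            self.setLineMode(rest)

    def bodyReceived(self, body):
        print "got a body of %d bytes" % len(body)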

                      + +

                      Several other, less popular, helpers exist, such as a + netstring based protocol and a prefixed-message-length + protocol.
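For example, here is a minimal sketch using the netstring helper; NetstringReceiver delivers each complete netstring payload to stringReceived, and sendString adds the framing on the way out:

from twisted.protocols.basic import NetstringReceiver

class NetstringEcho(NetstringReceiver):

    def stringReceived(self, string):
        # Echo the payload back, re-wrapped in netstring framing.
        self.sendString(string)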

                      + +

                      State Machines

                      + +

Many Twisted protocol handlers need to write a state machine to keep track of the state they are in. Here are some pieces of advice which help when writing state machines:

                      + +
                        +
• Don't write big state machines. Prefer to write a state machine which deals with one level of abstraction at a time.
• Use Python's dynamicity to create open-ended state machines (see the sketch after this list). See, for example, the code for the SMTP client.
• Don't mix application-specific code with Protocol handling code. When the protocol handler has to make an application-specific call, keep it as a method call.
                      + +
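Here is a hedged sketch of the kind of open-ended dispatch the second point refers to; the states, method names and replies are invented for illustration:

from twisted.protocols.basic import LineReceiver

class StatefulProtocol(LineReceiver):

    state = "GREETING"

    def lineReceived(self, line):
        # Dispatch to a handler named after the current state; adding a
        # new state only means adding a new state_* method.
        handler = getattr(self, "state_" + self.state, None)
        if handler is None:
            self.transport.loseConnection()
        else:
            self.state = handler(line)

    def state_GREETING(self, line):
        self.sendLine("HELLO " + line)
        return "COMMAND"

    def state_COMMAND(self, line):
        self.sendLine("OK")
        return "COMMAND"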

                      Factories

                      + +

                      As mentioned before, usually the class twisted.internet.protocol.Factory works, + and there is no need to subclass it. However, sometimes there + can be factory-specific configuration of the protocols, or + other considerations. In those cases, there is a need to + subclass Factory.

                      + +

For a factory which simply instantiates instances of a specific protocol class, instantiate Factory directly and set its protocol attribute:

                      +

from twisted.internet.protocol import Factory
from twisted.protocols.wire import Echo

myFactory = Factory()
myFactory.protocol = Echo
                      + +

                      If there is a need to easily construct factories for a + specific configuration, a factory function is often useful:

                      +

from twisted.internet.protocol import Factory, Protocol

class QOTD(Protocol):

    def connectionMade(self):
        self.transport.write(self.factory.quote + '\r\n')
        self.transport.loseConnection()


def makeQOTDFactory(quote=None):
    factory = Factory()
    factory.protocol = QOTD
    factory.quote = quote or 'An apple a day keeps the doctor away'
    return factory
                      + +

A Factory has two methods, startFactory and stopFactory, to perform application-specific building up and tearing down (since a Factory is frequently persisted, it is often not appropriate to do these in __init__ or __del__, which would frequently run too early or too late).

                      + +

                      Here is an example of a factory which allows its Protocols + to write to a special log-file:

                      +

from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver


class LoggingProtocol(LineReceiver):

    def lineReceived(self, line):
        self.factory.fp.write(line + '\n')


class LogfileFactory(Factory):

    protocol = LoggingProtocol

    def __init__(self, fileName):
        self.file = fileName

    def startFactory(self):
        self.fp = open(self.file, 'a')

    def stopFactory(self):
        self.fp.close()
                      + +

                      Putting it All Together

                      + +

So, you know what factories are, and you want to run the QOTD server with a configurable quote, do you? No problem; here is an example.

                      + +

from twisted.internet.protocol import Factory, Protocol
from twisted.internet import reactor

class QOTD(Protocol):

    def connectionMade(self):
        self.transport.write(self.factory.quote + '\r\n')
        self.transport.loseConnection()


class QOTDFactory(Factory):

    protocol = QOTD

    def __init__(self, quote=None):
        self.quote = quote or 'An apple a day keeps the doctor away'

reactor.listenTCP(8007, QOTDFactory("configurable quote"))
reactor.run()
                      + +

                      The only lines you might not understand are the last two.

                      + +

listenTCP is the method which connects a Factory to the network. It uses the reactor interface, which lets many different event loops handle the networking code without modifying end-user code. As mentioned above, if you want your code to be a production-grade Twisted server, and not a mere 20-line hack, you will want to use the Application object.

                      + +
                      + +

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/ssl.html b/vendor/Twisted-10.0.0/doc/core/howto/ssl.html
new file mode 100644
index 000000000000..b67efa318dde
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/ssl.html
@@ -0,0 +1,550 @@

Twisted Documentation: Using SSL in Twisted

                      Using SSL in Twisted

                      + +
                      + + +

                      Overview

                      + +

                      This document describes how to use SSL in Twisted servers and clients. It + assumes that you know what SSL is, what some of the major reasons to use it + are, and how to generate your own SSL certificates, in particular self-signed + certificates. It also assumes that you are comfortable with creating TCP + servers and clients as described in the server howto + and client howto. After reading this + document you should be able to create servers and clients that can use SSL to + encrypt their connections, switch from using an unencrypted channel to an + encrypted one mid-connection, and require client authentication.

                      + +

                      Using SSL in Twisted requires that you have + pyOpenSSL installed. A quick test to + verify that you do is to run from OpenSSL import SSL at a + python prompt and not get an error.

                      + +

                      SSL connections require SSL contexts. These contexts are generated by a + ContextFactory that maintains state like the SSL method, private + key file name, and certificate file name.

                      + +

                      Instead of using listenTCP and connectTCP to create a connection, use + listenSSL and + connectSSL for a + server and client respectively. These methods take a contextFactory as an + additional argument.

                      + +

                      The basic server context factory is + twisted.internet.ssl.ContextFactory, and the basic + client context factory is + twisted.internet.ssl.ClientContextFactory. They can + be used as-is or subclassed. + twisted.internet.ssl.DefaultOpenSSLContextFactory + is a convenience server class that subclasses ContextFactory + and adds default parameters to the SSL handshake and connection. Another + useful class is + twisted.internet.ssl.CertificateOptions; it is a + factory for SSL context objects that lets you specify many of the common + verification and session options so it can do the proper pyOpenSSL + initialization for you.
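As a hedged sketch of the CertificateOptions route (assuming keys/server.pem contains both the private key and the certificate in PEM form, and that PrivateCertificate.options behaves as described here):

from twisted.internet import ssl

pemData = open('keys/server.pem').read()
certificate = ssl.PrivateCertificate.loadPEM(pemData)

# options() returns a CertificateOptions instance, which can be passed
# wherever a context factory is expected, for example to listenSSL.
contextFactory = certificate.options()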

                      + +

                      Those are the big immediate differences between TCP and SSL connections, + so let's look at an example. In it and all subsequent examples it is assumed + that keys and certificates for the server, certificate authority, and client + should they exist live in a keys/ subdirectory of the directory + containing the example code, and that the certificates are self-signed.

                      + +

                      SSL echo server and client without client authentication

                      + +

                      Authentication and encryption are two separate parts of the SSL protocol. + The server almost always needs a key and certificate to authenticate itself + to the client but is usually configured to allow encrypted connections with + unauthenticated clients who don't have certificates. This common case is + demonstrated first by adding SSL support to the echo client and server in + the core examples.

                      + +

                      SSL echo server

                      + +

from twisted.internet import ssl, reactor
from twisted.internet.protocol import Factory, Protocol

class Echo(Protocol):
    def dataReceived(self, data):
        """As soon as any data is received, write it back."""
        self.transport.write(data)

if __name__ == '__main__':
    factory = Factory()
    factory.protocol = Echo
    reactor.listenSSL(8000, factory,
                      ssl.DefaultOpenSSLContextFactory(
                          'keys/server.key', 'keys/server.crt'))
    reactor.run()
                      + +

                      SSL echo client

                      + +

from twisted.internet import ssl, reactor
from twisted.internet.protocol import ClientFactory, Protocol

class EchoClient(Protocol):
    def connectionMade(self):
        print "hello, world"
        self.transport.write("hello, world!")

    def dataReceived(self, data):
        print "Server said:", data
        self.transport.loseConnection()

class EchoClientFactory(ClientFactory):
    protocol = EchoClient

    def clientConnectionFailed(self, connector, reason):
        print "Connection failed - goodbye!"
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        print "Connection lost - goodbye!"
        reactor.stop()

if __name__ == '__main__':
    factory = EchoClientFactory()
    reactor.connectSSL('localhost', 8000, factory, ssl.ClientContextFactory())
    reactor.run()
                      + +

                      Contexts are created according to a specified method. + SSLv3_METHOD, SSLv23_METHOD, and + TLSv1_METHOD are the valid constants that represent SSL methods + to use when creating a context object. DefaultOpenSSLContextFactory and + ClientContextFactory default to using SSL.SSLv23_METHOD as their + method, and it is compatible for communication with all the other methods + listed above. An older method constant, SSLv2_METHOD, exists but + is explicitly disallowed in both DefaultOpenSSLContextFactory and + ClientContextFactory for being insecure by calling + set_options(SSL.OP_NO_SSLv2) on their contexts. See + twisted.internet.ssl for additional comments.

                      + +

                      Using startTLS

                      + +

                      If you want to switch from unencrypted to encrypted traffic + mid-connection, you'll need to turn on SSL with startTLS on both + ends of the connection at the same time via some agreed-upon signal like the + reception of a particular message. You can readily verify the switch to an + encrypted channel by examining the packet payloads with a tool like + Wireshark.

                      + +

                      startTLS server

                      + +

from OpenSSL import SSL
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ServerFactory
from twisted.protocols.basic import LineReceiver

class TLSServer(LineReceiver):
    def lineReceived(self, line):
        print "received: " + line

        if line == "STARTTLS":
            print "-- Switching to TLS"
            self.sendLine('READY')
            ctx = ServerTLSContext(
                privateKeyFileName='keys/server.key',
                certificateFileName='keys/server.crt',
            )
            self.transport.startTLS(ctx, self.factory)


class ServerTLSContext(ssl.DefaultOpenSSLContextFactory):
    def __init__(self, *args, **kw):
        kw['sslmethod'] = SSL.TLSv1_METHOD
        ssl.DefaultOpenSSLContextFactory.__init__(self, *args, **kw)

if __name__ == '__main__':
    factory = ServerFactory()
    factory.protocol = TLSServer
    reactor.listenTCP(8000, factory)
    reactor.run()
                      + +

                      startTLS client

                      + +

from OpenSSL import SSL
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver

class ClientTLSContext(ssl.ClientContextFactory):
    isClient = 1
    def getContext(self):
        return SSL.Context(SSL.TLSv1_METHOD)

class TLSClient(LineReceiver):
    pretext = [
        "first line",
        "last thing before TLS starts",
        "STARTTLS"]

    posttext = [
        "first thing after TLS started",
        "last thing ever"]

    def connectionMade(self):
        for l in self.pretext:
            self.sendLine(l)

    def lineReceived(self, line):
        print "received: " + line
        if line == "READY":
            ctx = ClientTLSContext()
            self.transport.startTLS(ctx, self.factory)
            for l in self.posttext:
                self.sendLine(l)
            self.transport.loseConnection()

class TLSClientFactory(ClientFactory):
    protocol = TLSClient

    def clientConnectionFailed(self, connector, reason):
        print "connection failed: ", reason.getErrorMessage()
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        print "connection lost: ", reason.getErrorMessage()
        reactor.stop()

if __name__ == "__main__":
    factory = TLSClientFactory()
    reactor.connectTCP('localhost', 8000, factory)
    reactor.run()
                      + +

                      startTLS is a transport method that gets passed a context. + It is invoked at an agreed-upon time in the data reception method of the + client and server protocols. The ServerTLSContext and + ClientTLSContext classes used above inherit from the basic + server and client context factories used in the earlier echo examples and + illustrate two more ways of setting an SSL method.

                      + +

                      Client authentication

                      + +

                      Server and client-side changes to require client authentication fall + largely under the dominion of pyOpenSSL, but few examples seem to exist on + the web so for completeness a sample server and client are provided here.

                      + +

                      Client-authenticating server

                      + +

from OpenSSL import SSL
from twisted.internet import ssl, reactor
from twisted.internet.protocol import Factory, Protocol

class Echo(Protocol):
    def dataReceived(self, data):
        self.transport.write(data)

def verifyCallback(connection, x509, errnum, errdepth, ok):
    if not ok:
        print 'invalid cert from subject:', x509.get_subject()
        return False
    else:
        print "Certs are fine"
        return True

if __name__ == '__main__':
    factory = Factory()
    factory.protocol = Echo

    myContextFactory = ssl.DefaultOpenSSLContextFactory(
        'keys/server.key', 'keys/server.crt'
        )

    ctx = myContextFactory.getContext()

    ctx.set_verify(
        SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
        verifyCallback
        )

    # Since we have self-signed certs we have to explicitly
    # tell the server to trust them.
    ctx.load_verify_locations("keys/ca.pem")

    reactor.listenSSL(8000, factory, myContextFactory)
    reactor.run()
                      + +

                      Use the set_verify method to set the verification mode for a + context object and the verification callback. The mode is either + VERIFY_NONE or VERIFY_PEER. If + VERIFY_PEER is set, the mode can be augmented by + VERIFY_FAIL_IF_NO_PEER_CERT and/or + VERIFY_CLIENT_ONCE.

                      + +

                      The callback takes as its arguments a connection object, X509 object, + error number, error depth, and return code. The purpose of the callback is + to allow you to enforce additional restrictions on the verification. Thus, + if the return code is False, you should return False; if the return code is + True and further verification passes, return True.

                      + + +

                      Client with certificates

                      + +

from OpenSSL import SSL
from twisted.internet import ssl, reactor
from twisted.internet.protocol import ClientFactory, Protocol

class EchoClient(Protocol):
    def connectionMade(self):
        print "hello, world"
        self.transport.write("hello, world!")

    def dataReceived(self, data):
        print "Server said:", data
        self.transport.loseConnection()

class EchoClientFactory(ClientFactory):
    protocol = EchoClient

    def clientConnectionFailed(self, connector, reason):
        print "Connection failed - goodbye!"
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        print "Connection lost - goodbye!"
        reactor.stop()

class CtxFactory(ssl.ClientContextFactory):
    def getContext(self):
        self.method = SSL.SSLv23_METHOD
        ctx = ssl.ClientContextFactory.getContext(self)
        ctx.use_certificate_file('keys/client.crt')
        ctx.use_privatekey_file('keys/client.key')

        return ctx

if __name__ == '__main__':
    factory = EchoClientFactory()
    reactor.connectSSL('localhost', 8000, factory, CtxFactory())
    reactor.run()
                      + +

                      Other facilities

                      + +

                      twisted.protocols.amp supports encrypted + connections and exposes a startTLS method one can use or + subclass. twisted.web has built-in SSL support in + its client, http, and xmlrpc modules.
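For instance, here is a minimal sketch of fetching a page over HTTPS with the twisted.web client (the URL is only a placeholder); getPage uses SSL automatically for https URLs when pyOpenSSL is installed:

from twisted.internet import reactor
from twisted.web.client import getPage

def printPage(data):
    print data
    reactor.stop()

def printError(failure):
    print failure.getErrorMessage()
    reactor.stop()

d = getPage("https://example.com/")
d.addCallbacks(printPage, printError)
reactor.run()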

                      + +

                      Conclusion

                      + +

                      After reading through this tutorial, you should be able to:

                      +
                        +
• Use listenSSL and connectSSL to create servers and clients that use SSL
• Use startTLS to switch a channel from being unencrypted to using SSL mid-connection
• Add server and client support for client authentication
                      + +
                      + +

                      Index

                      + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/stylesheet-unprocessed.css b/vendor/Twisted-10.0.0/doc/core/howto/stylesheet-unprocessed.css new file mode 100644 index 000000000000..e4a62cc1580d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/stylesheet-unprocessed.css @@ -0,0 +1,20 @@ + +span.footnote { + vertical-align: super; + font-size: small; +} + +span.footnote:before +{ + content: "[Footnote: "; +} + +span.footnote:after +{ + content: "]"; +} + +div.note:before +{ + content: "Note: "; +} diff --git a/vendor/Twisted-10.0.0/doc/core/howto/stylesheet.css b/vendor/Twisted-10.0.0/doc/core/howto/stylesheet.css new file mode 100644 index 000000000000..3c5961e7e3f9 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/stylesheet.css @@ -0,0 +1,189 @@ + +body +{ + margin-left: 2em; + margin-right: 2em; + border: 0px; + padding: 0px; + font-family: sans-serif; + } + +.done { color: #005500; background-color: #99ff99 } +.notdone { color: #550000; background-color: #ff9999;} + +pre +{ + padding: 1em; + border: thin black solid; + line-height: 1.2em; +} + +.boxed +{ + padding: 1em; + border: thin black solid; +} + +.shell +{ + background-color: #ffffdd; +} + +.python +{ + background-color: #dddddd; +} + +.htmlsource +{ + background-color: #dddddd; +} + +.py-prototype +{ + background-color: #ddddff; +} + + +.python-interpreter +{ + background-color: #ddddff; +} + +.doit +{ + border: thin blue dashed ; + background-color: #0ef +} + +.py-src-comment +{ + color: #1111CC +} + +.py-src-keyword +{ + color: #3333CC; + font-weight: bold; + line-height: 1.0em +} + +.py-src-parameter +{ + color: #000066; + font-weight: bold; + line-height: 1.0em +} + +.py-src-identifier +{ + color: #CC0000 +} + +.py-src-string +{ + + color: #115511 +} + +.py-src-endmarker +{ + display: block; /* IE hack; prevents following line from being sucked into the py-listing box. */ +} + +.py-linenumber +{ + background-color: #cdcdcd; + float: left; + margin-top: 0px; + width: 4.0em +} + +.py-listing, .html-listing, .listing +{ + margin: 1ex; + border: thin solid black; + background-color: #eee; +} + +.py-listing pre, .html-listing pre, .listing pre +{ + margin: 0px; + border: none; + border-bottom: thin solid black; +} + +.py-listing .python +{ + margin-top: 0; + margin-bottom: 0; + border: none; + border-bottom: thin solid black; + } + +.html-listing .htmlsource +{ + margin-top: 0; + margin-bottom: 0; + border: none; + border-bottom: thin solid black; + } + +.caption +{ + text-align: center; + padding-top: 0.5em; + padding-bottom: 0.5em; +} + +.filename +{ + font-style: italic; + } + +.manhole-output +{ + color: blue; +} + +hr +{ + display: inline; + } + +ul +{ + padding: 0px; + margin: 0px; + margin-left: 1em; + padding-left: 1em; + border-left: 1em; + } + +li +{ + padding: 2px; + } + +dt +{ + font-weight: bold; + margin-left: 1ex; + } + +dd +{ + margin-bottom: 1em; + } + +div.note +{ + background-color: #FFFFCC; + margin-top: 1ex; + margin-left: 5%; + margin-right: 5%; + padding-top: 1ex; + padding-left: 5%; + padding-right: 5%; + border: thin black solid; +} diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tap.html b/vendor/Twisted-10.0.0/doc/core/howto/tap.html new file mode 100644 index 000000000000..6a4485f21566 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tap.html @@ -0,0 +1,346 @@ + + +Twisted Documentation: Writing a twistd Plugin + + + + +

                      Writing a twistd Plugin

                      + +
                      + + +

                      This document describes adding subcommands to +the twistd command, as a way to facilitate the deployment +of your applications. (This feature was added in Twisted 2.5)

                      + +

The target audience of this document is developers who have written a Twisted application and need a command line-based deployment mechanism.

                      + +

                      There are a few prerequisites to understanding this document:

                      +
                        +
                      • A basic understanding of the Twisted Plugin System (i.e., + the twisted.plugin module) is + necessary, however, step-by-step instructions will be + given. Reading The Twisted Plugin + System is recommended, in particular the Extending an + Existing Program section.
                      • +
                      • The Application infrastructure + is used in twistd plugins; in particular, you should + know how to expose your program's functionality as a Service.
                      • +
                      • In order to parse command line arguments, the twistd plugin + mechanism relies + on twisted.python.usage, which is documented + in Using usage.Options.
                      • +
                      + +

                      Goals

                      + +

                      After reading this document, the reader should be able to expose +their Service-using application as a subcommand +of twistd, taking into consideration whatever was passed +on the command line.

                      + +

                      A note on .tap files

                      + +

                      Readers may be confused about a historical file type associated +with Twisted, the .tap file. This was a kind of file that +was generated by a program named mktap and +which twistd can read. .tap files are +deprecated; this document has nothing to do with them, although the +technology described herein is very closely related to the old +system. Existing plugins that were written for the mktap system are +compatible with this twistd plugin system; the following +commands, +

                      + +
                      +$ mktap [foo] [options...]
                      +$ twistd -n -f [foo].tap
                      +
                      + +

                      +are equivalent to the command:

                      + +
                      $ twistd -n [foo] [options...]
                      + +

                      Alternatives to twistd plugins

                      +

                      The major alternative to the twistd plugin mechanism is the .tac +file, which is a simple script to be used with the +twistd -y/--python parameter. The twistd plugin mechanism +exists to offer a more extensible command-line-driven interface to +your application. For more information on .tac files, see +the document Using the Twisted Application +Framework.

                      + + +

                      Creating the plugin

                      + +

                      The following directory structure is assumed of your project:

                      + +
                        +
• MyProject - Top level directory
  • myproject - Python package
    • __init__.py

                      + During development of your project, Twisted plugins can be loaded + from a special directory in your project, assuming your top level + directory ends up in sys.path. Create a directory + named twisted containing a directory + named plugins, and add a file + named myproject_plugin.py to it. This file will contain your + plugin. Note that you should not add any __init__.py files + to this directory structure, and the plugin file should not + be named myproject.py (because that would conflict with + your project's module name). +

                      + +

                      + In this file, define an object which provides the interfaces + twisted.plugin.IPlugin + and twisted.application.service.IServiceMaker. +

                      + +

                      The tapname attribute of your IServiceMaker provider +will be used as the subcommand name in a command +like twistd [subcommand] [args...], and +the options attribute (which should be +a usage.Options +subclass) will be used to parse the given args.

                      + +

from zope.interface import implements

from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from twisted.application import internet

from myproject import MyFactory


class Options(usage.Options):
    optParameters = [["port", "p", 1235, "The port number to listen on."]]


class MyServiceMaker(object):
    implements(IServiceMaker, IPlugin)
    tapname = "myproject"
    description = "Run this! It'll make your dog happy."
    options = Options

    def makeService(self, options):
        """
        Construct a TCPServer from a factory defined in myproject.
        """
        return internet.TCPServer(int(options["port"]), MyFactory())


# Now construct an object which *provides* the relevant interfaces
# The name of this variable is irrelevant, as long as there is *some*
# name bound to a provider of IPlugin and IServiceMaker.

serviceMaker = MyServiceMaker()
                      + +

                      + Now running twistd --help should + print myproject in the list of available subcommands, + followed by the description that we specified in the + plugin. twistd -n myproject would, + assuming we defined a MyFactory factory + inside myproject, start a listening server on port 1235 + with that factory. +

                      + +

                      Using cred with your TAP

                      + +

                      + Twisted ships with a robust authentication framework to use with + your application. If your server needs authentication functionality, + and you haven't read about twisted.cred + yet, read up on it first. +

                      + +

                      + If you are building a twistd plugin and you want to support a wide + variety of authentication patterns, Twisted provides an easy-to-use + mixin for your Options subclass: + strcred.AuthOptionMixin. + The following code is an example of using this mixin: +

                      + +

from zope.interface import implements

from twisted.cred import credentials, portal, strcred
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application import internet
from twisted.application.service import IServiceMaker
from myserver import myservice

class ServerOptions(usage.Options, strcred.AuthOptionMixin):
    # This part is optional; it tells AuthOptionMixin what
    # kinds of credential interfaces the user can give us.
    supportedInterfaces = (credentials.IUsernamePassword,)

    optParameters = [
        ["port", "p", 1234, "Server port number"],
        ["host", "h", "localhost", "Server hostname"]]

class MyServerServiceMaker(object):
    implements(IServiceMaker, IPlugin)
    tapname = "myserver"
    description = "This server does nothing productive."
    options = ServerOptions

    def makeService(self, options):
        """Construct a service object."""
        # The realm is a custom object that your server defines.
        realm = myservice.MyServerRealm(options["host"])

        # The portal is something Cred can provide, as long as
        # you have a list of checkers that you'll support. This
        # list is provided by AuthOptionMixin.
        myPortal = portal.Portal(realm, options["credCheckers"])

        # OR, if you know you might get multiple interfaces, and
        # only want to give your application one of them, you
        # also have that option with AuthOptionMixin:
        interface = credentials.IUsernamePassword
        myPortal = portal.Portal(realm, options["credInterfaces"][interface])

        # The protocol factory is, like the realm, something you implement.
        factory = myservice.ServerFactory(realm, myPortal)

        # Finally, return a service that will listen for connections.
        return internet.TCPServer(int(options["port"]), factory)


# As in our example above, we have to construct an object that
# provides the IPlugin and IServiceMaker interfaces.

serviceMaker = MyServerServiceMaker()
                      + +

                      + Now that you have your TAP configured to support any authentication + we can throw at it, you're ready to use it. Here is an example of + starting your server using the /etc/passwd file for + authentication. (Clearly, this won't work on servers with shadow + passwords.) +

                      + +
                      +$ twistd myserver --auth passwd:/etc/passwd
                      +
                      + +

                      + For a full list of cred plugins supported, see twisted.plugins, or use the command-line help: +

                      + +
                      +$ twistd myserver --help-auth
                      +$ twistd myserver --help-auth-type passwd
                      +
                      + +

                      Conclusion

                      + +

                      You should now be able to

                      +
                        +
• Create a twistd plugin
• Incorporate authentication into your plugin
• Use it from your development environment
• Install it correctly and use it in deployment
                      + + +
                      + +

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/telnet.html b/vendor/Twisted-10.0.0/doc/core/howto/telnet.html
new file mode 100644
index 000000000000..cfc0665f27a3
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/telnet.html
@@ -0,0 +1,83 @@

Twisted Documentation: Using telnet to manipulate a twisted server

                      Using telnet to manipulate a twisted server

                      +
                        +
                        + + +

                        To start things off, we're going to create a simple server that just +gives you remote access to a Python interpreter. We will use a telnet client +to access this server.

                        + +

Run twistd telnet -p 4040 -u admin -w admin at your shell prompt. This creates an Application with a telnet server that listens on port 4040; it will start accepting connections on that port. Try connecting with your favorite telnet utility to 127.0.0.1 port 4040.

                        + +
                        +$ telnet localhost 4040
                        +Trying 127.0.0.1...
                        +Connected to localhost.
                        +Escape character is '^]'.
                        +
                        +twisted.manhole.telnet.ShellFactory
                        +Twisted 1.1.0
                        +username: admin
                        +password: admin
                        +>>>
                        +
                        + +

                        Now, you should see a Python prompt -- +>>>. You can type any valid Python code +here. Let's try looking around.

                        + +
                        +>>> dir()
                        +['__builtins__']
                        +
                        + +

                        Ok, not much. let's play a little more:

                        +
                        +>>> import __main__
                        +>>> dir(__main__)
                        +['__builtins__', '__doc__', '__name__', 'os', 'run', 'string', 'sys']
                        +
                        +>>> service
                        +<twisted.application.internet.TCPServer instance at 0x10270f48>
                        +>>> service._port
                        +<twisted.manhole.telnet.ShellFactory on 4040>
                        +>>> service.parent
                        +<twisted.application.service.MultiService instance at 0x1024d7a8>
                        +
                        + +

The service object is the service used to serve the telnet shell; it is listening on port 4040 with something called a ShellFactory. Its parent is a twisted.application.service.MultiService, a collection of services. We can keep getting the parent attribute of services until we hit the root of all services.

                        + +

                        As you can see, this is quite useful - we can introspect a +running process, see the internal objects, and even change +their attributes. The telnet server can of course be used from straight +Python code; you can see how to do this by reading the code for +twisted.tap.telnet.

                        + +

A final note - if you want access to be more secure, you can even have the telnet server use SSL. Assuming you have the appropriate certificate and private key files, you can run twistd telnet -p ssl:443:privateKey=mykey.pem:certKey=cert.pem -u admin -w admin. See twisted.application.strports for more examples of options for listening on a port.

                        + +
                        + +

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/template.tpl b/vendor/Twisted-10.0.0/doc/core/howto/template.tpl
new file mode 100644
index 000000000000..1fbb5177bb96
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/template.tpl
@@ -0,0 +1,23 @@

Twisted Documentation: (HTML page template skeleton: title and body placeholders, "Index" link, and "Version:" footer.)

diff --git a/vendor/Twisted-10.0.0/doc/core/howto/testing.html b/vendor/Twisted-10.0.0/doc/core/howto/testing.html
new file mode 100644
index 000000000000..dd5d3143ab4f
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/testing.html
@@ -0,0 +1,168 @@

Twisted Documentation: Writing tests for Twisted code using Trial

                        Writing tests for Twisted code using Trial

                        + +
                        + + + +

                        Trial basics

                        + +

                        Trial is Twisted's testing framework. It provides a +library for writing test cases and utility functions for working with the +Twisted environment in your tests, and a command-line utility for running your +tests. Trial is built on the Python standard library's unittest +module.

                        + +

                        To run all the Twisted tests, do:

                        + +
                        +$ trial twisted
                        +
                        + +

                        Refer to the Trial man page for other command-line options.

                        + +

                        Trial directories

                        + +

                        You might notice a new _trial_temp folder in the +current working directory after Trial completes the tests. This folder is the +working directory for the Trial process. It can be used by unit tests and +allows them to write whatever data they like to disk, and not worry +about polluting the current working directory.

                        + +

                        Folders named _trial_temp-<counter> are +created if two instances of Trial are run in parallel from the same directory, +so as to avoid giving two different test-runs the same temporary directory.

                        + +

                        The twisted.python.lockfile utility is used to lock +the _trial_temp directories. On Linux, this results +in symlinks to pids. On Windows, directories are created with a single file with +a pid as the contents. These lock files will be cleaned up if Trial exits normally +and otherwise they will be left behind. They should be cleaned up the next time +Trial tries to use the directory they lock, but it's also safe to delete them +manually if desired.

                        + +

                        Twisted-specific quirks: reactor, Deferreds, callLater

                        + +

                        The standard Python unittest framework, from which Trial is +derived, is ideal for testing code with a fairly linear flow of control. +Twisted is an asynchronous networking framework which provides a clean, +sensible way to establish functions that are run in response to events (like +timers and incoming data), which creates a highly non-linear flow of control. +Trial has a few extensions which help to test this kind of code. This section +provides some hints on how to use these extensions and how to best structure +your tests.

                        + +

                        Leave the Reactor as you found it

                        + +

                        Trial runs the entire test suite (over four thousand tests) in a single +process, with a single reactor. Therefore it is important that your test +leave the reactor in the same state as it found it. Leftover timers may +expire during somebody else's unsuspecting test. Leftover connection attempts +may complete (and fail) during a later test. These lead to intermittent +failures that wander from test to test and are very time-consuming to track +down.

                        + +

                        If your test leaves event sources in the reactor, Trial will fail the test. +The tearDown method is a good place to put cleanup code: it is +always run regardless of whether your test passes or fails (like a bare +except clause in a try-except construct). Exceptions in tearDown + are flagged as errors and flunk the test. +TestCase.addCleanup is +another useful tool for cleaning up. With it, you can register callables to +clean up resources as the test allocates them. Generally, code should be +written so that only resources allocated in the tests need to be cleaned up in +the tests. Resources which are allocated internally by the implementation +should be cleaned up by the implementation.
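For example, here is a small sketch of using addCleanup to close a listening port opened in setUp (the factory and test are made up for illustration):

from twisted.internet import reactor
from twisted.internet.protocol import Factory, Protocol
from twisted.trial import unittest

class PortCleanupTests(unittest.TestCase):

    def setUp(self):
        factory = Factory()
        factory.protocol = Protocol
        self.port = reactor.listenTCP(0, factory)
        # stopListening returns a Deferred; Trial waits for Deferreds
        # returned by cleanup functions before starting the next test.
        self.addCleanup(self.port.stopListening)

    def test_portIsListening(self):
        self.assertTrue(self.port.getHost().port > 0)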

                        + +

                        If your code uses Deferreds or depends on the reactor running, you can +return a Deferred from your test method, setUp, or tearDown and Trial will +do the right thing. That is, it will run the reactor for you until the +Deferred has triggered and its callbacks have been run. Don't use +reactor.run(), reactor.stop(), reactor.crash() +or reactor.iterate() in your tests.
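Here is a minimal sketch of a test that returns a Deferred; Trial waits until it fires before deciding whether the test passed:

from twisted.internet import defer, reactor
from twisted.trial import unittest

class DeferredReturnTests(unittest.TestCase):

    def test_delayedResult(self):
        d = defer.Deferred()
        # The delayed call fires before the test ends, so it does not
        # leave anything pending in the reactor.
        reactor.callLater(0.1, d.callback, 3)
        d.addCallback(self.assertEqual, 3)
        return d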

                        + +

                        Calls to reactor.callLater create IDelayedCalls. These need to be run +or cancelled during a test, otherwise they will outlive the test. This would +be bad, because they could interfere with a later test, causing confusing +failures in unrelated tests! For this reason, Trial checks the reactor to make +sure there are no leftover IDelayedCalls in the reactor after a +test, and will fail the test if there are. The cleanest and simplest way to +make sure this all works is to return a Deferred from your test.

                        + +

                        Similarly, sockets created during a test should be closed by the end of the +test. This applies to both listening ports and client connections. So, calls +to reactor.listenTCP (and listenUNIX, and so on) +return IListeningPorts, and these should be +cleaned up before a test ends by calling their stopListening method. +Calls to reactor.connectTCP return IConnectors, which should be cleaned +up by calling their disconnect method. Trial +will warn about unclosed sockets.

                        + +

                        The golden rule is: If your tests call a function which returns a Deferred, +your test should return a Deferred.

                        + +

                        Using Timers to Detect Failing Tests

                        + +

                        It is common for tests to establish some kind of fail-safe timeout that +will terminate the test in case something unexpected has happened and none of +the normal test-failure paths are followed. This timeout puts an upper bound +on the time that a test can consume, and prevents the entire test suite from +stalling because of a single test. This is especially important for the +Twisted test suite, because it is run automatically by the buildbot whenever +changes are committed to the Subversion repository.

                        + +

                        The way to do this in Trial is to set the .timeout attribute +on your unit test method. Set the attribute to the number of seconds you wish +to elapse before the test raises a timeout error. Trial has a default timeout +which will be applied even if the timeout attribute is not set. +The Trial default timeout is usually sufficient and should be overridden only +in unusual cases.
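For example, a small sketch (someSlowOperation is a stand-in for whatever returns the Deferred under test):

from twisted.trial import unittest

class TimeoutExampleTests(unittest.TestCase):

    def test_slowOperation(self):
        # someSlowOperation is hypothetical; it returns a Deferred.
        return someSlowOperation()

    # Fail this test if its Deferred has not fired within 10 seconds.
    test_slowOperation.timeout = 10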

                        + +

                        Interacting with warnings in tests

                        + +

Trial includes specific support for interacting with Python's warnings module. This support allows warning-emitting code to be written test-driven, just as any other code would be. It also improves the way in which warnings are reported when a test suite is running.

                        + +

                        TestCase.assertWarns and TestCase.flushWarnings +allow tests to be written which make assertions about what warnings have +been emitted during a particular test method. flushWarnings is +the new method and has a simpler and more flexible API and should be +preferred when writing new code. In order to test a warning with +flushWarnings, write a test which first invokes the code which +will emit a warning and then calls flushWarnings and makes +assertions about the result. For example:

                        + +

def test_warning(self):
    warnings.warn("foo is bad")
    self.assertEqual(len(self.flushWarnings()), 1)
                        + +

                        Warnings emitted in tests which are not flushed will be included by the +default reporter in its output after the result of the test. If Python's +warnings filter system (see the -W command +line option to Python) is configured to treat a warning as an error, +then unflushed warnings will causes tests to fail and will be included in +the summary section of the default reporter. Note that unlike usual +operation, when warnings.warn is called as part of a test +method, it will not raise an exception when warnings have been configured as +errors. However, if called outside of a test method (for example, at module +scope in a test module or a module imported by a test module) then it +will raise an exception.

                        + +
                        + +

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/threading.html b/vendor/Twisted-10.0.0/doc/core/howto/threading.html
new file mode 100644
index 000000000000..2c7a51119251
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/threading.html
@@ -0,0 +1,213 @@

Twisted Documentation: Using Threads in Twisted

                        Using Threads in Twisted

                        + +
                        + + +

                        Running code in a thread-safe manner

                        + +

                        Most code in Twisted is not thread-safe. For example, + writing data to a transport from a protocol is not thread-safe. + Therefore, we want a way to schedule methods to be run in the + main event loop. This can be done using the function twisted.internet.interfaces.IReactorThreads.callFromThread:

                        +

from twisted.internet import reactor

def notThreadSafe(x):
    """do something that isn't thread-safe"""
    # ...

def threadSafeScheduler():
    """Run in thread-safe manner."""
    reactor.callFromThread(notThreadSafe, 3)  # will run 'notThreadSafe(3)'
                                              # in the event loop
reactor.run()
                        + +

                        Running code in threads

                        + +

                        Sometimes we may want to run methods in threads - for + example, in order to access blocking APIs. Twisted provides + methods for doing so using the IReactorThreads API (twisted.internet.interfaces.IReactorThreads). + Additional utility functions are provided in twisted.internet.threads. Basically, these + methods allow us to queue methods to be run by a thread + pool.

                        + +

                        For example, to run a method in a thread we can do:

                        +

from twisted.internet import reactor

def aSillyBlockingMethod(x):
    import time
    time.sleep(2)
    print x

# run method in thread
reactor.callInThread(aSillyBlockingMethod, "2 seconds have passed")
reactor.run()
                        + +

                        Utility Methods

                        + +

                        The utility methods are not part of the twisted.internet.reactor APIs, but are implemented + in twisted.internet.threads.

                        + +

                        If we have multiple methods to run sequentially within a thread, + we can do:

                        + +

from twisted.internet import reactor, threads

def aSillyBlockingMethodOne(x):
    import time
    time.sleep(2)
    print x

def aSillyBlockingMethodTwo(x):
    print x

# run both methods sequentially in a thread
commands = [(aSillyBlockingMethodOne, ["Calling First"], {})]
commands.append((aSillyBlockingMethodTwo, ["And the second"], {}))
threads.callMultipleInThread(commands)
reactor.run()
                        + +

                        For functions whose results we wish to get, we can have the + result returned as a Deferred:

                        +


from twisted.internet import reactor, threads

def doLongCalculation():
    # .... do long calculation here ...
    return 3

def printResult(x):
    print x

# run method in thread and get result as defer.Deferred
d = threads.deferToThread(doLongCalculation)
d.addCallback(printResult)
reactor.run()
                        + +

                        If you wish to call a method in the reactor thread and get its result, + you can use blockingCallFromThread:

                        + +


from twisted.internet import threads, reactor, defer
from twisted.web.client import getPage
from twisted.web.error import Error

def inThread():
    try:
        result = threads.blockingCallFromThread(
            reactor, getPage, "http://twistedmatrix.com/")
    except Error, exc:
        print exc
    else:
        print result
    reactor.callFromThread(reactor.stop)

reactor.callInThread(inThread)
reactor.run()
                        + +

                        blockingCallFromThread will return the object or raise + the exception returned or raised by the function passed to it. If the + function passed to it returns a Deferred, it will return the value the + Deferred is called back with or raise the exception it is errbacked + with.
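For example (a small sketch with made-up names, illustrating the Deferred case just described), a function that returns a Deferred can be passed to blockingCallFromThread and the calling thread gets the eventual result back as a plain value:

from twisted.internet import reactor, task, threads

def deferredResult():
    # Returns a Deferred that fires with 42 after one second.
    return task.deferLater(reactor, 1.0, lambda: 42)

def inThread():
    # Blocks this worker thread until the Deferred fires in the reactor
    # thread, then hands back its result as an ordinary value.
    value = threads.blockingCallFromThread(reactor, deferredResult)
    print value # prints 42
    reactor.callFromThread(reactor.stop)

reactor.callInThread(inThread)
reactor.run()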

                        + +

                        Managing the Thread Pool

                        + +

                        The thread pool is implemented by twisted.python.threadpool.ThreadPool.

                        + +

We may want to modify the size of the thread pool, increasing or decreasing the number of threads in use. We can do this quite easily:

                        + +


from twisted.internet import reactor

reactor.suggestThreadPoolSize(30)
                        + +

                        The default size of the thread pool depends on the reactor being used; + the default reactor uses a minimum size of 5 and a maximum size of 10. Be + careful that you understand threads and their resource usage before + drastically altering the thread pool sizes.
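If finer control is needed than suggestThreadPoolSize offers, the pool returned by the reactor can be resized directly (a sketch, assuming the default reactor's thread pool; the chosen bounds are arbitrary):

from twisted.internet import reactor

# Fetch the reactor's ThreadPool and set explicit minimum/maximum bounds.
pool = reactor.getThreadPool()
pool.adjustPoolsize(minthreads=3, maxthreads=20)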

                        +
                        + +

                        Index

Version: 10.0.0

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/time.html b/vendor/Twisted-10.0.0/doc/core/howto/time.html
new file mode 100644
index 000000000000..6365c36781ec
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/time.html
@@ -0,0 +1,118 @@

Twisted Documentation: Scheduling tasks for the future

                        Scheduling tasks for the future

                        +
                          +
                          + + +

                          Let's say we want to run a task X seconds in the future. + The way to do that is defined in the reactor interface twisted.internet.interfaces.IReactorTime:

                          +


from twisted.internet import reactor

def f(s):
    print "this will run 3.5 seconds after it was scheduled: %s" % s

reactor.callLater(3.5, f, "hello, world")

# f() will only be called if the event loop is started.
reactor.run()
                          + +

                          If the result of the function is important or if it may be necessary + to handle exceptions it raises, then the twisted.internet.task.deferLater utility conveniently + takes care of creating a Deferred and setting up a delayed + call:

                          +


from twisted.internet import task
from twisted.internet import reactor

def f(s):
    return "This will run 3.5 seconds after it was scheduled: %s" % s

d = task.deferLater(reactor, 3.5, f, "hello, world")
def called(result):
    print result
d.addCallback(called)

# f() will only be called if the event loop is started.
reactor.run()
                          + +

                          If we want a task to run every X seconds repeatedly, we can + use twisted.internet.task.LoopingCall:

                          +


from twisted.internet import task
from twisted.internet import reactor

def runEverySecond():
    print "a second has passed"

l = task.LoopingCall(runEverySecond)
l.start(1.0) # call every second

# l.stop() will stop the looping calls
reactor.run()
                          + +

                          If we want to cancel a task that we've scheduled:

                          +


from twisted.internet import reactor

def f():
    print "I'll never run."

callID = reactor.callLater(5, f)
callID.cancel()
reactor.run()
                          + +

                          As with all reactor-based code, in order for scheduling to work the reactor must be started using reactor.run().

                          +
                          + +

                          Index

Version: 10.0.0

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/backends.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/backends.html
new file mode 100644
index 000000000000..a5c3a69fd65e
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/backends.html
@@ -0,0 +1,1207 @@

Twisted Documentation: The Evolution of Finger: pluggable backends

                          The Evolution of Finger: pluggable backends

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the fifth part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

In this part we will add several new backends to our finger service using the component-based architecture developed in The Evolution of Finger: moving to a component based architecture. This will show just how convenient it is to implement new back-ends once we have moved to a component-based architecture. Note that here we also reuse an interface we previously wrote, FingerSetterFactory, by supporting a single method. We manage to preserve the service's ignorance of the network.

                          + +

                          Another Back-end

                          + + +

                          +Full source code here:


                          # Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer, utils +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc +from zope.interface import Interface, implements +import cgi +import pwd + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + 
implements(resource.IResource) + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + self.putChild('RPC2', UserStatusXR(self.service)) + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +components.registerAdapter(UserStatusTree, IFingerService, + resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '<h1>%s</h1>'%self.user+'<p>%s</p>'%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + +# Another back-end + +class LocalFingerService(service.Service): + + implements(IFingerService) + + def getUser(self, user): + # need a local finger daemon running for this to work + return utils.getProcessOutput("finger", [user]) + + def getUsers(self): + return defer.succeed([]) + + +application = service.Application('finger', uid=1, gid=1) +f = LocalFingerService() +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +
                          +

                          + +

                          We've already written this, but now we get more for less work: +the network code is completely separate from the back-end.

                          + + +

                          Yet Another Back-end: Doing the Standard Thing

                          + + +

                          +Full source code here:


                          # Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer, utils +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc +from zope.interface import Interface, implements +import cgi +import pwd +import os + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + 
+ implements(resource.IResource) + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + self.putChild('RPC2', UserStatusXR(self.service)) + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +components.registerAdapter(UserStatusTree, IFingerService, + resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '<h1>%s</h1>'%self.user+'<p>%s</p>'%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + +# Yet another back-end + +class LocalFingerService(service.Service): + + implements(IFingerService) + + def getUser(self, user): + user = user.strip() + try: + entry = pwd.getpwnam(user) + except KeyError: + return defer.succeed("No such user") + try: + f = file(os.path.join(entry[5],'.plan')) + except (IOError, OSError): + return defer.succeed("No such user") + data = f.read() + data = data.strip() + f.close() + return defer.succeed(data) + + def getUsers(self): + return defer.succeed([]) + + +application = service.Application('finger', uid=1, gid=1) +f = LocalFingerService() +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +
                          +

                          + +

Not much to say, except that now we can churn out backends like crazy. Feel like doing a back-end for Advogato, for example? Dig out the XML-RPC client support Twisted has, and get to work!
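As a hedged sketch of what such a back-end might look like (the endpoint URL and remote method name are hypothetical, not part of this tutorial), twisted.web.xmlrpc.Proxy already returns Deferreds from callRemote, so it slots straight into IFingerService:

# Hypothetical XML-RPC back-end (sketch only).
from twisted.application import service
from twisted.internet import defer
from twisted.web.xmlrpc import Proxy
from zope.interface import implements

class XMLRPCFingerService(service.Service):

    implements(IFingerService) # IFingerService as defined in the listing above

    def __init__(self, url):
        self.proxy = Proxy(url) # e.g. 'http://example.com/RPC2'

    def getUser(self, user):
        # callRemote returns a Deferred that fires with the server's reply.
        d = self.proxy.callRemote('getUser', user.strip())
        d.addErrback(lambda _: "Could not reach remote finger server")
        return d

    def getUsers(self):
        return defer.succeed([])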

                          + +
                          + +

                          Index

Version: 10.0.0

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/client.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/client.html
new file mode 100644
index 000000000000..446a7b04d755
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/client.html
@@ -0,0 +1,260 @@

Twisted Documentation: The Evolution of Finger: a Twisted finger client

                          The Evolution of Finger: a Twisted finger client

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the ninth part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

                          In this part, we develop a client for the finger server: a proxy finger +server which forwards requests to another finger server.

                          + +

                          Finger Proxy

                          + +

Writing new clients with Twisted is much like writing new servers. We implement the protocol, which just gathers up all the data and gives it to the factory. The factory keeps a deferred which is triggered if the connection either fails or succeeds. When we use the client, we first make sure the deferred will never fail, by producing a message in that case. Implementing a wrapper around the client which just returns the deferred is a common pattern. While less flexible than using the factory directly, it's also more convenient.

                          + + +


                          # finger proxy +from twisted.application import internet, service +from twisted.internet import defer, protocol, reactor +from twisted.protocols import basic +from twisted.python import components +from zope.interface import Interface, implements + + +def catchError(err): + return "Internal error in server" + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value) + self.transport.loseConnection() + d.addCallback(writeValue) + + + +class FingerFactoryFromService(protocol.ClientFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerClient(protocol.Protocol): + + def connectionMade(self): + self.transport.write(self.factory.user+"\r\n") + self.buf = [] + + def dataReceived(self, data): + self.buf.append(data) + + def connectionLost(self, reason): + self.factory.gotData(''.join(self.buf)) + +class FingerClientFactory(protocol.ClientFactory): + + protocol = FingerClient + + def __init__(self, user): + self.user = user + self.d = defer.Deferred() + + def clientConnectionFailed(self, _, reason): + self.d.errback(reason) + + def gotData(self, data): + self.d.callback(data) + + +def finger(user, host, port=79): + f = FingerClientFactory(user) + reactor.connectTCP(host, port, f) + return f.d + + +class ProxyFingerService(service.Service): + implements(IFingerService) + + def getUser(self, user): + try: + user, host = user.split('@', 1) + except: + user = user.strip() + host = '127.0.0.1' + ret = finger(user, host) + ret.addErrback(lambda _: "Could not connect to remote host") + return ret + + def getUsers(self): + return defer.succeed([]) + +application = service.Application('finger', uid=1, gid=1) +f = ProxyFingerService() +internet.TCPServer(7779, IFingerFactory(f)).setServiceParent( + service.IServiceCollection(application)) +
                          + +
                          + +

                          Index

Version: 10.0.0

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/components.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/components.html
new file mode 100644
index 000000000000..baf3ce3a33d6
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/components.html
@@ -0,0 +1,1068 @@

Twisted Documentation: The Evolution of Finger: moving to a component based architecture

                          The Evolution of Finger: moving to a component based architecture

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the fourth part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

                          In this section of the tutorial, we'll move our code to a component +architecture so that adding new features is trivial. +See Interfaces and Adapters for a more +complete discussion of components.

                          + +

                          Write Maintainable Code

                          + + +

In the last version, the service class was three times longer than any other class, and was hard to understand. This was because it turned out to have multiple responsibilities. It had to know how to access user information, by rereading the file every half minute, but also how to display itself in a myriad of protocols. Here, we use the component-based architecture that Twisted provides to achieve a separation of concerns. All the service is responsible for, now, is supporting getUser/getUsers. It declares its support via a call to zope.interface.implements. Then, adapters are used to make this service look like an appropriate class for various things: for supplying a finger factory to TCPServer, for supplying a resource to the site's constructor, and for providing an IRC client factory for TCPClient. The adapters use only the methods of FingerService that they are declared to use: getUser/getUsers. We could, of course, skip the interfaces and let the configuration code use things like FingerFactoryFromService(f) directly. However, using interfaces provides the same flexibility inheritance gives: future subclasses can override the adapters.

                          + +


                          # Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc +from zope.interface import Interface, implements +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + implements(resource.IResource) + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + 
self.putChild('RPC2', UserStatusXR(self.service)) + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +components.registerAdapter(UserStatusTree, IFingerService, + resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '<h1>%s</h1>'%self.user+'<p>%s</p>'%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +
                          + +

                          Advantages of Latest Version

                          + +
                            +
                          • Readable -- each class is short
                          • +
                          • Maintainable -- each class knows only about interfaces
                          • +
                          • Dependencies between code parts are minimized
                          • +
                          • Example: writing a new IFingerService is easy
                          • +
                          +
                          +

                          +Full source code here:


                          # Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc +from zope.interface import Interface, implements +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + implements(resource.IResource) + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + 
self.putChild('RPC2', UserStatusXR(self.service)) + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +components.registerAdapter(UserStatusTree, IFingerService, + resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '<h1>%s</h1>'%self.user+'<p>%s</p>'%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + +class MemoryFingerService(service.Service): + + implements([IFingerService, IFingerSetterService]) + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + def setUser(self, user, status): + self.users[user] = status + + +application = service.Application('finger', uid=1, gid=1) +f = MemoryFingerService(moshez='Happy and well') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +internet.TCPServer(1079, IFingerSetterFactory(f), interface='127.0.0.1' + ).setServiceParent(serviceCollection) +
                          +

                          + +

                          Aspect-Oriented Programming

                          + +

                          At last, an example of aspect-oriented programming that isn't about logging +or timing. This code is actually useful! Watch how aspect-oriented programming +helps you write less code and have fewer dependencies! +

                          + + +
                          + +

                          Index

Version: 10.0.0

\ No newline at end of file
diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/configuration.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/configuration.html
new file mode 100644
index 000000000000..ccf782124ac2
--- /dev/null
+++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/configuration.html
@@ -0,0 +1,792 @@

Twisted Documentation: The Evolution of Finger: configuration and packaging of the finger service

                          The Evolution of Finger: configuration and packaging of the finger service

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the eleventh part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

                          In this part, we make it easier for non-programmers to configure a finger +server and show how to package it in the .deb and RPM package formats. Plugins +are discussed further in the Twisted Plugin System +howto. .tap files are covered in Writing a twistd +Plugin, and .tac applications are covered +in Using the Twisted Application +Framework.

                          + +

                          Plugins

                          + +

                          So far, the user had to be somewhat of a programmer to be able to configure +stuff. Maybe we can eliminate even that? Move old code to finger/__init__.py and...

                          +

                          +Full source code for finger module here:


                          # finger.py module + +from zope.interface import Interface, implements + +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components, log +from twisted.web import resource, server, xmlrpc +from twisted.spread import pb + +from OpenSSL import SSL + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + template = """<html><head><title>Users</title></head><body> + <h1>Users</h1> + <ul> + %(users)s + </ul> + </body> + 
</html>""" + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + + def getChild(self, path, request): + if path == '': + return self + elif path == 'RPC2': + return UserStatusXR(self.service) + else: + return UserStatus(path, self.service) + + def render_GET(self, request): + users = self.service.getUsers() + def cbUsers(users): + request.write(self.template % {'users': ''.join([ + # Name should be quoted properly these uses. + '<li><a href="%s">%s</a></li>' % (name, name) + for name in users])}) + request.finish() + users.addCallback(cbUsers) + def ebUsers(err): + log.err(err, "UserStatusTree failed") + request.finish() + users.addErrback(ebUsers) + return server.NOT_DONE_YET + +components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) + + +class UserStatus(resource.Resource): + + template='''<html><head><title>%(title)s</title></head> + <body><h1>%(name)s</h1><p>%(status)s</p></body></html>''' + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + status = self.service.getUser(self.user) + def cbStatus(status): + request.write(self.template % { + 'title': self.user, + 'name': self.user, + 'status': status}) + request.finish() + status.addCallback(cbStatus) + def ebStatus(err): + log.err(err, "UserStatus failed") + request.finish() + status.addErrback(ebStatus) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + def xmlrpc_getUsers(self): + return self.service.getUsers() + + +class IPerspectiveFinger(Interface): + + def remote_getUser(username): + """return a user's status""" + + def remote_getUsers(): + """return a user's status""" + +class PerspectiveFingerFromService(pb.Root): + + implements(IPerspectiveFinger) + + def __init__(self, service): + self.service = service + + def remote_getUser(self, username): + return self.service.getUser(username) + + def remote_getUsers(self): + return self.service.getUsers() + +components.registerAdapter(PerspectiveFingerFromService, + IFingerService, + IPerspectiveFinger) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self._read() + + def _read(self): + self.users = {} + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +class ServerContextFactory: + + def getContext(self): + """Create an SSL context. 
+ + This is a sample implementation that loads a certificate from a file + called 'server.pem'.""" + ctx = SSL.Context(SSL.SSLv23_METHOD) + ctx.use_certificate_file('server.pem') + ctx.use_privatekey_file('server.pem') + return ctx + + + + +# Easy configuration + +def makeService(config): + # finger on port 79 + s = service.MultiService() + f = FingerService(config['file']) + h = internet.TCPServer(1079, IFingerFactory(f)) + h.setServiceParent(s) + + + # website on port 8000 + r = resource.IResource(f) + r.templateDirectory = config['templates'] + site = server.Site(r) + j = internet.TCPServer(8000, site) + j.setServiceParent(s) + + # ssl on port 443 +# if config.get('ssl'): +# k = internet.SSLServer(443, site, ServerContextFactory()) +# k.setServiceParent(s) + + # irc fingerbot + if config.has_key('ircnick'): + i = IIRCClientFactory(f) + i.nickname = config['ircnick'] + ircserver = config['ircserver'] + b = internet.TCPClient(ircserver, 6667, i) + b.setServiceParent(s) + + # Pespective Broker on port 8889 + if config.has_key('pbport'): + m = internet.TCPServer( + int(config['pbport']), + pb.PBServerFactory(IPerspectiveFinger(f))) + m.setServiceParent(s) + + return s +
                          +

                          + +


# finger/tap.py
from twisted.application import internet, service
from twisted.internet import interfaces
from twisted.python import usage
import finger

class Options(usage.Options):

    optParameters = [
        ['file', 'f', '/etc/users'],
        ['templates', 't', '/usr/share/finger/templates'],
        ['ircnick', 'n', 'fingerbot'],
        ['ircserver', None, 'irc.freenode.net'],
        ['pbport', 'p', 8889],
        ]

    optFlags = [['ssl', 's']]

def makeService(config):
    return finger.makeService(config)
                          + +

                          And register it all:

                          + +


                          from twisted.application.service import ServiceMaker + +finger = ServiceMaker( + 'finger', 'finger.tap', 'Run a finger service', 'finger') +
                          +twisted/plugins/finger_tutorial.py + - listings/finger/twisted/plugins/finger_tutorial.py
                          + +

                          And now, the following works

                          + +
                          +% sudo twistd -n finger --file=/etc/users --ircnick=fingerbot
                          +
                          + +

                          + For more details about this, see the twistd plugin + documentation. +

                          + +

                          OS Integration

                          + +

                          If we already have the finger package installed in +PYTHONPATH (e.g. we added it to site-packages), we can achieve easy +integration:

                          + +

                          Debian

                          + +
                          +% tap2deb --unsigned -m "Foo <foo@example.com>" --type=python finger.tac
                          +% sudo dpkg -i .build/*.deb
                          +
                          + +

                          Red Hat / Mandrake

                          + +
                          +% tap2rpm --type=python finger.tac #[maybe other options needed]
                          +% sudo rpm -i .build/*.rpm
                          +
                          + +

                          These commands will properly register the tap/tac, init.d scripts, etc. for the given file.

                          + +

                          If it doesn't work on your favorite OS: patches accepted!

                          +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/factory.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/factory.html new file mode 100644 index 000000000000..5f1c6af9d565 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/factory.html @@ -0,0 +1,633 @@ + + +Twisted Documentation: The Evolution of Finger: using a single factory for + multiple protocols + + + + +

                          The Evolution of Finger: using a single factory for + multiple protocols

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the eighth part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

                          In this part, we add HTTPS support to our web frontend, showing how to have a +single factory listen on multiple ports. More information on using SSL in +Twisted can be found in the SSL howto.

                          + +

                          Support HTTPS

                          + +

                          All we need to do to serve a site over HTTPS is write a context factory (in this case, one which loads the certificate from a file) and then use the twisted.application.internet.SSLServer method. Note that one factory (in this case, a site) can listen on multiple ports with multiple protocols.

                          + +
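Before the full listing, here is a minimal sketch of just that pattern, with only the web part and nothing finger-specific; the port numbers and the 'server.pem' file name are illustrative assumptions, not part of the tutorial code:

# Minimal sketch: one twisted.web Site (a single factory) served on two ports,
# plain TCP and SSL.  Run with: twistd -noy thisfile.tac
from OpenSSL import SSL
from twisted.application import internet, service
from twisted.web import server, static

class ServerContextFactory:
    def getContext(self):
        # Load the key and certificate from a combined PEM file.
        ctx = SSL.Context(SSL.SSLv23_METHOD)
        ctx.use_certificate_file('server.pem')
        ctx.use_privatekey_file('server.pem')
        return ctx

application = service.Application('web')
serviceCollection = service.IServiceCollection(application)
site = server.Site(static.File('.'))      # the single factory
internet.TCPServer(8000, site).setServiceParent(serviceCollection)
internet.SSLServer(8443, site, ServerContextFactory()).setServiceParent(serviceCollection)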


                          # Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc, microdom +from twisted.spread import pb +from zope.interface import Interface, implements +from OpenSSL import SSL +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + def __init__(self, service): + resource.Resource.__init__(self) + 
self.service=service + + # add a specific child for the path "RPC2" + self.putChild("RPC2", UserStatusXR(self.service)) + + # need to do this for resources at the root of the site + self.putChild("", self) + + def _cb_render_GET(self, users, request): + userOutput = ''.join(["<li><a href=\"%s\">%s</a></li>" % (user, user) + for user in users]) + request.write(""" + <html><head><title>Users</title></head><body> + <h1>Users</h1> + <ul> + %s + </ul></body></html>""" % userOutput) + request.finish() + + def render_GET(self, request): + d = self.service.getUsers() + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + + def getChild(self, path, request): + return UserStatus(user=path, service=self.service) + +components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def _cb_render_GET(self, status, request): + request.write("""<html><head><title>%s</title></head> + <body><h1>%s</h1> + <p>%s</p> + </body></html>""" % (self.user, self.user, status)) + request.finish() + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + def xmlrpc_getUsers(self): + return self.service.getUsers() + + +class IPerspectiveFinger(Interface): + + def remote_getUser(username): + """return a user's status""" + + def remote_getUsers(): + """return a user's status""" + +class PerspectiveFingerFromService(pb.Root): + + implements(IPerspectiveFinger) + + def __init__(self, service): + self.service = service + + def remote_getUser(self, username): + return self.service.getUser(username) + + def remote_getUsers(self): + return self.service.getUsers() + +components.registerAdapter(PerspectiveFingerFromService, + IFingerService, + IPerspectiveFinger) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +class ServerContextFactory: + + def getContext(self): + """Create an SSL context. 
+ + This is a sample implementation that loads a certificate from a file + called 'server.pem'.""" + ctx = SSL.Context(SSL.SSLv23_METHOD) + ctx.use_certificate_file('server.pem') + ctx.use_privatekey_file('server.pem') + return ctx + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +site = server.Site(resource.IResource(f)) +internet.TCPServer(8000, site + ).setServiceParent(serviceCollection) +internet.SSLServer(443, site, ServerContextFactory() + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +internet.TCPServer(8889, pb.PBServerFactory(IPerspectiveFinger(f)) + ).setServiceParent(serviceCollection) +
                          + + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/index.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/index.html new file mode 100644 index 000000000000..38024d48407a --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/index.html @@ -0,0 +1,83 @@ + + +Twisted Documentation: Twisted from Scratch, or The Evolution of Finger + + + + +

                          Twisted from Scratch, or The Evolution of Finger

                          + +
                          + + + +

                          Introduction

                          + +

                          +Twisted is a big system. People are often daunted when they approach it. It's +hard to know where to start looking. +

                          + +

                          +This guide builds a full-fledged Twisted application from the ground up, using +most of the important bits of the framework. There is a lot of code, but don't +be afraid. +

                          + +

                          +The application we are looking at is a finger service, along the +lines of the familiar service traditionally provided by UNIX™ servers. +We will extend this service slightly beyond the standard, in order to +demonstrate some of Twisted's higher-level features. +

                          + +

                          +Each section of the tutorial dives straight into applications for various +Twisted topics. These topics have their own introductory howtos listed in +the core howto index and in the documentation for +other Twisted projects like Twisted Web and Twisted Words. There are at least +three ways to use this tutorial: you may find it useful to read through the rest +of the topics listed in the core howto index before +working through the finger tutorial, work through the finger tutorial and then +go back and hit the introductory material that is relevant to the Twisted +project you're working on, or read the introductory material one piece at a time +as it comes up in the finger tutorial. +

                          + +

                          Contents

                          + +

                          +This tutorial is split into eleven parts: +

                          + +
                            +
                          1. The Evolution of Finger: building a simple finger service
                          2. The Evolution of Finger: adding features to the finger service
                          3. The Evolution of Finger: cleaning up the finger code
                          4. The Evolution of Finger: moving to a component based architecture
                          5. The Evolution of Finger: pluggable backends
                          6. The Evolution of Finger: a web frontend
                          7. The Evolution of Finger: Twisted client support using Perspective Broker
                          8. The Evolution of Finger: using a single factory for multiple protocols
                          9. The Evolution of Finger: a Twisted finger client
                          10. The Evolution of Finger: making a finger library
                          11. The Evolution of Finger: configuration and packaging of the finger service
                          + + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/intro.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/intro.html new file mode 100644 index 000000000000..0b2df75ecdc0 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/intro.html @@ -0,0 +1,716 @@ + + +Twisted Documentation: The Evolution of Finger: building a simple finger service + + + + +

                          The Evolution of Finger: building a simple finger service

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the first part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

                          If you're not familiar with 'finger' it's probably because it's not used as +much nowadays as it used to be. Basically, if you run finger nail +or finger nail@example.com the target computer spits out some +information about the user named nail. For instance:

                          + +
                          +Login: nail                           Name: Nail Sharp
                          +Directory: /home/nail                 Shell: /usr/bin/sh
                          +Last login Wed Mar 31 18:32 2004 (PST)
                          +New mail received Thu Apr  1 10:50 2004 (PST)
                          +     Unread since Thu Apr  1 10:50 2004 (PST)
                          +No Plan.
                          +
                          + +

                          If the target computer does not have the fingerd daemon running you'll get a "Connection +Refused" error. Paranoid sysadmins keep fingerd off or limit the +output to hinder crackers and harassers. The above format is the standard +fingerd default, but an alternate implementation can output +anything it wants, such as automated responsibility status for everyone in an +organization. You can also define pseudo "users", which are essentially +keywords.

                          + +

                          This portion of the tutorial makes use of factories and protocols as +introduced in the Writing a TCP Server howto and +deferreds as introduced in Using Deferreds +and Generating Deferreds. Services and +applications are discussed in Using the Twisted +Application Framework.

                          + +

                          By the end of this section of the tutorial, our finger server will answer +TCP finger requests on port 1079, and will read data from the web.

                          + +

                          Refuse Connections

                          + + + +

                          This example only runs the reactor. It will consume almost no CPU +resources. As it is not listening on any port, it can't respond to network +requests — nothing at all will happen until we interrupt the program. At +this point if you run finger nail or telnet localhost +1079, you'll get a "Connection refused" error since there's no daemon +running to respond. Not very useful, perhaps — but this is the skeleton +inside which the Twisted program will grow. +

                          + +
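For reference, the corresponding listing in this patch (listings/finger/finger01.py) is only two lines:

# finger01.py: start the reactor and do nothing else
from twisted.internet import reactor
reactor.run()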

                          As implied above, at various points in this tutorial you'll want to +observe the behavior of the server being developed. Unless you have a +finger program which can use an alternate port, the easiest way to do this +is with a telnet client. telnet localhost 1079 will connect to +the local host on port 1079, where a finger server will eventually be +listening.

                          + +

                          The Reactor

                          + +

                          +You don't call Twisted, Twisted calls you. The reactor +is Twisted's main event loop, similar to the main loop in other toolkits available +in Python (Qt, wx, and Gtk). There is exactly one reactor in any running Twisted +application. Once started it loops over and over again, responding to network +events and making scheduled calls to code.

                          + +

                          Note that there are actually several different reactors to choose from; +from twisted.internet import reactor returns the current reactor. +If you haven't chosen a reactor class yet, it automatically chooses the +default. See the Reactor Basics HOWTO +for more information.

                          + +
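As a sketch of what choosing a reactor looks like (assuming a platform where the poll reactor is available), the install must happen before anything imports the default reactor:

# Install a non-default reactor before 'from twisted.internet import reactor'
# is executed anywhere else in the program.
from twisted.internet import pollreactor
pollreactor.install()

from twisted.internet import reactor
reactor.run()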

                          Do Nothing

                          + + + +

                          Here, reactor.listenTCP opens port 1079. (The number 1079 is a +reminder that eventually we want to run on port 79, the standard port for +finger servers.) The specified factory, FingerFactory, is used to +handle incoming requests on that port. Specifically, for each request, the +reactor calls the factory's buildProtocol method, which in this +case causes FingerProtocol to be instantiated. Since the protocol +defined here does not actually respond to any events, connections to 1079 will +be accepted, but the input ignored.

                          + +
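The listing being described here is listings/finger/finger02.py:

# finger02.py: accept connections on 1079, but ignore all input
from twisted.internet import protocol, reactor

class FingerProtocol(protocol.Protocol):
    pass

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

reactor.listenTCP(1079, FingerFactory())
reactor.run()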

                          A Factory is the proper place for data that you want to make available to +the protocol instances, since the protocol instances are garbage collected when +the connection is closed.

                          + + +

                          Drop Connections

                          + + + +

                          Here we add to the protocol the ability to respond to the event of beginning +a connection — by terminating it. Perhaps not an interesting behavior, but +it is already close to behaving according to the letter of the standard finger protocol. After +all, there is no requirement to send any data to the remote connection in the +standard. The only problem, as far as the standard is concerned, is that we +terminate the connection too soon. A client which is slow enough will see his +send() of the username result in an error.

                          + + +
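The corresponding listing, finger03.py, terminates each connection as soon as it is made:

# finger03.py: drop every connection immediately
from twisted.internet import protocol, reactor

class FingerProtocol(protocol.Protocol):
    def connectionMade(self):
        self.transport.loseConnection()

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

reactor.listenTCP(1079, FingerFactory())
reactor.run()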

                          Read Username, Drop Connections

                          + + + +

                          Here we make FingerProtocol inherit from LineReceiver, so that we get data-based +events on a line-by-line basis. We respond to the event of receiving the line +with shutting down the connection.

                          + +
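This is finger04.py, the listing this step refers to:

# finger04.py: read one line (the username), then drop the connection
from twisted.internet import protocol, reactor
from twisted.protocols import basic

class FingerProtocol(basic.LineReceiver):
    def lineReceived(self, user):
        self.transport.loseConnection()

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

reactor.listenTCP(1079, FingerFactory())
reactor.run()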

                          If you use a telnet client to interact with this server, the result will +look something like this:

                          + +
                          +$ telnet localhost 1079
                          +Trying 127.0.0.1...
                          +Connected to localhost.localdomain.
                          +alice
                          +Connection closed by foreign host.
                          +
                          + +

                          Congratulations, this is the first standard-compliant version of the code. +However, usually people actually expect some data about users to be +transmitted.

                          + +

                          Read Username, Output Error, Drop Connections

                          + + + +

                          Finally, a useful version. Granted, the usefulness is somewhat limited by +the fact that this version only prints out a No such user message. It +could be used for devastating effect in honey-pots (decoy servers), of +course.

                          + + +
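The listing for this step, finger05.py:

# finger05.py: read the username, answer with an error, drop the connection
from twisted.internet import protocol, reactor
from twisted.protocols import basic

class FingerProtocol(basic.LineReceiver):
    def lineReceived(self, user):
        self.transport.write("No such user\r\n")
        self.transport.loseConnection()

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

reactor.listenTCP(1079, FingerFactory())
reactor.run()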

                          Output From Empty Factory

                          + + + +

                          The same behavior, but finally we see what usefulness the +factory has: as something that does not get constructed for +every connection, it can be in charge of the user database. +In particular, we won't have to change the protocol if +the user database back-end changes.

                          + + +
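The listing for this step, finger06.py, moves the (still empty) user lookup onto the factory:

# finger06.py: read username, output from empty factory, drop connections
from twisted.internet import protocol, reactor
from twisted.protocols import basic

class FingerProtocol(basic.LineReceiver):
    def lineReceived(self, user):
        self.transport.write(self.factory.getUser(user) + "\r\n")
        self.transport.loseConnection()

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

    def getUser(self, user):
        return "No such user"

reactor.listenTCP(1079, FingerFactory())
reactor.run()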

                          Output from Non-empty Factory

                          + + + +

                          Finally, a really useful finger database. While it does not +supply information about logged in users, it could be used to +distribute things like office locations and internal office +numbers. As hinted above, the factory is in charge of keeping +the user database: note that the protocol instance has not +changed. This is starting to look good: we really won't have +to keep tweaking our protocol.

                          + + +
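finger07.py, the listing for this step, gives the factory an actual user database:

# finger07.py: read username, output from non-empty factory, drop connections
from twisted.internet import protocol, reactor
from twisted.protocols import basic

class FingerProtocol(basic.LineReceiver):
    def lineReceived(self, user):
        self.transport.write(self.factory.getUser(user) + "\r\n")
        self.transport.loseConnection()

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

    def __init__(self, **kwargs):
        self.users = kwargs

    def getUser(self, user):
        return self.users.get(user, "No such user")

reactor.listenTCP(1079, FingerFactory(moshez='Happy and well'))
reactor.run()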

                          Use Deferreds

                          + + + +

                          But, here we tweak it just for the hell of it. Yes, while the +previous version worked, it did assume the result of getUser is +always immediately available. But what if instead of an in-memory +database, we would have to fetch the result from a remote Oracle server? By +allowing getUser to return a Deferred, we make it easier for the data to be +retrieved asynchronously so that the CPU can be used for other tasks in the +meanwhile.

                          + +

                          As described in the Deferred HOWTO, Deferreds +allow a program to be driven by events. For instance, if one task in a program +is waiting on data, rather than have the CPU (and the program!) idly waiting +for that data (a process normally called 'blocking'), the program can perform +other operations in the meantime, and waits for some signal that data is ready +to be processed before returning to that process.

                          + +

                          In brief, the code in FingerFactory above creates a Deferred, +to which we start to attach callbacks. The deferred action in +FingerFactory is actually a fast-running expression consisting of +one dictionary method, get. Since this action can execute without +delay, FingerFactory.getUser uses defer.succeed to +create a Deferred which already has a result, meaning its return value will be +passed immediately to the first callback function, which turns out to be +FingerProtocol.writeResponse. We've also defined an +errback (appropriately named FingerProtocol.onError) that +will be called instead of writeResponse if something goes +wrong.

                          + +
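The listing for this step, finger08.py:

# finger08.py: use deferreds, to minimize synchronicity assumptions
from twisted.internet import protocol, reactor, defer
from twisted.protocols import basic

class FingerProtocol(basic.LineReceiver):
    def lineReceived(self, user):
        d = self.factory.getUser(user)

        def onError(err):
            return 'Internal error in server'
        d.addErrback(onError)

        def writeResponse(message):
            self.transport.write(message + '\r\n')
            self.transport.loseConnection()
        d.addCallback(writeResponse)

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

    def __init__(self, **kwargs):
        self.users = kwargs

    def getUser(self, user):
        return defer.succeed(self.users.get(user, "No such user"))

reactor.listenTCP(1079, FingerFactory(moshez='Happy and well'))
reactor.run()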

                          Run 'finger' Locally

                          + + + +

                          This example also makes use of a Deferred. +twisted.internet.utils.getProcessOutput is a non-blocking version +of Python's commands.getoutput: it runs a shell command +(finger, in this case) and captures its standard output. However, +getProcessOutput returns a Deferred instead of the output itself. +Since FingerProtocol.lineReceived is already expecting a Deferred +to be returned by getUser, it doesn't need to be changed, and it +returns the standard output as the finger result.

                          + +
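The listing for this step, finger09.py, only changes the factory:

# finger09.py: read username, output from factory interfacing to OS, drop connections
from twisted.internet import protocol, reactor, defer, utils
from twisted.protocols import basic

class FingerProtocol(basic.LineReceiver):
    def lineReceived(self, user):
        d = self.factory.getUser(user)

        def onError(err):
            return 'Internal error in server'
        d.addErrback(onError)

        def writeResponse(message):
            self.transport.write(message + '\r\n')
            self.transport.loseConnection()
        d.addCallback(writeResponse)

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

    def getUser(self, user):
        return utils.getProcessOutput("finger", [user])

reactor.listenTCP(1079, FingerFactory())
reactor.run()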

                          Note that in this case the shell's built-in finger command is +simply run with whatever arguments it is given. This is probably insecure, so +you probably don't want a real server to do this without a lot more validation +of the user input. This will do exactly what the standard version of the finger +server does.

                          + +

                          Read Status from the Web

                          + +

                          The web. That invention which has infiltrated homes around the world finally +gets through to our invention. In this case we use the built-in Twisted web +client via twisted.web.client.getPage, a non-blocking version of +Python's urllib2.urlopen(URL).read(). Like +getProcessOutput it returns a Deferred which will be called back +with a string, and can thus be used as a drop-in replacement.

                          + +
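The listing for this step, finger10.py, again swaps only the factory's getUser:

# finger10.py: read username, output from factory interfacing to the web, drop connections
from twisted.internet import protocol, reactor, defer, utils
from twisted.protocols import basic
from twisted.web import client

class FingerProtocol(basic.LineReceiver):
    def lineReceived(self, user):
        d = self.factory.getUser(user)

        def onError(err):
            return 'Internal error in server'
        d.addErrback(onError)

        def writeResponse(message):
            self.transport.write(message + '\r\n')
            self.transport.loseConnection()
        d.addCallback(writeResponse)

class FingerFactory(protocol.ServerFactory):
    protocol = FingerProtocol

    def __init__(self, prefix):
        self.prefix = prefix

    def getUser(self, user):
        return client.getPage(self.prefix + user)

reactor.listenTCP(1079, FingerFactory(prefix='http://livejournal.com/~'))
reactor.run()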

                          Thus, we have examples of three different database back-ends, none of which +change the protocol class. In fact, we will not have to change the protocol +again until the end of this tutorial: we have achieved, here, one truly usable +class.

                          + +
                          + +

                          Use Application

                          + +

                          Up until now, we faked. We kept using port 1079, because really, +who wants to run a finger server with root privileges? Well, the +common solution is privilege shedding: after binding to the +network, become a different, less privileged user. We could have done +it ourselves, but Twisted has a built-in way to do it. We will create +a snippet as above, but now we will define an application object. That +object will have uid and gid attributes. When running it (later we +will see how) it will bind to ports, shed privileges and then run.

                          + +

                          Read on to find out how to run this code using the twistd utility.

                          + +

                          twistd

                          + +

                          This is how to run Twisted Applications — files which define an +'application'. A daemon is expected to adhere to certain behavioral standards +so that standard tools can stop/start/query them. If a Twisted application is +run via twistd, the TWISTed Daemonizer, all this behavioral stuff will be +handled for you. twistd does everything a daemon can be expected to — +shuts down stdin/stdout/stderr, disconnects from the terminal and can even +change runtime directory, or even the root filesystems. In short, it does +everything so the Twisted application developer can concentrate on writing his +networking code.

                          + +
                          +root% twistd -ny finger11.tac # just like before
                          +root% twistd -y finger11.tac # daemonize, keep pid in twistd.pid
                          +root% twistd -y finger11.tac --pidfile=finger.pid
                          +root% twistd -y finger11.tac --rundir=/
                          +root% twistd -y finger11.tac --chroot=/var
                          +root% twistd -y finger11.tac -l /var/log/finger.log
                          +root% twistd -y finger11.tac --syslog # just log to syslog
                          +root% twistd -y finger11.tac --syslog --prefix=twistedfinger # use given prefix
                          +
                          + +

                          There are several ways to tell twistd where your application is; here we +show how it is done using the application global variable in a +Python source file (a Twisted Application +Configuration file).

                          + +


                          # Read username, output from non-empty factory, drop connections +# Use deferreds, to minimize synchronicity assumptions +# Write application. Save in 'finger.tpy' + +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + +application = service.Application('finger', uid=1, gid=1) +factory = FingerFactory(moshez='Happy and well') +internet.TCPServer(79, factory).setServiceParent( + service.IServiceCollection(application)) +
                          + +

                          Instead of using reactor.listenTCP as in the above examples, +here we are using its application-aware counterpart, +internet.TCPServer. Notice that when it is instantiated, the +application object itself does not reference either the protocol or the +factory. Any services (such as TCPServer) which have the application as their +parent will be started when the application is started by twistd. The +application object is more useful for returning an object that supports the +IService, IServiceCollection, IProcess, and sob.IPersistable interfaces with +the given parameters; we'll be seeing these in the next part of the +tutorial. As the parent of the TCPServer we opened, the application lets us +manage the TCPServer.

                          + +

                          With the daemon running on the standard finger port, you can test it with +the standard finger command: finger moshez.

                          + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/library.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/library.html new file mode 100644 index 000000000000..00388d59d10e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/library.html @@ -0,0 +1,269 @@ + + +Twisted Documentation: The Evolution of Finger: making a finger library + + + + +

                          The Evolution of Finger: making a finger library

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the tenth part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

                          In this part, we separate the application code that launches a finger service +from the library code which defines a finger service, placing the application in +a Twisted Application Configuration (.tac) file. We also move configuration +(such as HTML templates) into separate files. Configuration and deployment with +.tac and twistd are introduced in Using the +Twisted Application Framework.

                          + +

                          Organization

                          + +

                          Now this code, while quite modular and well-designed, isn't +properly organized. Everything above the application= belongs in a +module, and the HTML templates all belong in separate files. +

                          + +

                          We can use the templateFile and templateDirectory attributes to indicate +what HTML template file to use for each Page, and where to look for it.

                          + +
                          + +

                          +Note that our program is now quite separated. We have: +

                            +
                          • Code (in the module)
                          • Configuration (file above)
                          • Presentation (templates)
                          • Content (/etc/users)
                          • Deployment (twistd)
                          + +Prototypes don't need this level of separation, so our earlier examples all +bunched together. However, real applications do. Thankfully, if we write our +code correctly, it is easy to achieve a good separation of parts. +

                          + + +

                          Easy Configuration

                          + +

                          We can also supply easy configuration for common cases with a makeService method that will also help build .tap files later:

                          + +
                          + +

                          And we can write simpler files now:

                          + +


                          # simple-finger.tac +# eg: twistd -ny simple-finger.tac + +from twisted.application import service + +import finger + +options = { 'file': '/etc/users', + 'templates': '/usr/share/finger/templates', + 'ircnick': 'fingerbot', + 'ircserver': 'irc.freenode.net', + 'pbport': 8889, + 'ssl': 'ssl=0' } + +ser = finger.makeService(options) +application = service.Application('finger', uid=1, gid=1) +ser.setServiceParent(service.IServiceCollection(application)) +
                          + +
                          +% twistd -ny simple-finger.tac
                          +
                          + + +

                          Note: the finger user still has ultimate power: he can use makeService, or he can use the lower-level interface if he has specific needs (maybe an IRC server on some other port? maybe we want the non-SSL webserver to listen only locally? etc.). This is an important design principle: never force a layer of abstraction; allow the use of the lower layers of abstraction.

                          + +

                          The pasta theory of design:

                          + +
                            +
                          • Spaghetti: each piece of code interacts with every other piece of code [can be implemented with GOTO, functions, objects]
                          • Lasagna: code has carefully designed layers. Each layer is, in theory, independent. However, low-level layers usually cannot be used easily, and high-level layers depend on low-level layers.
                          • Ravioli: each part of the code is useful by itself. There is a thin layer of interfaces between the various parts [the sauce]. Each part can usefully be used elsewhere.
                          • ...but sometimes, the user just wants to order Ravioli, so one coarse-grained, easily definable layer of abstraction on top of it all can be useful.
                          + + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/etc.users b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/etc.users new file mode 100644 index 000000000000..d8c8f8cd2130 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/etc.users @@ -0,0 +1,2 @@ +moshez: happy and well +shawn: alive diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/__init__.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/__init__.py new file mode 100755 index 000000000000..bcb24fabd13e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/__init__.py @@ -0,0 +1,3 @@ +""" +Finger example application. +""" diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/finger.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/finger.py new file mode 100755 index 000000000000..b05053666b11 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/finger.py @@ -0,0 +1,331 @@ +# finger.py module + +from zope.interface import Interface, implements + +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components, log +from twisted.web import resource, server, xmlrpc +from twisted.spread import pb + +from OpenSSL import SSL + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class 
IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + template = """Users +

                          Users

                          +
                            + %(users)s +
                          + + """ + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + + def getChild(self, path, request): + if path == '': + return self + elif path == 'RPC2': + return UserStatusXR(self.service) + else: + return UserStatus(path, self.service) + + def render_GET(self, request): + users = self.service.getUsers() + def cbUsers(users): + request.write(self.template % {'users': ''.join([ + # Name should be quoted properly these uses. + '
                        1. %s
                        2. ' % (name, name) + for name in users])}) + request.finish() + users.addCallback(cbUsers) + def ebUsers(err): + log.err(err, "UserStatusTree failed") + request.finish() + users.addErrback(ebUsers) + return server.NOT_DONE_YET + +components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) + + +class UserStatus(resource.Resource): + + template='''%(title)s +

                          %(name)s

                          %(status)s

                          ''' + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + status = self.service.getUser(self.user) + def cbStatus(status): + request.write(self.template % { + 'title': self.user, + 'name': self.user, + 'status': status}) + request.finish() + status.addCallback(cbStatus) + def ebStatus(err): + log.err(err, "UserStatus failed") + request.finish() + status.addErrback(ebStatus) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + def xmlrpc_getUsers(self): + return self.service.getUsers() + + +class IPerspectiveFinger(Interface): + + def remote_getUser(username): + """return a user's status""" + + def remote_getUsers(): + """return a user's status""" + +class PerspectiveFingerFromService(pb.Root): + + implements(IPerspectiveFinger) + + def __init__(self, service): + self.service = service + + def remote_getUser(self, username): + return self.service.getUser(username) + + def remote_getUsers(self): + return self.service.getUsers() + +components.registerAdapter(PerspectiveFingerFromService, + IFingerService, + IPerspectiveFinger) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self._read() + + def _read(self): + self.users = {} + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +class ServerContextFactory: + + def getContext(self): + """Create an SSL context. 
+ + This is a sample implementation that loads a certificate from a file + called 'server.pem'.""" + ctx = SSL.Context(SSL.SSLv23_METHOD) + ctx.use_certificate_file('server.pem') + ctx.use_privatekey_file('server.pem') + return ctx + + + + +# Easy configuration + +def makeService(config): + # finger on port 79 + s = service.MultiService() + f = FingerService(config['file']) + h = internet.TCPServer(1079, IFingerFactory(f)) + h.setServiceParent(s) + + + # website on port 8000 + r = resource.IResource(f) + r.templateDirectory = config['templates'] + site = server.Site(r) + j = internet.TCPServer(8000, site) + j.setServiceParent(s) + + # ssl on port 443 +# if config.get('ssl'): +# k = internet.SSLServer(443, site, ServerContextFactory()) +# k.setServiceParent(s) + + # irc fingerbot + if config.has_key('ircnick'): + i = IIRCClientFactory(f) + i.nickname = config['ircnick'] + ircserver = config['ircserver'] + b = internet.TCPClient(ircserver, 6667, i) + b.setServiceParent(s) + + # Pespective Broker on port 8889 + if config.has_key('pbport'): + m = internet.TCPServer( + int(config['pbport']), + pb.PBServerFactory(IPerspectiveFinger(f))) + m.setServiceParent(s) + + return s diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/tap.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/tap.py new file mode 100644 index 000000000000..a06102c4ce96 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger/tap.py @@ -0,0 +1,20 @@ +# finger/tap.py +from twisted.application import internet, service +from twisted.internet import interfaces +from twisted.python import usage +import finger + +class Options(usage.Options): + + optParameters = [ + ['file', 'f', '/etc/users'], + ['templates', 't', '/usr/share/finger/templates'], + ['ircnick', 'n', 'fingerbot'], + ['ircserver', None, 'irc.freenode.net'], + ['pbport', 'p', 8889], + ] + + optFlags = [['ssl', 's']] + +def makeService(config): + return finger.makeService(config) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger01.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger01.py new file mode 100755 index 000000000000..05615107188e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger01.py @@ -0,0 +1,2 @@ +from twisted.internet import reactor +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger02.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger02.py new file mode 100755 index 000000000000..e7efbf4e6f1c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger02.py @@ -0,0 +1,10 @@ +from twisted.internet import protocol, reactor + +class FingerProtocol(protocol.Protocol): + pass + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + +reactor.listenTCP(1079, FingerFactory()) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger03.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger03.py new file mode 100755 index 000000000000..d32302367cca --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger03.py @@ -0,0 +1,11 @@ +from twisted.internet import protocol, reactor + +class FingerProtocol(protocol.Protocol): + def connectionMade(self): + self.transport.loseConnection() + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + +reactor.listenTCP(1079, 
FingerFactory()) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger04.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger04.py new file mode 100755 index 000000000000..d35f590789a6 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger04.py @@ -0,0 +1,12 @@ +from twisted.internet import protocol, reactor +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + self.transport.loseConnection() + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + +reactor.listenTCP(1079, FingerFactory()) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger05.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger05.py new file mode 100755 index 000000000000..0d8da8cb1c80 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger05.py @@ -0,0 +1,13 @@ +from twisted.internet import protocol, reactor +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + self.transport.write("No such user\r\n") + self.transport.loseConnection() + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + +reactor.listenTCP(1079, FingerFactory()) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger06.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger06.py new file mode 100755 index 000000000000..7f789861b74b --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger06.py @@ -0,0 +1,18 @@ +# Read username, output from empty factory, drop connections + +from twisted.internet import protocol, reactor +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + self.transport.write(self.factory.getUser(user)+"\r\n") + self.transport.loseConnection() + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def getUser(self, user): + return "No such user" + +reactor.listenTCP(1079, FingerFactory()) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger07.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger07.py new file mode 100755 index 000000000000..cc5dbf130630 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger07.py @@ -0,0 +1,21 @@ +# Read username, output from non-empty factory, drop connections + +from twisted.internet import protocol, reactor +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + self.transport.write(self.factory.getUser(user)+"\r\n") + self.transport.loseConnection() + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return self.users.get(user, "No such user") + +reactor.listenTCP(1079, FingerFactory(moshez='Happy and well')) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger08.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger08.py new file mode 100755 index 000000000000..624c5b041f57 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger08.py @@ -0,0 +1,30 @@ +# Read username, output from non-empty factory, drop 
connections +# Use deferreds, to minimize synchronicity assumptions + +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + +reactor.listenTCP(1079, FingerFactory(moshez='Happy and well')) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger09.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger09.py new file mode 100755 index 000000000000..336acb368883 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger09.py @@ -0,0 +1,26 @@ +# Read username, output from factory interfacing to OS, drop connections + +from twisted.internet import protocol, reactor, defer, utils +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def getUser(self, user): + return utils.getProcessOutput("finger", [user]) + +reactor.listenTCP(1079, FingerFactory()) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger10.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger10.py new file mode 100755 index 000000000000..7e4cb931c31f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger10.py @@ -0,0 +1,30 @@ +# Read username, output from factory interfacing to web, drop connections + +from twisted.internet import protocol, reactor, defer, utils +from twisted.protocols import basic +from twisted.web import client + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def __init__(self, prefix): + self.prefix=prefix + + def getUser(self, user): + return client.getPage(self.prefix+user) + +reactor.listenTCP(1079, FingerFactory(prefix='http://livejournal.com/~')) +reactor.run() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger11.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger11.tac new file mode 100755 index 000000000000..aae8ca689c03 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger11.tac @@ -0,0 +1,34 @@ +# Read username, output from non-empty factory, drop connections +# Use deferreds, to minimize synchronicity assumptions +# Write application. 
Save in 'finger.tpy' + +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + +application = service.Application('finger', uid=1, gid=1) +factory = FingerFactory(moshez='Happy and well') +internet.TCPServer(79, factory).setServiceParent( + service.IServiceCollection(application)) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger12.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger12.tac new file mode 100755 index 000000000000..69120f1248dd --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger12.tac @@ -0,0 +1,55 @@ +# But let's try and fix setting away messages, shall we? +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + +class FingerSetterProtocol(basic.LineReceiver): + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + user = self.lines[0] + status = self.lines[1] + self.factory.setUser(user, status) + +class FingerSetterFactory(protocol.ServerFactory): + protocol = FingerSetterProtocol + + def __init__(self, fingerFactory): + self.fingerFactory = fingerFactory + + def setUser(self, user, status): + self.fingerFactory.users[user] = status + +ff = FingerFactory(moshez='Happy and well') +fsf = FingerSetterFactory(ff) + +application = service.Application('finger', uid=1, gid=1) +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79,ff).setServiceParent(serviceCollection) +internet.TCPServer(1079,fsf).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger13.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger13.tac new file mode 100755 index 000000000000..5cf60c9af04f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger13.tac @@ -0,0 +1,59 @@ +# Fix asymmetry +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def 
writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerSetterProtocol(basic.LineReceiver): + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self,reason): + user = self.lines[0] + status = self.lines[1] + self.factory.setUser(user, status) + +class FingerService(service.Service): + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def setUser(self, user, status): + self.users[user] = status + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getFingerSetterFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerSetterProtocol + f.setUser = self.setUser + return f + +application = service.Application('finger', uid=1, gid=1) +f = FingerService(moshez='Happy and well') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79,f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(1079,f.getFingerSetterFactory() + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger14.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger14.tac new file mode 100755 index 000000000000..61d35d05fb18 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger14.tac @@ -0,0 +1,55 @@ +# Read from file +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerService(service.Service): + def __init__(self, filename): + self.users = {} + self.filename = filename + self._read() + + def _read(self): + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def startService(self): + self._read() + service.Service.startService(self) + + def stopService(self): + service.Service.stopService(self) + self.call.cancel() + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +finger = internet.TCPServer(79, f.getFingerFactory()) + +finger.setServiceParent(service.IServiceCollection(application)) +f.setServiceParent(service.IServiceCollection(application)) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger15.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger15.tac new file mode 100755 index 000000000000..18b3d8738289 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger15.tac @@ -0,0 +1,76 @@ +# Read from file, announce on the web! 
+from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic +from twisted.web import resource, server, static +import cgi + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerResource(resource.Resource): + + def __init__(self, users): + self.users = users + resource.Resource.__init__(self) + + # we treat the path as the username + def getChild(self, username, request): + """ + 'username' is a string. + 'request' is a 'twisted.web.server.Request'. + """ + messagevalue = self.users.get(username) + username = cgi.escape(username) + if messagevalue is not None: + messagevalue = cgi.escape(messagevalue) + text = '
<h1>%s</h1><p>%s</p>
                          ' % (username,messagevalue) + else: + text = '
<h1>%s</h1><p>No such user</p>
                          ' % username + return static.Data(text, 'text/html') + +class FingerService(service.Service): + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getResource(self): + r = FingerResource(self.users) + return r + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(f.getResource()) + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger16.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger16.tac new file mode 100755 index 000000000000..f5d350240df8 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger16.tac @@ -0,0 +1,91 @@ +# Read from file, announce on the web, irc +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.web import resource, server, static + +import cgi + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + + +class IRCReplyBot(irc.IRCClient): + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + irc.IRCClient.msg(self, user, msg+': '+message) + d.addCallback(writeResponse) + +class FingerService(service.Service): + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getResource(self): + r = resource.Resource() + r.getChild = (lambda path, request: + static.Data('
<h1>%s</h1><p>%s</p>
                          ' % + tuple(map(cgi.escape, + [path,self.users.get(path, + "No such user
<p/>
                          usage: site/user")])), + 'text/html')) + return r + + def getIRCBot(self, nickname): + f = protocol.ReconnectingClientFactory() + f.protocol = IRCReplyBot + f.nickname = nickname + f.getUser = self.getUser + return f + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(f.getResource()) + ).setServiceParent(serviceCollection) +internet.TCPClient('irc.freenode.org', 6667, f.getIRCBot('fingerbot') + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger17.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger17.tac new file mode 100755 index 000000000000..5ef9170c40bc --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger17.tac @@ -0,0 +1,91 @@ +# Read from file, announce on the web, irc, xml-rpc +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.web import resource, server, static, xmlrpc +import cgi + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class IRCReplyBot(irc.IRCClient): + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + irc.IRCClient.msg(self, user, msg+': '+message) + d.addCallback(writeResponse) + +class FingerService(service.Service): + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getResource(self): + r = resource.Resource() + r.getChild = (lambda path, request: + static.Data('
<h1>%s</h1><p>%s</p>
                          ' % + tuple(map(cgi.escape, + [path,self.users.get(path, "No such user")])), + 'text/html')) + x = xmlrpc.XMLRPC() + x.xmlrpc_getUser = self.getUser + r.putChild('RPC2', x) + return r + + def getIRCBot(self, nickname): + f = protocol.ReconnectingClientFactory() + f.protocol = IRCReplyBot + f.nickname = nickname + f.getUser = self.getUser + return f + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(f.getResource()) + ).setServiceParent(serviceCollection) +internet.TCPClient('irc.freenode.org', 6667, f.getIRCBot('fingerbot') + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger18.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger18.tac new file mode 100755 index 000000000000..6ddc1c87988c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger18.tac @@ -0,0 +1,137 @@ +# Do everything properly +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.web import resource, server, static, xmlrpc +import cgi + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class UserStatusTree(resource.Resource): + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['
<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>
                          ' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '
<h1>%s</h1>
                          '%self.user+'
<p>%s</p>
                          '%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + +class FingerService(service.Service): + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getResource(self): + r = UserStatusTree(self) + x = UserStatusXR(self) + r.putChild('RPC2', x) + return r + + def getIRCBot(self, nickname): + f = protocol.ReconnectingClientFactory() + f.protocol = IRCReplyBot + f.nickname = nickname + f.getUser = self.getUser + return f + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(f.getResource()) + ).setServiceParent(serviceCollection) +internet.TCPClient('irc.freenode.org', 6667, f.getIRCBot('fingerbot') + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19.tac new file mode 100755 index 000000000000..248bd9c81af7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19.tac @@ -0,0 +1,238 @@ +# Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc +from zope.interface import Interface, implements +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + 
IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + implements(resource.IResource) + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + self.putChild('RPC2', UserStatusXR(self.service)) + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['
<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>
                          ' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +components.registerAdapter(UserStatusTree, IFingerService, + resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '
<h1>%s</h1>
                          '%self.user+'
<p>%s</p>
                          '%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19a.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19a.tac new file mode 100755 index 000000000000..e6c66b5a4328 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19a.tac @@ -0,0 +1,231 @@ +# Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc +from zope.interface import Interface, implements +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def 
setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + implements(resource.IResource) + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + self.putChild('RPC2', UserStatusXR(self.service)) + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['
<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>
                          ' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +components.registerAdapter(UserStatusTree, IFingerService, + resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '
<h1>%s</h1>
                          '%self.user+'
<p>%s</p>
                          '%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + +class MemoryFingerService(service.Service): + + implements([IFingerService, IFingerSetterService]) + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + def setUser(self, user, status): + self.users[user] = status + + +application = service.Application('finger', uid=1, gid=1) +f = MemoryFingerService(moshez='Happy and well') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +internet.TCPServer(1079, IFingerSetterFactory(f), interface='127.0.0.1' + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19a_changes.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19a_changes.py new file mode 100644 index 000000000000..cbb3623a0d97 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19a_changes.py @@ -0,0 +1,29 @@ + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +# Advantages of latest version + +class MemoryFingerService(service.Service): + + implements([IFingerService, IFingerSetterService]) + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + def setUser(self, user, status): + self.users[user] = status + + +f = MemoryFingerService(moshez='Happy and well') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(1079, IFingerSetterFactory(f), interface='127.0.0.1' + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19b.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19b.tac new file mode 100755 index 000000000000..b4790a6f7af2 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19b.tac @@ -0,0 +1,257 @@ +# Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer, utils +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc +from zope.interface import Interface, implements +import cgi +import pwd + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set 
the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + implements(resource.IResource) + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + self.putChild('RPC2', UserStatusXR(self.service)) + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['
<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>
                          ' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +components.registerAdapter(UserStatusTree, IFingerService, + resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '
<h1>%s</h1>
                          '%self.user+'
<p>%s</p>
                          '%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + +# Another back-end + +class LocalFingerService(service.Service): + + implements(IFingerService) + + def getUser(self, user): + # need a local finger daemon running for this to work + return utils.getProcessOutput("finger", [user]) + + def getUsers(self): + return defer.succeed([]) + + +application = service.Application('finger', uid=1, gid=1) +f = LocalFingerService() +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19b_changes.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19b_changes.py new file mode 100644 index 000000000000..3c8ff75b08ac --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19b_changes.py @@ -0,0 +1,19 @@ + +from twisted.internet import protocol, reactor, defer, utils +import pwd + +# Another back-end + +class LocalFingerService(service.Service): + + implements(IFingerService) + + def getUser(self, user): + # need a local finger daemon running for this to work + return utils.getProcessOutput("finger", [user]) + + def getUsers(self): + return defer.succeed([]) + + +f = LocalFingerService() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19c.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19c.tac new file mode 100755 index 000000000000..15e37150d101 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19c.tac @@ -0,0 +1,269 @@ +# Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer, utils +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc +from zope.interface import Interface, implements +import cgi +import pwd +import os + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to 
something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + implements(resource.IResource) + + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + self.putChild('RPC2', UserStatusXR(self.service)) + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['
<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>
                          ' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +components.registerAdapter(UserStatusTree, IFingerService, + resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '
<h1>%s</h1>
                          '%self.user+'
<p>%s</p>
                          '%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + +# Yet another back-end + +class LocalFingerService(service.Service): + + implements(IFingerService) + + def getUser(self, user): + user = user.strip() + try: + entry = pwd.getpwnam(user) + except KeyError: + return defer.succeed("No such user") + try: + f = file(os.path.join(entry[5],'.plan')) + except (IOError, OSError): + return defer.succeed("No such user") + data = f.read() + data = data.strip() + f.close() + return defer.succeed(data) + + def getUsers(self): + return defer.succeed([]) + + +application = service.Application('finger', uid=1, gid=1) +f = LocalFingerService() +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19c_changes.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19c_changes.py new file mode 100644 index 000000000000..cc592ea3d5f6 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger19c_changes.py @@ -0,0 +1,32 @@ +from twisted.internet import protocol, reactor, defer, utils +import pwd +import os + + +# Yet another back-end + +class LocalFingerService(service.Service): + + implements(IFingerService) + + def getUser(self, user): + user = user.strip() + try: + entry = pwd.getpwnam(user) + except KeyError: + return defer.succeed("No such user") + try: + f = file(os.path.join(entry[5],'.plan')) + except (IOError, OSError): + return defer.succeed("No such user") + data = f.read() + data = data.strip() + f.close() + return defer.succeed(data) + + def getUsers(self): + return defer.succeed([]) + + + +f = LocalFingerService() diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger20.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger20.tac new file mode 100755 index 000000000000..48c0b029b55d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger20.tac @@ -0,0 +1,251 @@ +# Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc, microdom +from zope.interface import Interface, 
implements +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + def __init__(self, service): + resource.Resource.__init__(self) + self.service=service + + # add a specific child for the path "RPC2" + self.putChild("RPC2", UserStatusXR(self.service)) + + # need to do this for resources at the root of the site + self.putChild("", self) + + def _cb_render_GET(self, users, request): + userOutput = ''.join(["
<li><a href=\"%s\">%s</a></li>" % (user, user) + for user in users]) + request.write(""" + <html><head><title>Users</title></head><body> + <h1>Users</h1> + <ul> + %s + </ul></body></html>
                          """ % userOutput) + request.finish() + + def render_GET(self, request): + d = self.service.getUsers() + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + + def getChild(self, path, request): + return UserStatus(user=path, service=self.service) + +components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def _cb_render_GET(self, status, request): + request.write("""%s +
</title></head> + <body><h1>%s</h1> + <p>%s</p> + </body></html>
                          + """ % (self.user, self.user, status)) + request.finish() + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + def xmlrpc_getUsers(self): + return self.service.getUsers() + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger21.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger21.tac new file mode 100755 index 000000000000..8ac2603ac2b9 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger21.tac @@ -0,0 +1,280 @@ +# Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc, microdom +from twisted.spread import pb +from zope.interface import Interface, implements +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def 
connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + def __init__(self, service): + resource.Resource.__init__(self) + self.service=service + + # add a specific child for the path "RPC2" + self.putChild("RPC2", UserStatusXR(self.service)) + + # need to do this for resources at the root of the site + self.putChild("", self) + + def _cb_render_GET(self, users, request): + userOutput = ''.join(["
<li><a href=\"%s\">%s</a></li>" % (user, user) + for user in users]) + request.write(""" + <html><head><title>Users</title></head><body> + <h1>Users</h1> + <ul> + %s + </ul></body></html>
                          """ % userOutput) + request.finish() + + def render_GET(self, request): + d = self.service.getUsers() + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + + def getChild(self, path, request): + return UserStatus(user=path, service=self.service) + +components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def _cb_render_GET(self, status, request): + request.write("""%s +
</title></head> + <body><h1>%s</h1> + <p>%s</p> + </body></html>
                          + """ % (self.user, self.user, status)) + request.finish() + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + def xmlrpc_getUsers(self): + return self.service.getUsers() + + +class IPerspectiveFinger(Interface): + + def remote_getUser(username): + """return a user's status""" + + def remote_getUsers(): + """return a user's status""" + +class PerspectiveFingerFromService(pb.Root): + + implements(IPerspectiveFinger) + + def __init__(self, service): + self.service = service + + def remote_getUser(self, username): + return self.service.getUser(username) + + def remote_getUsers(self): + return self.service.getUsers() + +components.registerAdapter(PerspectiveFingerFromService, + IFingerService, + IPerspectiveFinger) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +internet.TCPServer(8889, pb.PBServerFactory(IPerspectiveFinger(f)) + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger22.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger22.py new file mode 100755 index 000000000000..dc8deb94e8a7 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger22.py @@ -0,0 +1,297 @@ +# Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc, microdom +from twisted.spread import pb +from zope.interface import Interface, implements +from OpenSSL import SSL +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + 
self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + def __init__(self, service): + resource.Resource.__init__(self) + self.service=service + + # add a specific child for the path "RPC2" + self.putChild("RPC2", UserStatusXR(self.service)) + + # need to do this for resources at the root of the site + self.putChild("", self) + + def _cb_render_GET(self, users, request): + userOutput = ''.join(["
<li><a href=\"%s\">%s</a></li>" % (user, user) + for user in users]) + request.write(""" + <html><head><title>Users</title></head><body> + <h1>Users</h1> + <ul> + %s + </ul></body></html>
                          """ % userOutput) + request.finish() + + def render_GET(self, request): + d = self.service.getUsers() + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + + def getChild(self, path, request): + return UserStatus(user=path, service=self.service) + +components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def _cb_render_GET(self, status, request): + request.write("""%s +

                          %s

                          +

                          %s

                          + """ % (self.user, self.user, status)) + request.finish() + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + def xmlrpc_getUsers(self): + return self.service.getUsers() + + +class IPerspectiveFinger(Interface): + + def remote_getUser(username): + """return a user's status""" + + def remote_getUsers(): + """return a user's status""" + +class PerspectiveFingerFromService(pb.Root): + + implements(IPerspectiveFinger) + + def __init__(self, service): + self.service = service + + def remote_getUser(self, username): + return self.service.getUser(username) + + def remote_getUsers(self): + return self.service.getUsers() + +components.registerAdapter(PerspectiveFingerFromService, + IFingerService, + IPerspectiveFinger) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +class ServerContextFactory: + + def getContext(self): + """Create an SSL context. + + This is a sample implementation that loads a certificate from a file + called 'server.pem'.""" + ctx = SSL.Context(SSL.SSLv23_METHOD) + ctx.use_certificate_file('server.pem') + ctx.use_privatekey_file('server.pem') + return ctx + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +site = server.Site(resource.IResource(f)) +internet.TCPServer(8000, site + ).setServiceParent(serviceCollection) +internet.SSLServer(443, site, ServerContextFactory() + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +internet.TCPServer(8889, pb.PBServerFactory(IPerspectiveFinger(f)) + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerPBclient.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerPBclient.py new file mode 100755 index 000000000000..66ed0ae7696d --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerPBclient.py @@ -0,0 +1,26 @@ +# test the PB finger on port 8889 +# this code is essentially the same as +# the first example in howto/pb-usage + +from twisted.spread import pb +from twisted.internet import reactor + +def gotObject(object): + print "got object:", object + object.callRemote("getUser","moshez").addCallback(gotData) +# or +# object.callRemote("getUsers").addCallback(gotData) + +def gotData(data): + print 'server sent:', data + reactor.stop() + +def gotNoObject(reason): + print "no object:",reason + reactor.stop() + +factory = pb.PBClientFactory() 
+reactor.connectTCP("127.0.0.1",8889, factory) +factory.getRootObject().addCallbacks(gotObject,gotNoObject) +reactor.run() + diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerXRclient.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerXRclient.py new file mode 100755 index 000000000000..b854bcfc67d0 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerXRclient.py @@ -0,0 +1,5 @@ +# testing xmlrpc finger + +import xmlrpclib +server = xmlrpclib.Server('http://127.0.0.1:8000/RPC2') +print server.getUser('moshez') diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger_config.py b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger_config.py new file mode 100644 index 000000000000..226a26ab3b86 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/finger_config.py @@ -0,0 +1,38 @@ +# Easy configuration +# makeService from finger module + +def makeService(config): + # finger on port 79 + s = service.MultiService() + f = FingerService(config['file']) + h = internet.TCPServer(79, IFingerFactory(f)) + h.setServiceParent(s) + + # website on port 8000 + r = resource.IResource(f) + r.templateDirectory = config['templates'] + site = server.Site(r) + j = internet.TCPServer(8000, site) + j.setServiceParent(s) + + # ssl on port 443 + if config.get('ssl'): + k = internet.SSLServer(443, site, ServerContextFactory()) + k.setServiceParent(s) + + # irc fingerbot + if config.has_key('ircnick'): + i = IIRCClientFactory(f) + i.nickname = config['ircnick'] + ircserver = config['ircserver'] + b = internet.TCPClient(ircserver, 6667, i) + b.setServiceParent(s) + + # Pespective Broker on port 8889 + if config.has_key('pbport'): + m = internet.TCPServer( + int(config['pbport']), + pb.PBServerFactory(IPerspectiveFinger(f))) + m.setServiceParent(s) + + return s diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerproxy.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerproxy.tac new file mode 100644 index 000000000000..839c63dc422e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/fingerproxy.tac @@ -0,0 +1,110 @@ +# finger proxy +from twisted.application import internet, service +from twisted.internet import defer, protocol, reactor +from twisted.protocols import basic +from twisted.python import components +from zope.interface import Interface, implements + + +def catchError(err): + return "Internal error in server" + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value) + self.transport.loseConnection() + d.addCallback(writeValue) + + + +class FingerFactoryFromService(protocol.ClientFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class 
FingerClient(protocol.Protocol): + + def connectionMade(self): + self.transport.write(self.factory.user+"\r\n") + self.buf = [] + + def dataReceived(self, data): + self.buf.append(data) + + def connectionLost(self, reason): + self.factory.gotData(''.join(self.buf)) + +class FingerClientFactory(protocol.ClientFactory): + + protocol = FingerClient + + def __init__(self, user): + self.user = user + self.d = defer.Deferred() + + def clientConnectionFailed(self, _, reason): + self.d.errback(reason) + + def gotData(self, data): + self.d.callback(data) + + +def finger(user, host, port=79): + f = FingerClientFactory(user) + reactor.connectTCP(host, port, f) + return f.d + + +class ProxyFingerService(service.Service): + implements(IFingerService) + + def getUser(self, user): + try: + user, host = user.split('@', 1) + except: + user = user.strip() + host = '127.0.0.1' + ret = finger(user, host) + ret.addErrback(lambda _: "Could not connect to remote host") + return ret + + def getUsers(self): + return defer.succeed([]) + +application = service.Application('finger', uid=1, gid=1) +f = ProxyFingerService() +internet.TCPServer(7779, IFingerFactory(f)).setServiceParent( + service.IServiceCollection(application)) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/organized-finger.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/organized-finger.tac new file mode 100644 index 000000000000..2f9a129cf8ed --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/organized-finger.tac @@ -0,0 +1,31 @@ +# organized-finger.tac +# eg: twistd -ny organized-finger.tac + +import finger + +from twisted.internet import protocol, reactor, defer +from twisted.spread import pb +from twisted.web import resource, server +from twisted.application import internet, service, strports +from twisted.python import log + +application = service.Application('finger', uid=1, gid=1) +f = finger.FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, finger.IFingerFactory(f) + ).setServiceParent(serviceCollection) + +site = server.Site(resource.IResource(f)) +internet.TCPServer(8000, site + ).setServiceParent(serviceCollection) + +internet.SSLServer(443, site, finger.ServerContextFactory() + ).setServiceParent(serviceCollection) + +i = finger.IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) + +internet.TCPServer(8889, pb.PBServerFactory(finger.IPerspectiveFinger(f)) + ).setServiceParent(serviceCollection) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/simple-finger.tac b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/simple-finger.tac new file mode 100644 index 000000000000..2e75cb1c89ab --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/simple-finger.tac @@ -0,0 +1,17 @@ +# simple-finger.tac +# eg: twistd -ny simple-finger.tac + +from twisted.application import service + +import finger + +options = { 'file': '/etc/users', + 'templates': '/usr/share/finger/templates', + 'ircnick': 'fingerbot', + 'ircserver': 'irc.freenode.net', + 'pbport': 8889, + 'ssl': 'ssl=0' } + +ser = finger.makeService(options) +application = service.Application('finger', uid=1, gid=1) +ser.setServiceParent(service.IServiceCollection(application)) diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/twisted/plugins/finger_tutorial.py 
b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/twisted/plugins/finger_tutorial.py new file mode 100644 index 000000000000..73361ae23488 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/listings/finger/twisted/plugins/finger_tutorial.py @@ -0,0 +1,5 @@ + +from twisted.application.service import ServiceMaker + +finger = ServiceMaker( + 'finger', 'finger.tap', 'Run a finger service', 'finger') diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/pb.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/pb.html new file mode 100644 index 000000000000..65e1065a6a87 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/pb.html @@ -0,0 +1,650 @@ + + +Twisted Documentation: The Evolution of Finger: Twisted client support using Perspective Broker + + + + +

                          The Evolution of Finger: Twisted client support using Perspective Broker

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the seventh part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

                          In this part, we add a Perspective Broker service to the finger application +so that Twisted clients can access the finger server. Perspective Broker is +introduced in depth in its own section of the +core howto index.

                          + +

                          Use Perspective Broker

                          + +

                          We add support for perspective broker, Twisted's native remote object +protocol. Now, Twisted clients will not have to go through XML-RPCish +contortions to get information about users.

                          + +


                          # Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc, microdom +from twisted.spread import pb +from zope.interface import Interface, implements +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + def __init__(self, service): + resource.Resource.__init__(self) + self.service=service + + # add a 
specific child for the path "RPC2" + self.putChild("RPC2", UserStatusXR(self.service)) + + # need to do this for resources at the root of the site + self.putChild("", self) + + def _cb_render_GET(self, users, request): + userOutput = ''.join(["<li><a href=\"%s\">%s</a></li>" % (user, user) + for user in users]) + request.write(""" + <html><head><title>Users</title></head><body> + <h1>Users</h1> + <ul> + %s + </ul></body></html>""" % userOutput) + request.finish() + + def render_GET(self, request): + d = self.service.getUsers() + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + + def getChild(self, path, request): + return UserStatus(user=path, service=self.service) + +components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def _cb_render_GET(self, status, request): + request.write("""<html><head><title>%s</title></head> + <body><h1>%s</h1> + <p>%s</p> + </body></html>""" % (self.user, self.user, status)) + request.finish() + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + def xmlrpc_getUsers(self): + return self.service.getUsers() + + +class IPerspectiveFinger(Interface): + + def remote_getUser(username): + """return a user's status""" + + def remote_getUsers(): + """return a user's status""" + +class PerspectiveFingerFromService(pb.Root): + + implements(IPerspectiveFinger) + + def __init__(self, service): + self.service = service + + def remote_getUser(self, username): + return self.service.getUser(username) + + def remote_getUsers(self): + return self.service.getUsers() + +components.registerAdapter(PerspectiveFingerFromService, + IFingerService, + IPerspectiveFinger) + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +internet.TCPServer(8889, pb.PBServerFactory(IPerspectiveFinger(f)) + ).setServiceParent(serviceCollection) +
                          + +

                          A simple client to test the perspective broker finger:

                          + +


                          # test the PB finger on port 8889 +# this code is essentially the same as +# the first example in howto/pb-usage + +from twisted.spread import pb +from twisted.internet import reactor + +def gotObject(object): + print "got object:", object + object.callRemote("getUser","moshez").addCallback(gotData) +# or +# object.callRemote("getUsers").addCallback(gotData) + +def gotData(data): + print 'server sent:', data + reactor.stop() + +def gotNoObject(reason): + print "no object:",reason + reactor.stop() + +factory = pb.PBClientFactory() +reactor.connectTCP("127.0.0.1",8889, factory) +factory.getRootObject().addCallbacks(gotObject,gotNoObject) +reactor.run() +
                          +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/protocol.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/protocol.html new file mode 100644 index 000000000000..ac38d59307a0 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/protocol.html @@ -0,0 +1,1055 @@ + + +Twisted Documentation: The Evolution of Finger: adding features to the finger service + + + + +

                          The Evolution of Finger: adding features to the finger service

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the second part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

In this section of the tutorial, our finger server will continue to sprout +features: the ability for users to set finger announcements, and using our finger +service to send those announcements on the web, on IRC and over XML-RPC. +Resources and XML-RPC are introduced in the Web Applications portion of +the Twisted Web howto. More examples +using twisted.words.protocols.irc can be found +in Writing a TCP Client and +the Twisted Words examples.

                          + +

                          Setting Message By Local Users

                          + +

                          Now that port 1079 is free, maybe we can use it with a different +server, one which will let people set their messages. It does +no access control, so anyone who can login to the machine can +set any message. We assume this is the desired behavior in +our case. Testing it can be done by simply: +

                          + +
                          +% nc localhost 1079   # or telnet localhost 1079
                          +moshez
                          +Giving a tutorial now, sorry!
                          +^D
                          +
                          + +


                          # But let's try and fix setting away messages, shall we? +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerFactory(protocol.ServerFactory): + protocol = FingerProtocol + + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + +class FingerSetterProtocol(basic.LineReceiver): + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + user = self.lines[0] + status = self.lines[1] + self.factory.setUser(user, status) + +class FingerSetterFactory(protocol.ServerFactory): + protocol = FingerSetterProtocol + + def __init__(self, fingerFactory): + self.fingerFactory = fingerFactory + + def setUser(self, user, status): + self.fingerFactory.users[user] = status + +ff = FingerFactory(moshez='Happy and well') +fsf = FingerSetterFactory(ff) + +application = service.Application('finger', uid=1, gid=1) +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79,ff).setServiceParent(serviceCollection) +internet.TCPServer(1079,fsf).setServiceParent(serviceCollection) +
                          + +

                          This program has two protocol-factory-TCPServer pairs, which are both child +services of the application. Specifically, the +setServiceParent +method is used to define the two TCPServer services as children of +application, which implements +IServiceCollection. +Both services are thus started with the application.
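As a minimal sketch of that parent/child pattern (not part of the tutorial listings; the port and names are illustrative only), a factory is wrapped in a TCPServer child service, which is then parented to the application's service collection so that it starts and stops with the application:

from twisted.application import internet, service
from twisted.internet import protocol

application = service.Application('demo')             # top-level container
collection = service.IServiceCollection(application)  # adapt it to a service collection

factory = protocol.ServerFactory()                     # any factory will do for the sketch
factory.protocol = protocol.Protocol
tcpService = internet.TCPServer(8123, factory)         # child service wrapping the listening port
tcpService.setServiceParent(collection)                # started and stopped with the application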

                          + +

                          Use Services to Make Dependencies Sane

                          + +

The previous version had the setter poke at the innards of the +finger factory. This strategy is usually not a good idea: this version makes +both factories symmetric by making them both look at a single +object. Services are useful when an object is needed which is +not related to a specific network server. Here, we define a common service +class with methods that will create factories on the fly. The service +also contains methods the factories will depend on.

                          + +

                          The factory-creation methods, getFingerFactory and +getFingerSetterFactory, follow this pattern:

                          + +
                            + +
1. Instantiate a generic server factory, +twisted.internet.protocol.ServerFactory.

2. Set the protocol class, just like our factory class would have.

3. Copy a service method to the factory as a function attribute. The +function won't have access to the factory's self, but that's OK +because as a bound method it has access to the service's self, +which is what it needs. For getUser, a custom method defined in +the service gets copied. For setUser, a standard method of the +users dictionary is copied. (A short sketch of this step follows the list.)
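Here is a minimal sketch of that third step, with a hypothetical StatusService standing in for the finger service: the generic factory receives a bound method, so the copied function still sees the service's self even though it now lives on the factory.

from twisted.internet import protocol, defer

class StatusService(object):
    def __init__(self, **users):
        self.users = users

    def getUser(self, user):
        return defer.succeed(self.users.get(user, "No such user"))

    def getFingerFactory(self, protocolClass):
        f = protocol.ServerFactory()   # 1. generic factory, no subclassing
        f.protocol = protocolClass     # 2. set the protocol class
        f.getUser = self.getUser       # 3. copy the bound method across
        return f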
                          + +

                          Thus, we stopped subclassing: the service simply puts useful methods and +attributes inside the factories. We are getting better at protocol design: +none of our protocol classes had to be changed, and neither will have to +change until the end of the tutorial.

                          + +

As an application +service, this new +finger service implements the +IService interface +and can be started and stopped in a standardized manner. We'll make use of +this in the next example.

                          + +


                          # Fix asymmetry +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerSetterProtocol(basic.LineReceiver): + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self,reason): + user = self.lines[0] + status = self.lines[1] + self.factory.setUser(user, status) + +class FingerService(service.Service): + def __init__(self, **kwargs): + self.users = kwargs + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def setUser(self, user, status): + self.users[user] = status + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getFingerSetterFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerSetterProtocol + f.setUser = self.setUser + return f + +application = service.Application('finger', uid=1, gid=1) +f = FingerService(moshez='Happy and well') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79,f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(1079,f.getFingerSetterFactory() + ).setServiceParent(serviceCollection) +
                          + + + +

                          Read Status File

                          + +

                          This version shows how, instead of just letting users set their +messages, we can read those from a centrally managed file. We cache +results, and every 30 seconds we refresh it. Services are useful +for such scheduled tasks.
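The status file is plain text with one user and status per line, split on the first colon (as the _read method below does). An illustrative /etc/users might contain:

moshez: Happy and well
guest: Giving a tutorial now, sorry!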

                          + +
                          + +


                          # Read from file +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerService(service.Service): + def __init__(self, filename): + self.users = {} + self.filename = filename + self._read() + + def _read(self): + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def startService(self): + self._read() + service.Service.startService(self) + + def stopService(self): + service.Service.stopService(self) + self.call.cancel() + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +finger = internet.TCPServer(79, f.getFingerFactory()) + +finger.setServiceParent(service.IServiceCollection(application)) +f.setServiceParent(service.IServiceCollection(application)) +
                          + +

Since this version is reading data from a file (and refreshing the data +every 30 seconds), there is no FingerSetterFactory and thus +nothing listening on port 1079.

                          + +

                          Here we override the standard +startService +and +stopService +hooks in the Finger service, which is set up as a child service of +the application in the last line of the code. startService calls +_read, the function responsible for reading the data; +reactor.callLater is then used to schedule it to run again after +thirty seconds every time it is called. reactor.callLater returns +an object that lets us cancel the scheduled run in stopService +using its cancel method.
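The scheduling pattern in isolation, as a minimal sketch (the class name is illustrative, not from the listing): startService kicks off the first read, each read reschedules itself with reactor.callLater, and stopService cancels the pending call.

from twisted.application import service
from twisted.internet import reactor

class ReloadingService(service.Service):
    interval = 30  # seconds between reloads, as in the finger example

    def _read(self):
        # ... reload the data here ...
        self.call = reactor.callLater(self.interval, self._read)

    def startService(self):
        service.Service.startService(self)
        self._read()

    def stopService(self):
        service.Service.stopService(self)
        self.call.cancel()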

                          + +

                          Announce on Web, Too

                          + +

                          The same kind of service can also produce things useful for +other protocols. For example, in twisted.web, the factory +itself (Site) is almost +never subclassed — instead, it is given a resource, which represents the tree +of resources available via URLs. That hierarchy is navigated by +Site +and overriding it dynamically is possible with +getChild.
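As a minimal sketch of that idea (hypothetical resource and port, not part of the tutorial), a Site is handed a root resource whose getChild manufactures a child for whatever URL segment is requested:

import cgi
from twisted.web import resource, server, static
from twisted.internet import reactor

class GreetingTree(resource.Resource):
    def getChild(self, path, request):
        # /anything renders a small page greeting 'anything'
        return static.Data('<h1>Hello, %s</h1>' % cgi.escape(path), 'text/html')

site = server.Site(GreetingTree())
reactor.listenTCP(8080, site)   # port chosen only for illustration
reactor.run()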

                          + +

                          To integrate this into the Finger application (just because we can), we set +up a new TCPServer that calls the Site factory and retrieves resources via a +new function of FingerService named getResource. +This function specifically returns a Resource object with an overridden getChild method.

                          + +


                          # Read from file, announce on the web! +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.protocols import basic +from twisted.web import resource, server, static +import cgi + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class FingerResource(resource.Resource): + + def __init__(self, users): + self.users = users + resource.Resource.__init__(self) + + # we treat the path as the username + def getChild(self, username, request): + """ + 'username' is a string. + 'request' is a 'twisted.web.server.Request'. + """ + messagevalue = self.users.get(username) + username = cgi.escape(username) + if messagevalue is not None: + messagevalue = cgi.escape(messagevalue) + text = '<h1>%s</h1><p>%s</p>' % (username,messagevalue) + else: + text = '<h1>%s</h1><p>No such user</p>' % username + return static.Data(text, 'text/html') + +class FingerService(service.Service): + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getResource(self): + r = FingerResource(self.users) + return r + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(f.getResource()) + ).setServiceParent(serviceCollection) +
                          + + +

                          Announce on IRC, Too

                          + +

This is the first time there is client code. IRC clients often +act a lot like servers: responding to events from the network. +The reconnecting client factory will make sure that severed links +will get re-established, with an intelligently tweaked exponential +back-off algorithm. The IRC client itself is simple: the only +real hack is getting the nickname from the factory in connectionMade.
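The back-off is tunable through attributes on ReconnectingClientFactory; a minimal sketch (the numbers here are illustrative, not the tutorial's values):

from twisted.internet import protocol

class TunedClientFactory(protocol.ReconnectingClientFactory):
    # protocol = IRCReplyBot  # whichever client protocol is being built
    initialDelay = 2.0   # seconds before the first reconnection attempt
    factor = 2.0         # multiply the delay by this after every failure
    maxDelay = 300       # but never wait longer than five minutes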

                          + +


                          # Read from file, announce on the web, irc +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.web import resource, server, static + +import cgi + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + + +class IRCReplyBot(irc.IRCClient): + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + irc.IRCClient.msg(self, user, msg+': '+message) + d.addCallback(writeResponse) + +class FingerService(service.Service): + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getResource(self): + r = resource.Resource() + r.getChild = (lambda path, request: + static.Data('<h1>%s</h1><p>%s</p>' % + tuple(map(cgi.escape, + [path,self.users.get(path, + "No such user <p/> usage: site/user")])), + 'text/html')) + return r + + def getIRCBot(self, nickname): + f = protocol.ReconnectingClientFactory() + f.protocol = IRCReplyBot + f.nickname = nickname + f.getUser = self.getUser + return f + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(f.getResource()) + ).setServiceParent(serviceCollection) +internet.TCPClient('irc.freenode.org', 6667, f.getIRCBot('fingerbot') + ).setServiceParent(serviceCollection) +
                          + +

FingerService now has another new function, +getIRCBot, which returns the +ReconnectingClientFactory. This factory in turn will instantiate +the IRCReplyBot protocol. The IRCBot is configured in the last +line to connect to irc.freenode.org with a nickname of +fingerbot.

                          + +

                          By overriding irc.IRCClient.connectionMade, +IRCReplyBot can access the nickname attribute of the +factory that instantiated it.

                          + +

                          Add XML-RPC Support

                          + +

                          In Twisted, XML-RPC support is handled just as though it was +another resource. That resource will still support GET calls normally +through render(), but that is usually left unimplemented. Note +that it is possible to return deferreds from XML-RPC methods. +The client, of course, will not get the answer until the deferred +is triggered.
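A minimal sketch of an XML-RPC method returning a deferred (hypothetical resource and port; in the finger code the deferred comes from getUser): the reply is only written once the deferred fires.

from twisted.web import xmlrpc, server
from twisted.internet import defer, reactor

class EchoXR(xmlrpc.XMLRPC):
    def xmlrpc_echo(self, text):
        d = defer.Deferred()
        # fire the result a second later; the caller just sees a normal reply
        reactor.callLater(1, d.callback, text)
        return d

reactor.listenTCP(8008, server.Site(EchoXR()))   # illustrative port
reactor.run()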

                          + +


                          # Read from file, announce on the web, irc, xml-rpc +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.web import resource, server, static, xmlrpc +import cgi + +class FingerProtocol(basic.LineReceiver): + def lineReceived(self, user): + d = self.factory.getUser(user) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + self.transport.write(message + '\r\n') + self.transport.loseConnection() + d.addCallback(writeResponse) + +class IRCReplyBot(irc.IRCClient): + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + + def onError(err): + return 'Internal error in server' + d.addErrback(onError) + + def writeResponse(message): + irc.IRCClient.msg(self, user, msg+': '+message) + d.addCallback(writeResponse) + +class FingerService(service.Service): + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getResource(self): + r = resource.Resource() + r.getChild = (lambda path, request: + static.Data('<h1>%s</h1><p>%s</p>' % + tuple(map(cgi.escape, + [path,self.users.get(path, "No such user")])), + 'text/html')) + x = xmlrpc.XMLRPC() + x.xmlrpc_getUser = self.getUser + r.putChild('RPC2', x) + return r + + def getIRCBot(self, nickname): + f = protocol.ReconnectingClientFactory() + f.protocol = IRCReplyBot + f.nickname = nickname + f.getUser = self.getUser + return f + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(f.getResource()) + ).setServiceParent(serviceCollection) +internet.TCPClient('irc.freenode.org', 6667, f.getIRCBot('fingerbot') + ).setServiceParent(serviceCollection) +
                          + +

                          Instead of a web browser, we can test the XMLRPC finger using a simple +client based on Python's built-in xmlrpclib, which will access +the resource we've made available at localhost/RPC2.

                          + +


                          # testing xmlrpc finger + +import xmlrpclib +server = xmlrpclib.Server('http://127.0.0.1:8000/RPC2') +print server.getUser('moshez') +
                          + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/style.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/style.html new file mode 100644 index 000000000000..71931d9b796e --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/style.html @@ -0,0 +1,313 @@ + + +Twisted Documentation: The Evolution of Finger: cleaning up the finger code + + + + +

                          The Evolution of Finger: cleaning up the finger code

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the third part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

                          In this section of the tutorial, we'll clean up our code so that it is +closer to a readable and extendable style.

                          + +

                          Write Readable Code

                          + +

                          The last version of the application had a lot of hacks. We avoided +sub-classing, didn't support things like user listings over the web, +and removed all blank lines -- all in the interest of code +which is shorter. Here we take a step back, subclass what is more +naturally a subclass, make things which should take multiple lines +take them, etc. This shows a much better style of developing Twisted +applications, though the hacks in the previous stages are sometimes +used in throw-away prototypes.

                          + +


                          # Do everything properly +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.web import resource, server, static, xmlrpc +import cgi + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class UserStatusTree(resource.Resource): + def __init__(self, service): + resource.Resource.__init__(self) + self.service = service + + def render_GET(self, request): + d = self.service.getUsers() + def formatUsers(users): + l = ['<li><a href="%s">%s</a></li>' % (user, user) + for user in users] + return '<ul>'+''.join(l)+'</ul>' + d.addCallback(formatUsers) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + def getChild(self, path, request): + if path=="": + return UserStatusTree(self.service) + else: + return UserStatus(path, self.service) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(cgi.escape) + d.addCallback(lambda m: + '<h1>%s</h1>'%self.user+'<p>%s</p>'%m) + d.addCallback(request.write) + d.addCallback(lambda _: request.finish()) + return server.NOT_DONE_YET + + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + +class FingerService(service.Service): + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + def getFingerFactory(self): + f = protocol.ServerFactory() + f.protocol = FingerProtocol + f.getUser = self.getUser + return f + + def getResource(self): + r = UserStatusTree(self) + x = UserStatusXR(self) + r.putChild('RPC2', x) + return r + + def getIRCBot(self, nickname): + f = protocol.ReconnectingClientFactory() + f.protocol = IRCReplyBot + f.nickname = nickname + f.getUser = self.getUser + return f + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, f.getFingerFactory() + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(f.getResource()) + 
).setServiceParent(serviceCollection) +internet.TCPClient('irc.freenode.org', 6667, f.getIRCBot('fingerbot') + ).setServiceParent(serviceCollection) +
                          + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/tutorial/web.html b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/web.html new file mode 100644 index 000000000000..ebb0ca8bd0ac --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/tutorial/web.html @@ -0,0 +1,537 @@ + + +Twisted Documentation: The Evolution of Finger: a web frontend + + + + +

                          The Evolution of Finger: a web frontend

                          + +
                          + + + +

                          Introduction

                          + +

                          This is the sixth part of the Twisted tutorial Twisted from Scratch, or The Evolution of Finger.

                          + +

In this part, we demonstrate adding a web frontend using simple twisted.web.resource.Resource objects: UserStatusTree, which will produce a listing of all +users at the base URL (/) of our site; UserStatus, which gives the status of each user at the +location /username; and UserStatusXR, +which exposes an XMLRPC interface to getUser and +getUsers functions at the URL /RPC2.

                          + +

                          In this example we construct HTML segments manually. If the web interface +was less trivial, we would want to use more sophisticated web templating and +design our system so that HTML rendering and logic were clearly separated.

                          + +


                          # Do everything properly, and componentize +from twisted.application import internet, service +from twisted.internet import protocol, reactor, defer +from twisted.words.protocols import irc +from twisted.protocols import basic +from twisted.python import components +from twisted.web import resource, server, static, xmlrpc, microdom +from zope.interface import Interface, implements +import cgi + +class IFingerService(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def getUsers(): + """Return a deferred returning a list of strings""" + +class IFingerSetterService(Interface): + + def setUser(user, status): + """Set the user's status to something""" + +def catchError(err): + return "Internal error in server" + +class FingerProtocol(basic.LineReceiver): + + def lineReceived(self, user): + d = self.factory.getUser(user) + d.addErrback(catchError) + def writeValue(value): + self.transport.write(value+'\r\n') + self.transport.loseConnection() + d.addCallback(writeValue) + + +class IFingerFactory(Interface): + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerFactoryFromService(protocol.ServerFactory): + + implements(IFingerFactory) + + protocol = FingerProtocol + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(FingerFactoryFromService, + IFingerService, + IFingerFactory) + +class FingerSetterProtocol(basic.LineReceiver): + + def connectionMade(self): + self.lines = [] + + def lineReceived(self, line): + self.lines.append(line) + + def connectionLost(self, reason): + if len(self.lines) == 2: + self.factory.setUser(*self.lines) + + +class IFingerSetterFactory(Interface): + + def setUser(user, status): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol returning a string""" + + +class FingerSetterFactoryFromService(protocol.ServerFactory): + + implements(IFingerSetterFactory) + + protocol = FingerSetterProtocol + + def __init__(self, service): + self.service = service + + def setUser(self, user, status): + self.service.setUser(user, status) + + +components.registerAdapter(FingerSetterFactoryFromService, + IFingerSetterService, + IFingerSetterFactory) + +class IRCReplyBot(irc.IRCClient): + + def connectionMade(self): + self.nickname = self.factory.nickname + irc.IRCClient.connectionMade(self) + + def privmsg(self, user, channel, msg): + user = user.split('!')[0] + if self.nickname.lower() == channel.lower(): + d = self.factory.getUser(msg) + d.addErrback(catchError) + d.addCallback(lambda m: "Status of %s: %s" % (msg, m)) + d.addCallback(lambda m: self.msg(user, m)) + + +class IIRCClientFactory(Interface): + + """ + @ivar nickname + """ + + def getUser(user): + """Return a deferred returning a string""" + + def buildProtocol(addr): + """Return a protocol""" + + +class IRCClientFactoryFromService(protocol.ClientFactory): + + implements(IIRCClientFactory) + + protocol = IRCReplyBot + nickname = None + + def __init__(self, service): + self.service = service + + def getUser(self, user): + return self.service.getUser(user) + +components.registerAdapter(IRCClientFactoryFromService, + IFingerService, + IIRCClientFactory) + +class UserStatusTree(resource.Resource): + + def __init__(self, service): + resource.Resource.__init__(self) + self.service=service + + # add a specific child for the path 
"RPC2" + self.putChild("RPC2", UserStatusXR(self.service)) + + # need to do this for resources at the root of the site + self.putChild("", self) + + def _cb_render_GET(self, users, request): + userOutput = ''.join(["<li><a href=\"%s\">%s</a></li>" % (user, user) + for user in users]) + request.write(""" + <html><head><title>Users</title></head><body> + <h1>Users</h1> + <ul> + %s + </ul></body></html>""" % userOutput) + request.finish() + + def render_GET(self, request): + d = self.service.getUsers() + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + + def getChild(self, path, request): + return UserStatus(user=path, service=self.service) + +components.registerAdapter(UserStatusTree, IFingerService, resource.IResource) + +class UserStatus(resource.Resource): + + def __init__(self, user, service): + resource.Resource.__init__(self) + self.user = user + self.service = service + + def _cb_render_GET(self, status, request): + request.write("""<html><head><title>%s</title></head> + <body><h1>%s</h1> + <p>%s</p> + </body></html>""" % (self.user, self.user, status)) + request.finish() + + def render_GET(self, request): + d = self.service.getUser(self.user) + d.addCallback(self._cb_render_GET, request) + + # signal that the rendering is not complete + return server.NOT_DONE_YET + +class UserStatusXR(xmlrpc.XMLRPC): + + def __init__(self, service): + xmlrpc.XMLRPC.__init__(self) + self.service = service + + def xmlrpc_getUser(self, user): + return self.service.getUser(user) + + def xmlrpc_getUsers(self): + return self.service.getUsers() + + +class FingerService(service.Service): + + implements(IFingerService) + + def __init__(self, filename): + self.filename = filename + self.users = {} + self._read() + + def _read(self): + self.users.clear() + for line in file(self.filename): + user, status = line.split(':', 1) + user = user.strip() + status = status.strip() + self.users[user] = status + self.call = reactor.callLater(30, self._read) + + def getUser(self, user): + return defer.succeed(self.users.get(user, "No such user")) + + def getUsers(self): + return defer.succeed(self.users.keys()) + + +application = service.Application('finger', uid=1, gid=1) +f = FingerService('/etc/users') +serviceCollection = service.IServiceCollection(application) +internet.TCPServer(79, IFingerFactory(f) + ).setServiceParent(serviceCollection) +internet.TCPServer(8000, server.Site(resource.IResource(f)) + ).setServiceParent(serviceCollection) +i = IIRCClientFactory(f) +i.nickname = 'fingerbot' +internet.TCPClient('irc.freenode.org', 6667, i + ).setServiceParent(serviceCollection) +
                          + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/udp.html b/vendor/Twisted-10.0.0/doc/core/howto/udp.html new file mode 100644 index 000000000000..27272ffd702c --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/udp.html @@ -0,0 +1,275 @@ + + +Twisted Documentation: UDP Networking + + + + +

                          UDP Networking

                          + +
                          + + +

                          Overview

                          + +

                          Unlike TCP, UDP has no notion of connections. A UDP socket can receive + datagrams from any server on the network, and send datagrams to any host + on the network. In addition, datagrams may arrive in any order, never + arrive at all, or be duplicated in transit.

                          + +

Since there are no connections, we use only a single object, a protocol, for each UDP socket. We then use the reactor to connect this protocol to a UDP transport, using the twisted.internet.interfaces.IReactorUDP reactor API.

                          + +

                          DatagramProtocol

                          + +

At the base, the place where you actually implement the protocol parsing and handling, is the DatagramProtocol class. This class will usually be descended from twisted.internet.protocol.DatagramProtocol. Most protocol handlers inherit either from this class or from one of its convenience children. The DatagramProtocol class receives datagrams, and can send them out over the network. Received datagrams include the address they were sent from, and when sending datagrams the address to send to must be specified.

                          + +

                          Here is a simple example:

                          +


from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor

class Echo(DatagramProtocol):

    def datagramReceived(self, data, (host, port)):
        print "received %r from %s:%d" % (data, host, port)
        self.transport.write(data, (host, port))

reactor.listenUDP(9999, Echo())
reactor.run()
                          + +

As you can see, the protocol is registered with the reactor. This means it may be persisted if it's added to an application, and thus it has twisted.internet.protocol.DatagramProtocol.startProtocol and twisted.internet.protocol.DatagramProtocol.stopProtocol methods that will get called when the protocol is connected to and disconnected from a UDP socket.
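For example, a protocol can use these hooks to note when it is attached to or detached from its port. This is only an illustrative sketch, not part of the howto; the class name and log strings are assumptions:

from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor

class Logger(DatagramProtocol):

    def startProtocol(self):
        # Called once the protocol has been connected to a UDP transport.
        print "listening on", self.transport.getHost()

    def stopProtocol(self):
        # Called after the underlying port has been shut down.
        print "port closed"

    def datagramReceived(self, data, (host, port)):
        print "received %r from %s:%d" % (data, host, port)

port = reactor.listenUDP(9999, Logger())
# Calling port.stopListening() later would disconnect the protocol and
# trigger stopProtocol().
reactor.run()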

                          + +

                          The protocol's transport attribute will + implement the twisted.internet.interfaces.IUDPTransport interface. + Notice that the host argument should be an + IP, not a hostname. If you only have the hostname use reactor.resolve() to resolve the address (see twisted.internet.interfaces.IReactorCore.resolve).
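For instance, a small sketch (not from the howto) of resolving a hostname before writing a datagram; 'example.com' and port 8000 are illustrative values:

from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor

class OneShot(DatagramProtocol):

    def startProtocol(self):
        # write() needs an IP address, so resolve the hostname first.
        reactor.resolve('example.com').addCallback(self._send)

    def _send(self, ip):
        self.transport.write("hello", (ip, 8000))

reactor.listenUDP(0, OneShot())
reactor.run()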

                          + + +

                          Connected UDP

                          + +

A connected UDP socket is slightly different from a standard one - it can only send and receive datagrams to/from a single address, but this does not in any way imply a connection. Datagrams may still arrive in any order, and the port on the other side may have no one listening. The benefit of the connected UDP socket is that it may provide notification of undelivered packets. This depends on many factors, almost all of which are out of the control of the application, but it still presents certain benefits which occasionally make it useful.

                          + +

Unlike a regular UDP protocol, we do not need to specify where to send datagrams to, and are not told where they came from, since they can only come from the address the socket is 'connected' to.

                          + +


from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor

class Helloer(DatagramProtocol):

    def startProtocol(self):
        host = "192.168.1.1"
        port = 1234
        self.transport.connect(host, port)
        print "we can only send to %s:%d now" % (host, port)
        self.transport.write("hello") # no need for address

    def datagramReceived(self, data, (host, port)):
        print "received %r from %s:%d" % (data, host, port)

    # Possibly invoked if there is no server listening on the
    # address to which we are sending.
    def connectionRefused(self):
        print "No one listening"

# 0 means any port, we don't care in this case
reactor.listenUDP(0, Helloer())
reactor.run()
                          + +

                          Note that connect(), like write() will only accept IP addresses, not + unresolved domain names. To obtain the IP of a domain name use reactor.resolve(), e.g.:

                          + +


from twisted.internet import reactor

def gotIP(ip):
    print "IP of 'example.com' is", ip

reactor.resolve('example.com').addCallback(gotIP)
                          + +

                          Connecting to a new address after a previous connection, or + making a connected port unconnected are not currently supported, + but will likely be supported in the future.

                          + +

                          Multicast UDP

                          + +

                          A multicast UDP socket can send and receive datagrams from multiple clients. + The interesting and useful feature of the multicast is that a client can + contact multiple servers with a single packet, without knowing the specific IP + of any of the hosts.

                          + +
                          + +

                          + The server protocol is very simple, and closely resembles a normal listenUDP + implementation. The main difference is that instead of listenUDP, + listenMulticast is called with a specified port number. The server must also + call joinGroup to specify on which multicast IP address it will service + requests. Another item of interest is the contents of the datagram. Many + different applications use multicast as a way of device discovery, which leads + to an abundance of packets flying around. Checking the payload can ensure + that we only service requests from our specific clients. +
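The MulticastServer.py listing referred to here is not reproduced in this extract, so the following is only a sketch reconstructed from the description above; it reuses the 224.0.0.1 group, port 8005 and the 'UniqueID' payload of the client listing below:

from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor

class MulticastServerUDP(DatagramProtocol):

    def startProtocol(self):
        print "Multicast server started"
        # Join the group so this socket receives datagrams sent to it.
        self.transport.joinGroup("224.0.0.1")

    def datagramReceived(self, datagram, address):
        # Check the payload so we only answer our own clients.
        if datagram == 'UniqueID':
            print "Server received %r from %r" % (datagram, address)
            self.transport.write("data", address)

reactor.listenMulticast(8005, MulticastServerUDP())
reactor.run()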

                          + +


from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor

class MulticastClientUDP(DatagramProtocol):

    def datagramReceived(self, datagram, address):
        print "Received:" + repr(datagram)

# Send multicast on 224.0.0.1:8005, on our dynamically allocated port
reactor.listenUDP(0, MulticastClientUDP()).write('UniqueID',
                                                 ('224.0.0.1', 8005))
reactor.run()
listings/udp/MulticastClient.py
                          + +

                          + This is a mirror implementation of a standard UDP client. The only difference + is that the destination IP is the multicast address. This datagram will be + distributed to every server listening on 224.0.0.1 and port 8005. Note that + the client port is specified as 0, as we have no need to keep track of what + port the client is listening on. +

                          + +

                          Acknowledgements

                          + +

                          Thank you to all contributors to this document, including:

                          + +
                            +
                          • Kyle Robertson, author of the explanation and examples of multicast
                          • +
                          + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/upgrading.html b/vendor/Twisted-10.0.0/doc/core/howto/upgrading.html new file mode 100644 index 000000000000..3719ce1b77b9 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/upgrading.html @@ -0,0 +1,331 @@ + + +Twisted Documentation: Upgrading Applications + + + + +

                          Upgrading Applications

                          + +
                          + + +

Applications must frequently deal with data that lives longer than the programs that create it. Sometimes the structure of that data changes over time, but new versions of a program must be able to accommodate data created by an older version. These versions may change very quickly, especially during development of new code. Sometimes different versions of the same program are running at the same time, sharing data across a network connection. These situations all result in a need for a way to upgrade data structures.

                          + +

                          Basic Persistence: Application and .tap files

                          + +

Simple object persistence (using pickle or jelly) provides the fundamental "save the object to disk" functionality at application shutdown. If you use the Application object, every object referenced by your Application will be saved into a -shutdown.tap file (named after your Application) when the program terminates. When you use twistd to launch that new .tap file, the Application object will be restored along with all of its referenced data.

                          + +

                          This provides a simple way to have data outlive any particular invocation +of your program: simply store it as an attribute of the Application. Note +that all Services are referenced by the Application, so their attributes +will be stored as well. Ports that have been bound with listenTCP (and the +like) are also remembered, and the sockets are created at startup time (when +Application.run is called).

                          + +

                          To influence the way that the Application is persisted, you can adapt +it to twisted.persisted.sob.IPersistable and use +the setStyle(style) method with +a string like pickle or source. These use different serializers (and different +extensions: .tap and .tas respectively) for the +saved Application.

                          + +

                          You can manually cause the application to be saved by calling its +.save method (on the twisted.persisted.sob.IPersistable +adapted object).
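Putting those two paragraphs together, a minimal sketch; the application name and tag are illustrative assumptions:

from twisted.application import service
from twisted.persisted import sob

application = service.Application('myapp')   # hypothetical name

persistable = sob.IPersistable(application)
persistable.setStyle('source')   # persist as python source (.tas) rather than pickle (.tap)
persistable.save('manual')       # write the current state out immediately, tagged 'manual'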

                          + + +

                          Versioned: New Code Meets Old Data

                          + +

                          So suppose you're running version 1 of some application, and you want to +upgrade to version 2. You shut down the program, giving you a .tap file that +you could restore with twistd to get back to the same state that you had +before. The upgrade process is to then install the new version of the +application, and then use twistd to launch the saved .tap file. The old data +will be loaded into classes created with the new code, and now you'll have a +program running with the new behavior but the old data.

                          + +

But what about the data structures that have changed? Since these structures are really just pickled class instances, the real question is: what about the class definitions that have changed? Changes to class methods are easy: nothing about them is saved in the .tap file. The issue is when the data attributes of an instance are added, removed, or their format is changed.

                          + +

                          Twisted provides a mechanism called Versioned to ease these upgrades. +Each version of the data structure (i.e. each version of the class) gets a +version number. This number must change every time you add or remove a data +attribute to the class. It must also change every time you modify one of +those data attributes: for example, if you use a string in one version and +an integer in another, those versions must have different version numbers. +

                          + +

                          The version number is defined in a class attribute named +persistenceVersion. This is an integer which will be stored in +the .tap file along with the rest of the instance state. When the object is +unserialized, the saved persistenceVersion is compared against the current +class's value, and if they differ, special upgrade methods are called. These +methods are named upgradeToVersionNN, and there must be one for +each intermediate version. These methods are expected to manipulate the +instance's state from the previous version's format into that of the new +version.

                          + +

                          To use this, simply have your class inherit from Versioned. You don't have to do this +from the very beginning of time: all objects have an implicit version number +of 0 when they don't inherit from Versioned. So when you first make +an incompatible data-format change to your class, add Versioned to the +inheritance list, and add an upgradeToVersion1 method.

                          + +

                          For example, suppose the first version of our class saves an integer +which measures the size of a line. We release this as version 1.0 of our +neat application:

                          + +


class Thing:
    def __init__(self, length):
        self.length = length
                          + +

                          Then we fix some bugs elsewhere, and release versions 1.1 and 1.2 of the +application. Later, we decide that we should add some units to the length, +so that people can refer to it in inches or meters. Version 1.3 is shipped +with the following code:

                          + +


class Thing(Versioned):
    persistenceVersion = 1
    def __init__(self, length, units):
        self.length = "%d %s" % (length, units)
    def upgradeToVersion1(self):
        self.length = "%d inches" % self.length
                          + +

                          Note that we must make an assumption about what the previous value meant: +in this case, we assume the number was in inches.

                          + +

                          1.4 and 1.5 are shipped with other changes. Then in version 1.6 we decide +that saving the two values as a string was foolish and that it would be +better to save the number and the string separately, using a tuple. We ship +1.6 with the following:

                          + +


class Thing(Versioned):
    persistenceVersion = 2
    def __init__(self, length, units):
        self.length = (length, units)
    def upgradeToVersion1(self):
        self.length = "%d inches" % self.length
    def upgradeToVersion2(self):
        (length, units) = self.length.split()
        self.length = (length, units)
                          + +

                          Note that we must provide both upgradeToVersion1 +and upgradeToVersion2. We have to assume that the +saved .tap files which will be provided to this class come from a random +assortment of old versions: we must be prepared to accept anything ever +saved by a released version of our application.

                          + +

Finally, version 2.0 adds multiple dimensions. Instead of merely recording the length of a line, it records the size of an N-dimensional rectangular solid. For backwards compatibility, all 1.X versions of the program are assumed to be dealing with a 1-dimensional line. We change the name of the attribute from .length to .size to reflect the new meaning.

                          + +


class Thing(Versioned):
    persistenceVersion = 3
    def __init__(self, dimensions):
        # dimensions is a list of tuples, each is (length, units)
        self.size = dimensions
        self.name = ["line", "square", "cube", "hypercube"][len(dimensions) - 1]
    def upgradeToVersion1(self):
        self.length = "%d inches" % self.length
    def upgradeToVersion2(self):
        (length, units) = self.length.split()
        self.length = (length, units)
    def upgradeToVersion3(self):
        self.size = [self.length]
        del self.length
        self.name = "line"
                          + +

                          If a .tap file from the earliest version of our program were to be loaded +by the latest code, the following sequence would occur for each Thing +instance contained inside:

                          + +
                            + +
1. An instance of Thing would be created, with a __dict__ that contained a single attribute .length, which was an integer, like 5.
2. self.upgradeToVersion1() would be called, changing self.length into a string, like "5 inches".
3. self.upgradeToVersion2() would be called, changing self.length into a tuple, like ("5", "inches").
4. Finally, self.upgradeToVersion3() would be called, creating self.size as a list holding a single dimension, like [("5", "inches")]. The old .length attribute is deleted, and a new .name is created with the type of shape this instance represents ("line").
                          + +

                          Some hints for the upgradeVersion methods:

                          + +
                            + +
• They must do everything the __init__ method would have done, as well as anything any other method might have done during the lifetime of the object.
• If the class has (or used to have) methods which can add attributes that weren't created in __init__, then the saved object may have a haphazard subset of those attributes, depending upon which methods were called. The upgradeVersion methods must be prepared to deal with this; hasattr and .get may be useful (see the sketch after this list).
• Once you have released a class with a given upgradeVersion method, you should never change that method (assuming you care about infinite backwards compatibility).
• You must add a new upgradeVersion method (and bump the persistenceVersion value) for each and every release that has a different set of data attributes than the previous release.
• Versioned works by providing __setstate__ and __getstate__ methods. You probably don't want to override these methods without being very careful to call the Versioned versions at exactly the right time. It also requires a doUpgrade function to be called after all the objects are loaded. This is done automatically by Application.run.
• Depending upon how they are serialized, Versioned objects can probably be sent across a network connection, and the upgrade process can be made to occur upon receipt (you'll want to look at the requireUpgrade function). This might be useful in providing compatibility with an older peer. Note, however, that Versioned does not let you go backwards in time; there is no downgradeVersionNN method. This means it is probably only useful for compatibility in one direction: the newer-to-older direction must still be explicitly handled by the application.
• In general, backwards compatibility is handled by pretending that the old code was restricting itself to a narrow subset of the capabilities of the new code. The job of the upgrade routines is then to translate the old representation into a new one.
                          + +
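As an illustration of the second hint, here is a sketch of a defensive upgrade method; the Preferences class and its attributes are hypothetical, not taken from the text above:

from twisted.persisted.styles import Versioned

class Preferences(Versioned):
    persistenceVersion = 1

    def __init__(self):
        self.colour = "green"
        self.fontSize = 12          # attribute added in version 1

    def upgradeToVersion1(self):
        # An old save may or may not already have a fontSize attribute,
        # depending on which methods ran, so do not assume it is absent.
        if not hasattr(self, 'fontSize'):
            self.fontSize = 12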

                          For more information, look at the doc strings for styles.Versioned, as well as the app.Application class and the Application HOWTO.

                          + + +

                          Rebuild: Loading New Code Without Restarting

                          + +

                          Versioned is good for handling changes between +released versions of your program, where the application state is saved on +disk during the upgrade. But while you are developing that code, you often +want to change the behavior of the running program, without the +slowdown of saving everything out to disk, shutting down, and restarting. +Sometimes it will be difficult or time-consuming to get back to the previous +state: the running program could include ephemeral objects (like open +sockets) which cannot be persisted.

                          + +

                          twisted.python.rebuild provides a function +called rebuild which helps smooth this cycle. It allows objects +in a running program to be upgraded to a new version of the code without +shutting down.

                          + +

                          To use it, simply call rebuild on the module +that holds the classes you want to be upgraded. Through deep gc magic, all instances of classes in that module will +be located and upgraded.
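In the simplest case that is a two-line affair; 'myproject.handlers' is a hypothetical module name:

from twisted.python.rebuild import rebuild
import myproject.handlers        # the module whose classes were just edited

rebuild(myproject.handlers)      # existing instances now use the new definitions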

                          + +

                          Typically, this is done in response to a privileged command sent over a +network connection. The usual development cycle is to start the server, get +it into an interesting state, see a problem, edit the class definition, then +push the rebuild yourself button. That button could be a magic +web page which, when requested, runs rebuild(mymodule), or a special IRC command, or +perhaps just a socket that listens for connections and accepts a password to +trigger the rebuild. (You want this to be a privileged operation to prevent +someone from making your server do a rebuild while you're in the middle of +editing the code).

                          + +

                          A few useful notes about the rebuild process:

                          + +
                            +
• If the module has a top-level attribute named ALLOW_TWISTED_REBUILD, this attribute must evaluate to True. Should it be false, the rebuild attempt will raise an exception (see the fragment after this list).
• Adapters (from twisted.python.components) use top-level registration function calls. These are handled correctly during rebuilds, and the usual duplicate registration errors are not raised.
• Rebuilds may be slow: every single object known to the interpreter must be examined to see if it is one of the classes being changed.
                          + +
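For the first point, the guard is just a module-level constant; this fragment is illustrative:

# At the top of a module that must never be rebuilt in place:
ALLOW_TWISTED_REBUILD = False    # rebuild() on this module will now raise an exception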

                          Finally, note that rebuild cannot currently be +mixed with Versioned. rebuild does +not run any of the classes' methods, whereas Versioned works by +running __setstate__ during the load process and +doUpgrade afterwards. This means rebuild can only +be used to process upgrades that do not change the data attributes of any of +the involved classes. Any time attributes are added or removed, the program +must be shut down, persisted, and restarted, with upgradeToVersionNN methods +used to handle the attributes. (this may change in the future, but for now +the implementation is easier and more reliable with this restriction).

                          + +
                          + +

                          Index

                          + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/vision.html b/vendor/Twisted-10.0.0/doc/core/howto/vision.html new file mode 100644 index 000000000000..8b763b12c592 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/vision.html @@ -0,0 +1,43 @@ + + +Twisted Documentation: The Vision For Twisted + + + + +

                          The Vision For Twisted

                          +
                            +
                            + + +

                            Many other documents in this repository are dedicated to + defining what Twisted is. Here, I will attempt to explain not + what Twisted is, but what it should be, once I've met my goals + with it.

                            + +

                            First, Twisted should be fun. It began as a game, it is + being used commercially in games, and it will be, I hope, an + interactive and entertaining experience for the end-user.

                            + +

                            Twisted is a platform for developing internet applications. + While python, by itself, is a very powerful language, there are + many facilities it lacks which other languages have spent great + attention to adding. It can do this now; Twisted is a good (if + somewhat idiosyncratic) pure-python framework or library, + depending on how you treat it, and it continues to improve.

                            + +

                            As a platform, Twisted should be focused on integration. + Ideally, all functionality will be accessible through all + protocols. Failing that, all functionality should be + configurable through at least one protocol, with a seamless and + consistent user-interface. The next phase of development will + be focusing strongly on a configuration system which will unify + many disparate pieces of the current infrastructure, and allow + them to be tacked together by a non-programmer.

                            + +
                            + +

                            Index

                            + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/howto/website-template.tpl b/vendor/Twisted-10.0.0/doc/core/howto/website-template.tpl new file mode 100644 index 000000000000..cf742e8e98fd --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/howto/website-template.tpl @@ -0,0 +1,22 @@ + + + + + +Twisted Documentation: + + + + +

                            +
                            +
                            + +
                            + +

                            Index

                            + + + diff --git a/vendor/Twisted-10.0.0/doc/core/img/TwistedLogo.bmp b/vendor/Twisted-10.0.0/doc/core/img/TwistedLogo.bmp new file mode 100644 index 0000000000000000000000000000000000000000..940ede07ea6ce925effe24bc77b7bf798abaf766 GIT binary patch literal 55494 zcmeI*1F$5?76#y1+qP}nwrv};wr$(CZQHhOd-uIBHse+FD_7&roqOv>%tUo{R#xUY zc~1UE*$Xc7$ZLK6{hQVQllym+e+T(DK%de5->1)UzTaS|@bBLn{`*h;S5u&-Kuv*~ z0yPC{3e*&+DNs|Ora(=BiWK|%P+rld<69iaw+i6JMS#H>T*kD*P{`lkHa?34$ z{`u!a4?Q&1R8wVPhE(R!dwu)%{rTsg+cvVgqkk#z*kg~)G}BCtG)L+^_uTXQ@4x@- zv(HvrZ8fT_@!P`dIp>`7*=L`vsoNF`{QmpzXPj}y2qTP;Bs;z=u)qSZzy3NfaQyMd z4?FCz$u>E6Mjdt3i!Z+Tk3ars%Q$;&C!c@*`5J4iG4Q|x=a3rwzT%22Y%vc%{O}Aj z%#hPE`IVkG+ibHhzWAcoQn;0gbMM@L|NT==Ic1Wt$vgBODE{rY-+uMgR~u}wK_$GW zw;5-g@rfs%Xl1m$@LBHbV~#n7GbhQ8oIA74Hk&Pm?xGt<8fm1QmdUS%AAb1LPCLze zdm*)(nDR#-eYC;~E9C39B$2}%dJi~<_nv(6$?2w>t`gqkFo!)p{q)l&#@u@zq2F%1 z?Y0RgoG^<8W$sgSTWqlfpbqkFv&}YDc|FAxQ`~poeMZuI$XcR^z9A%VI^4rJ09d5YcILvFk7vmlL3`JK`O)V~?HFGWnIU?Xt@*KmGJm z@06f7-gsl)dFQR9f|4{Wd57LR_Sj=>Pj9~Y=G=47UGk~WjPxzK=%T#1y^2lTI}q}S zBaT>!RxLKR7*<|+WqB#QsRtZz0JFDJhemtprI+GzZ{QRXZL-NGoz`zz^r!c@XTv5a zPLb6t^}diI&D`S?B)D9dmZ=ozvYWC~_SRy6sIC)2_SjDl1H&aGiD5sp>E{-@ER*tG{p@k$M_F zWbbs89YVC7(4q@2xZq1Ky(CabgK>aYnu0{Ku)B35I9$gGp9Dwy60 zX&rs^(YX|`iM3lbP-yNbfDYY|P4t_@4_dv_!fT@O#1l_!%b-%8dg>{5sG#jCtE|%Y zRY4DMDToq=q`s>C%P{A^?N$bqB~6oeTE7Xfg2x|x@PREuF$={)FUbuSAv*HNBdaEE z6)q2WFG`R&_uO-<1`1IFO;Dyg2-FKNyuf{3IOP@_;0XQ387uC#@WKm6y^-9jufAI1 zRugzE>LllaEA_0i&SK|2_uO-{%rZ+(%haz>27US=n~a?MdbH6-OC4UjZd%U@^yC;| z1b8fqIMy@du*V*I5H87hYp=a_RXi5!E%}kBh!Yst`|`^#x8HvIs*WS{32880| z`DUxFwyK)RmZU)b9sMIa(}U8W-?HeBk8{m67d#fcf~iTZ`7bK`RwnbKk3MRPx#W^d z#vF6Z!uE{~%mS@YtjpMBzm^$g%qY`1lD1z9%*hb8-3ZLlZ-pDT#I1nG5R;dt?Zcz@ z0CITm)mLAYE?%jBjZqBUy1;-kLb-yd*LIG?I%MoW^2j4*BqbXdj(wZ;k)Xk25*VmH zMHW@{QdYOoeWYG<%{2uz^uN-tws2XZ&$7!dyT~Go6t=|o2OoUUzQ*t`Q-of1T-FfGoO*p2OUV3R1 zZbhK(xZ@6afK`WGwSBM??WDl7Y=J_uOGpTZWAFv37BBQ~*plEe+Q3ml^Uptj)!`Mk z=XzydlL*20rIuQ%aQv|jQJ4YVG@H7>K}v#-IN}KUO=+X^V+X;#G=s`Yqar>M`1jp+Uk`HP$ss9R!0=e`SKNH_&27wnvCKj)D_fgKksgjs zFz2HgwFTo@u`Sk{g_rnF8CasA0upeVMZNB;Dlk!TS z$YUX};W(lvDM@UlmvYuaYyepwQO1s7VE&(IqKUGaCGVT#*=L_^*iB1N>_Ib9M^Ly> zlOw5gN1{#$ShHTIo_gx&SxGx;UD1IXAr_pNwDpeh_;5w14VkCzjet6+RjR@P^1p;zyLCXLh5fnmIFR-5qw=A!pfBeY@?p zn+G*6<5{)l16%EGVp3H{iz5debP%cMhc5#!qyCLk30p^#{Dtfb7iu(~<}OGpy~m|* zqv0;_KUG%7p2b0WPlk_2PCW6%!g8}HVfEd2--YDMYYFmBvtf{Zt&)Co+Q4O(T?V(r z)KuF)TBvq98)b7WDDxC@G2Th8>BtB*wK7kIHo!petd_~^^CF2`c!aI|8-;#IVy;$K zG9EoCb)mf7YW)Do)=%5nk<}sUnNii0jFNOlhj3;Bla^ZWvE+kMGg}Np7#VFk6lNhV zwVj1Sgp?^Y9wr%e7%m7V{RVHqmWwU6Sb4bvT-k9Q)o-bsvV$wMcd|kGp~j}1`jIQ( zoO8~hMs&_a&~)b>RA0%n6eKYT6k~5E5#fXfY)#b?FbpJPU^m@#Q>CgyOs5jth~HA# zk#p0|E=JkJQjd`+d=#e(rwVh2w#9ytgg{jytor4`NH?}Z_0$n9ar)_}M-Fppiv3H! 
z@ym%b7P>W}sd{h25TK{v*F-Nq7DqRTS3FOfRlu6Y*-jXk( z_t4xcF2DTp!VDjJFNC&nmdYiDS6+E#s$G`#cC+B2vK^FP>Rr>)LS>;8F4V?((FPW$ z+I;HegjJ2tR=p0M3NPZhh3)enCogoP*b~x8Xe`el{3W*CIrrN_l}S~yb{jVx zKo`&#tFycs$tQ2lw^Zm_Ypun0fXzgd3XwC+FSCv^rzPx_IX#E3C`RrbFurSqqLsHW zz11-00|{XlWlXn4wA_`AC?X)~gC}{wN89w;9?z-y&dmJ5GZ@qOEU@(5= z>MZV7=+?#03L+nT@WEL#Vq8e|v7hpSGWH|tb=d{Wj%ua%45N&tBg;tcw-uF|xE1y< zsIQBO-1_V6m=0))xxxf)>{(QeloGf!U)DOxGI~$Z5JYV-A`6AvC_N^n$GQUW8jsD9 z;>Zt7AGS`he$|7D(I}+CwIwQtcbr{Kgq1tNGHR9w(l$!(6&?iu;i&4xR-AIO4ex>I z21)E!v~A;i={=T$qzC}XqE*LT$&KF$)-?8MFjOwGJISO>L@A{7*v@D_Q)?rkxfl-R13lP86(=+QW8<-t`YjUpke=)g>%YY@1UG)6^EpO@td9f-un*_ti!mV)i{7@9 z)xOewTMVXiP}_7U=R!ym9NP3`izMo|kK2Q!OBGJGBqf3QPbuLDzWdfHMu?bW1c;ang4;f@kBuov*G9buU=vcN z9KA=raz^MYdQSwZf6;qTzfv-!Zq9v8^CY8C56Ud{(@}$ma4qrh8u!_6jp6uA1^v_!%#UV?Mg;9ouJ1U zM1^B8c%E1Z6-dz!8IkpBW}W)1Mhdrh4HBrx^E$rD!o53sSI9X5p#_YDjiURlTo6(T z2}S~=7REKa$hH_po<*yEb>9x_Fzre?JcV>~goPjuf{PId0L9wNQb4`fO&`I(?Y7&F z-V=t{Xrqk^w~}NJu{(UK&J{F_GKZhYcIf7Z(#Qu`Vu>YMCrd;nfj`;BgjCN_RRt%5 zkh(Z2OE#;LPNX|jGhJ}O1%jJ=Q3N z$*eAu6(;aUJh7D@BOf1{ojLMLbQ794F%0Wau38zgUXikQ5ht$3a6`D~F&lMRlbZsxS#0XgTh<)gqPx%%&5_g&MGcAvW$8TtfEaRv@F4&mjc;)cGdnkiqUO*pG=(@cLZxkYqjuBd z0gcLpq+>+05r?xGt!ihWr3y&nBb-8z9-U@)$Z`s~A7Za`Vi-}t*i=!6- zd-Pk6Ih44Or`BhsGwzhgg%TmFI*Z^D-J%O@)>6QZIKAT6Bq%CG zSQN|J>A(yLnMSSr<7#E}$` zl7MvY_v_)ZwRjZAN}z$r^t)G!bf2^ zf+^0Jnn$6HD1%^!orP{pb%s;9l4OXpHPajLTgin7^k)4=UeHl|baa%!M1;q(E(9{7 z<23b&Dan?$E$%4-^8EA9N5PI*Gi8ZfPu^Q{3O%?PBG^h&>ok#9Gyxg45jxDm12NkA zNQqHXoB(th<(8_`=)G_h8@pATR7?o(!Gzo+J(&K5Q$ru_Yq}*#XY_GeDdnaOAeY)E zAT+E@_1o~fhBEM+T3tp{h-mYEWbK1+W2ojUoK6pGK+nTqS$alc<2Bb@V>>`VI4#LR zMlOrS(M-O=O(;SrA<0Lg=NKs46X2cYK)~qWxOOyXRxXpRO%93i^e`^uOowworFN>5 z0iB(a6nr@RY3QG15QOaDCN?P<^euh~g+mnVnxsJKP#zb13RGiHW!0={1IQ6u&Tt(j zU#cH|ydLR9O>)XkEzpyy|4~L*2QjsAtYYduqK)vY_)en>0s)B|PBDR#n5l9TFrcbG67U#Tt^)VBMG-tx?3ot~Wv)dE#hzXC|u z$h52NDYbBeL{oR9Ti`XP?$Vm!8z-9P!JhK=2>rju?nY#FG3Sn*2QQSV{D~ozr_nE_? 
z@HTi1l5e4J`FRL6?V2UCI`^RS01Y*DvGsm(sCuB^ z1P4iFvKcfI7lE=ld!9n>aw zA)71fU;%RBgcQUK;5sj#dJ*sL`lXiF%9M-=u* zik>?>ejz5#t(pbJbp>I{dyIxxUjz!kasY5D-S94@pOj?a08R?Wjc+Hno3Lqd)X4d&IY$BOFFqrg%*wGw~jEF_(e1*k)2)wfT zw+*GHH-sKAH@qTsc=gSQ0*YI+upG1|lbpoC65`rfc5cKn7an2riXQS3h;d{+;=bRA z*KIV6`XZkK!by0MrT3h4;VcCKSvdwt|4$=>m{<0F;v-oRRA~IF@tW6;>xm-GA65Nr zi~<0agKJWE7Ubo3MSMU4x^6tJJ)%53A4+IZUOHT=y{s`c=sZ9X-WtEf=wJS$JVurH zsaTGbIo^}?#J?JgUod}|R0NIgqQQ~?FR&6HRm!l@%8#dRnMwg$i7-T}AmE+2wy*+r zNB|CTKxUMEpNN4L9CWGt#UplZI1V@UrS;7wC;(AsozKr}${NKK<&fM0N++;P7G=&> z1?DMjZekeqBl#5Ib5_Zi3e4y|zhWskt_SX&oa3{oK_x}(-28Xd-!@GF06@A(*3r0f ze!#QFQiupbgbNmylLVT6d;MT}3WOh$$l7RPd*#i;>0I|;Q=q0mO@W#MH3e!4 O)D);GP*b1>Q{ca6luKX$ literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/cred-login.dia b/vendor/Twisted-10.0.0/doc/core/img/cred-login.dia new file mode 100644 index 0000000000000000000000000000000000000000..f9dfaa7a11271b39c4408596bcc002b6f7012fca GIT binary patch literal 2369 zcmV-H3BL9piwFP!000001MOYgj^j2GefL)oo`=lA-#?DN^L+NL_?gFS3B)JDME>_MUZm;j+sS0J*}!0% zur!Kc;ICoAC;wtWz$T*6Wc)Y+U|)d8QYNnzS6Q0I{%oCc5V9qI7|)pdX&y)G&>QEi zigs=kL@{_~!Nd67Q}!B93Ne!dIY(r#*qqN|&OSAZM&ji-ZA7nlTo$`rMTsw(q}x@c zNrN2ud#_E=DiMvs`Qy7E^mq9!6&H50>Ta}ANxEe5+z+dI#MlJ+KoDpXLSDwrZdF*?~4e_8wgOrW$rHkojMZ=X#-zt*+>?i&#;B|NJhiPx(xBZAOKb;JX zcOUOK8**ulaPQgN;Kt=}KVSPEPfkp;!!`|)7DcnksZlStJ9l96OC|EnR0M3x<3jw8 zJK6=s11=C}Zxo{0Ke(F~-TNtsHtvGODYyeaMxVz!H$pfOzV|Tx4IPe8xko~pfWFEA zt)ks5q8cuDxC$3dvS+ih=vdr63+DkZ#1rTk8WO+m34+MbD71`r`gp*XQ#XpkD#Q2E zpGt*zxpAsfql{$_OV*;UpW30nOgS8u=RUP zAtjgy-@tN+-cGX{h2fNkUe=BCJ6B5)1k#ppC1!`aDl4yx%`f7I(3`S2jy7e%yUDnp z2WusI8t`y_jN4ix_)f(M_B>mIeAHq_i8-HCnBnCeun<-vF%^$N8f76d*u7gr)fJ%^ z+TtIQgsQekyV|0y)D&%Dgu?z1G|Qg(TP0}>!LQNW55Xwr|6KDV9r(I$zP2^|bx}2X zeX9N;c5Bc<@66OTBw6-MC0$$4k~&P|9y>Bhvswpf=aY1`Urmi#cHLda&wT3niRh}! 
zn_1}&wN6b6(Q&GZx%Y&tA0K|0UWxjRuEuR>hC;J8G#yeK z6JgalB&Ka^Cd9UN11C;Ktk&*zdC-5f2)yvhZC4BbIyuPl9LIGZXU{%(g%b zDUY9+%LiwnZ)f3Pk`B&7FU~>(pqO0&g=)4!K%v4hC;$aRgax2D(76dfp%DCyrx~6A zcfs9HJO-mBOWlQd8>9=q4>f-o4QQeaO%7)0fF=q<536(NK{Vkww6^c_LJtfb;!sWe zHL*>lr(z?7rD8`nEY(3swTCs0bQv9CqBaT;+<{cn$;TF6D6N2CO9f83O=-Uk_&A!16P+CjcxtNrQIG(Bm zi>#V7t0}!Fc2-VmDlRAO(4}+gNezYdq;#k!Rm}jui`!ieu-`lE8_4wddAg~YP3CIdoS*RN8pg2}OPP;!Od7Pig}1~IfPMC~C> z{AL2d%PLi{;wse!3#UD76mp=-Jh3n^Om$0WL6}fOmW+#VPnNBh$r&UFup(=H6dGLi5P$jfOLI(BrT+CM0Fu%-!Vd&a;JG7NMH-gXY1uJ{8ll1?Xs@@vLaq1i)z+z1qkp}lh8lY=tH7|s zXf@ZZ^?%@dARu^};a*kE&d@rHShYOvxOZ{!w-nSe)=6HwgxN~m_TkG0s+SGe$>^Jy z&;2Cja#!}oPZz}j5AfykhrHqY(eBkqa7W?te3I)-b8YUUmg-KET^m(Z!mGg(VTW0A zv%CeXjlw$iJCpy~sB7ZDQ!IF#ZR?rv#J4Q$vtdj2!@aOywwu~i&wn`MKV3|ods_sg z8n((|sT%t0+Q0J%(1BDr1Cr8lSZ|g9zfOJv1>qEj#Q*YV!ec2xo<+2YV(TUH~eG=bD{kF7lP!d+-#pq^W%o}XE=;NCz$ zS0o@E%F+(!tB!tNpMXxUG!WVc0a1;RD+E764Tf^bK~5e*JCV=z5$M1F{?Gq0vXCn| nww0MUWK`N?N`B?b0z>|9_N{x5ql2&IdmQ}_QZqs5igy42VsD*Q literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/cred-login.png b/vendor/Twisted-10.0.0/doc/core/img/cred-login.png new file mode 100644 index 0000000000000000000000000000000000000000..a27dff4b27faeeefb5523d7b211bda07acec1941 GIT binary patch literal 34148 zcmb5Wc{r8t8b7)Wl_XJ;jLAG?$WS4KMCLJ5q>!;>4jDp-5<s9+UOn!dFNNDHo>FL?Stu7)-e5Hz5 zDtLH2d#!qDsQT-~gte{h)cm~P&$*tTt!HLbI$x3LJ&mB!keE_pxRLkEhT{1ODM7XL zVjJr*7b{g&)hkyPpIFv?d6%eocB3hlXU9IF{^AR#6cy_pnU=H4c%{dPxGmJ`FCM%= za63HVDb^F?D=Wv#+xU+75CJrRk);fIE#c5@WMbk{VP;`5^!BZC&u1D1X^*)lB_)RsAEuR#w1h}4fa#d?12U7jC!QCV4;BJHV}BpzP5&gQ?)UQX2$9X8HltGhGqt|ZI7 zzVgfdKj(~3pFVx{YDGrI$V^wk1ihNNy1JTLOG}H-+Rv|VZ!v{0YNg3)sH)OZhR{XO z?BAc5nwlCD6O){5YGkBxd}zxKdL?CLNpW#bZftCX6BNTlB^ld(KF9qx&`XC|n= zBE(LRxKMm|pw&vv<>>wRczVg?TQs{VL(GKApFTD4sH(1(m1Wln(l=;sAOuMM`q7~k zx|4~Xp8f_+FZL(4#kc+nMV;zXQLOv-?~jj<*Zt=bH4MhW6`ATpijCf#FMe~rSl>~Q zUX7QIKl0XSLsmsaV%>;Jx2ojO`1trC(`TGqTvsak|E#U7t*+j=rT^U4O78rUj`$cM zH+bnqqNA#`v~Xk-<(0Y@K!=uct%FX_)~jDMa7vVt55lV7v<5Tv181ddU|_g z%?u1qMtZur3G(wlxV`^R>6x#?!v{Rgts)MTpLyh5U4Chxq_{YPAnlrvuAB>d6#{0RxMt7bJ0*>RaH12FyztTPWEfmMYC)P$fsFjmJQ&$LTTc1q+Y~v zw)>G_(Q8_OrJUankgu{fB(L^hDL(0h2Ppwr#SNUsSlYtnab1C zdCL|?qn~I*`t$U1rl>Po{eDm9)JN~1b94D}CCu6WB!xc5g9^aQQ zU*>3~ZG34>UK#ZH*1+bU#gM0+?DuAvl(pR}F)=ZgN9%oS>(_I~D=I32w(fNC^pv~y zTl~W>Z88>L>EIi>ZAA~|4>Y2b;8eG?6ciLhgoSC>sY@i$_lF<+#lh`)CH>4!@ve^# z)i^jfwvfKUr!TG@mCDV?c=hhxT$|h`vy_{ymDM=$@1n&eX>X_5 zz5BYSYv?Yv)WpQR82h|e(T%?R{mOmMoS5YNWc(Q8d)qS7vHByZ!Q=4yT3hjuD!sfTC-PqVq$`p zmbM{^E#c41^6aO_k00l!u(X@C7it-@*Y!sAe?VRu^_%J)dUiHQj* z8QHB{x11auJ-!VdTD~pT)Vn}Z;ypS#DkCF9MMY(1W>&qq;d6dBeMHxy=97mHCmyOL zyib!Gdg-%*X5{1LRV!AM&hkF>`0U8Y74#qUurxXU4eU(R*O%VQuWP>cm47>9aqZ#j z9#s~zH(n({DwZU&Y2Utm6ZR?lGBNRMb@g6SlE?H%>yKHgd-v{D86@F7=KNzyXWzbk zWMo0Fg0|&6e5mtD*n0APN_TfRP96}2qn%xJzS&IXoN&d}MM>QdK${e`!xnFCyI{o;M%ghUaK!lk1}qEn9aLdhkHk_oWF9V*fDW@+)6SzB|g5pSY+aW^z%mQ zgCScQ`PEOKW<2g)R$MGvomH~;^fgWdvIZ4;bXH3g|kNUmqo?9=om_DdSBkc z-<-F$4xu?LCMNB-{@h3~-n1}3-~PjclPnQBvL!(ga|d*d0t8!1f`aNocI>lvaHv1O zhz3U|8EiEc*EAZbvRgm)&v}~OyZ>6K#|C7}?6{1{$#Tb@!v~`Tb*dlnIZoKza&j2# zm$WQ4I&tEJot@p-+gk;>x%XtR{jR$D-QM1QoL=bAp|j5%jd@N!*j-qYn3SY>|EQ?6 z^ovKap1S9fa z`~!fswAz4sN5+2n`pL@^5?Y>?l)Tb`-^jZ^b56IA7-$ z7McNp++kPjJ#zJ8uu{>pOPNeZE`9FlVGoYHb!!)^j7oQ+A=$r|zhSI3`N*y!5q|#D z78Y!5Y)nkEGc(O2&fY)guC6XvK48rw)Ao>`;V}NA8{#?NA3l`KLPJwK2_$S)!{Fk- zpHBH>_R#(c4ude-P!d86!_sU&b&*94_h2701B20-Gd#b(CW>98QJ9~fCk>Q$ks(XA zr>a}^m0c7Fr@49iwuhUW6}J2t+&P=SFBHwyT@M|%$zKwssv{>oVPs?!#d`c*Yb$`N z;@Nx09F&_>PzJ&x6EjamalfCie;64VnR?vE{?Z>ta?Jp;^uuTlHGh8g4ZifLFq;R; z*h;VTP9FGSIUR{I+|C! 
zgwIx%WoKs>7OETMjdq#UC_h+c+d>#`zdnFN;JUgnLcvZSu@`;$;)AnK&XHW{@9ONN z2}yM2+&!|Ni3ub_mV+aRLHJiY$jVs{4@uekiHUX>FP@@{Exc8DFDOXS&@jHUFs?9G zh_amaP~!XPkNfdC#x>mM-&hg?eu6#d=;++Iaf2cQOCsyFAb&V#q{O-6f0; zi;6d>0BW#_`Yrtbob+YdvnNO{>_K?pqeqLezbG=$4jmk3CHIy-eHy31^Ux|qy`!^} zj*(I7QvY-8Y<9&PGCn^Y0T3M}ljolYJPXWb4A2}O8Hv7o_s7@rp%SA)5WA)#!$&43 z&*-y^|AooQGMi&WfSlkh7+QF*re-sSOAQ4Ndo!#dw2@M*2SjURdb+}Y(@!$_rCmpM zRu<>3UAu@V+PaD}b>h;;88r#TQjEaT_ByLf}m?zzRvWL(b zlbTu$AS&%Se`oK)(h`oz{*8!Trw+!dekst@)cm=!($?5G?0ghOdhq#`Ow@`2k{EkW zLz@X6#|A+uj&S!Ec1ol62HDOegkgSue#uMyUZZce&#ch@S^i|PTGuVlK|y{UXA-nW zkC*P}*S?i^iDKVsHf0A+xYK_rH<#LiGMMhYi%r(e#RUwQS=vJ|dPw^*Dro*8{`M32 z)eT@a5h~N-^CwTAzG?Hc16&)&>g)^fho`d-PDUgpC*%13$_Nicb6!|*R#Q{ku_FTL z;Ix52a&(fTGF|L};NH8UCNYkwb0FJhZr`D7xML!dqL_lB*>^=~gy2KZjqDRmp?Y zgoM=B29m|S%RN-ipt*EXekMCrZInPTCJj6wA+h(i(wDmO4*)Y)WmpG_%XSt!M8CAU zAt)yF zbei9o{VR}{d}wEPH><4gQEu++?=~l*fBeRoA=^qNA}&5RRI_=D=?I@CO=yqP^R6cboG&tC8Mlj4e`m}e--Md!! zK6mc4_xCfiv0Zj>SQ~HJGovcIu{tJj<}og|`_re(1C{P?!f4-^eQ8VcAFd75JWHdR zwB`^__cHqLJRQt3_Fvb?Lx>^FBlbM+r5wRm$(CnWp3DVzupB^pE zM(9;aX{~*JWefm1EJ`868I=k+*i7itr%&a@#X24_sOS)2820ZsHZ}d$`&7*`H)acG zthtYm&*s|DCU>>e)upVcs9npPF*sQ-+@=+LBO@Y`MJ!3(Ir+0HG~AQr{cPV}-!n{@ zx`PdbYP*Lm?ka=wNS%yTR*d=P#@aW%Y<2ePl*87|IK7(3w+A+i)xUWY$#mp4gP7Y? z`(#_102@_0;a7$PVPI2$kUopG16~Z2m-p5GV#!bvdbN?J=8m$%Vc?NDeN-nXp z%_&MtrUC*5_wO?|HBER_ONV=Gnke|$ehCVFN=Q$XVk3ueb6QIa%;vQm|926p9$Zdi zs$FqeneVS9F45VWHz`q3M{ntLx?|mOztQu2l*+5~^YcNi!N{5g-5c!Y>rCXOYqvWS z+!}Evfn~y?nq{am7c5!1ZZRKQ!j5{=kbLCwZRTUTO?IvC-We7eE&iC+JFo8IQj(K% z@kJ1ce5t(N&H(B}e0%iZ@F=TYbt8gRRP*-9!)_#4T%+XBg%_b1q&y}TkZzG&z$R38{{po$!S+Ut1)~Vtx(R6|k(no@NV(_h%{GY4K@m|`Q9)~iHpa@z znj$4FvG!{+4aWw^VeQ9sNBQL;zdygBH@@5mf-Xt2vOq?z}& zw6?mtyT>seyY~A>@r8!lEYi>L&C8Uw1lXUzPLPDYYoww=k=$DCvoim=_`)=Gj-Gmw z_%wQDbv~!AZm$iTLbcmJE!|*R8$%c8`!$$eWT~j>;ofi@t;2%UA}xdq<-rz zR$s^&fM^BmoCN(HtlgyY(tGLX-lZNl6{Nr1|6C`c&n~zf8Unf*FC~uLJT6o#RVL7b z(U#XXpQK%ZH1J$^p)s~mN4Cb=&%VpJ9M|HWx#8h+6&8mqs%6B)zHj_lv+!LEsjjY0 zA|v|;KAG4q&vc>V&h(T{rulE!Sz4a(Jeu+tl(oCt&?OQ}Zf?%%mKo>~U@k%E{W9H= z10|%Y!P6#e8}7yG`WECs?Du_PabbiJS^?)+QHg@ zIOx{iEAxkFC^ml276p=R`^dsbU?F{p4{FHPNCr0N)=0_<`YNtq`|A1@3U(eI9^i*< z+d}W$p(!@1y3jyfIYyeE2`wxjo|ljBio5$>3UXods;QwGxq*>(4qjdrpy46|VSaun z<@+M)o7>vTOG`7Q#R?8aHDGK5*2US)|p-#)DrNzXIPEV_P9(4mFgm>^d)PM8O%H~?lCbVs6G?t}3Keld_ ze{rISQRI4W3BCOZZ>5R|XS^k<|MorHQ(n@yXb$$`^Kx}E@wa;B<{Q8xxF_jB>3Q7@ zp5F#jun7}dzJfJ^cLLhwwv=wz*xO588GSQ1=eYbP&qnuaJf-RRod{Lk($dlms{NyrlQ}mI*ySYebKCfywwaZa10<%EA{94p z#IQ|H7o7YceT1~pUhWa>^9cGwx-K#3d)N@~Ql!g+NC*QVBs<@XE#gS$wWwS&lTCy8 z6=i93>C)r!^78O-2;(Df!uLSnTqFhDn;EL9!L^r`e#b%PRX)9&g51*5^0@a8kt0Wj zN-ypb9h#c5J$337G=Ix`+Pr3^Hn$!==+bhb93{&yu8nHrq+26xlMH+5SBkSb-$bZ9(4WsU+d`|^lcFGe)no@E9jpb9GNI; zfMYXuT%Y3RnD+{iTK1G!Q)KKFFl@X*E{SQc}{LJM!vkhOF## zY;2p*Td~R6*wWI|5B%_CV;qWmc%H*lmz20xRk(nUA3vhD&CSh0TieMb*?6`Ow-NaJ z>eZ_#y8|F8ukFI!xuj2pM94q)`y-`bi|uW-#jPK2Osjx>ZD0^mTwDyyhaGUHWE~0( zh(ySc@&V6XIiF*#Raq@m2CuN6^f<*RZhI#&asQP+vh3`yUcWvMc;@0+A^9~?x0obO zr%!H+ya5sV&demm#wwnT{QL&OAlLy2(;fcWu|>f{hZZe)cyh2r*RPW{k-D5Z?c(g* z*xVeGa@o#~Z}G$b;YYm<{rdH5_(}ZdgB~YKmMAjdp&WFf4podyNP6LTKSu2|o%VA# zT?3e;))z01)j`3BWa7dTZN`g&Fk|RIpjE?5}9pvj6-kDd*vUq9S2g7Xaf>YLz8>XsFuxBf@Avuf$DbZ-HbPmE@`T4b^WxP3uKC%WHm;%9zSbNO;Y zgrjPlnN?2=@izFp(~VV$zvAqio*Mt#)3E8}D&$=n1+bJLN11@(xw%Lh-E4uv_Apuw zRyn^yb=9Ur2HM)7JALi#>Eo>LN1SmA7Dj+YZ;N+XNq;OfE(IffkeT^seLYw0+2uBJ ze)3QG5^n)n&pa}52XqDOR=a(pD0aDYtLmPEC!2G$OOZ^WRy3r`o(klzfGwGA(Tg3JdHz0e*hY94-N^ zw_q=qr{m0YXHI74_I&zuL{Ko2;@%wfI;^*mPYWAFmtG&6tEg^_`~ixcJHK{6N(S0NJ*cYJ;weTXBoDdpjG{5iTwpDNaf3>cL~=+b1PL{3AMp`7CJg5k8P_kbQYy+ 
z@o&NoTXmd1bhndOnH4Df%~GEaZf_heU%IyTwVb7J5;V2S<7-l<3Rl!CI2F709}afh zmC*YebrG$AnY!V_hsEJnK_#XZiBU&OSX4Q8ACr=*eEyuK{CaGh<8117eQI+-%7EtK z0WI0)iKSFavZXzcAx znwP&vT@#qtF!q6kg7B3@1VG+A(E zA>k7?dIt-IQ+i;h?SdKcg93_>F@V9lIQ4avqcPy)J2<0b#BNL?QR4^VH}Xgnh0)j77iKS*?_s--oa=rEgSE}f z?uUkk1_lO3M6|37`3p-JfbXGT0E%**8V$Xfn#wv@CQihK+mNDo>hS3?u!#MoOWZd> ze2*SG*5uJ^6{M=6;qL0naa*3M&g|^jMYlU8mSf+oi;a}}l%KiLi5>{{nD4i9asra5 zg3gMSwDSP(pMrf!h3;-jexq%l6m3~~Wv{Ga83kqlzpgYW}aMAhX%~c1} z*xT7{r=k)Ei!3hgdFgY+eZne~J*{nJWrc^I{ipiPI>Qo^^9jnE{l#2O17c0|hQGS5 zTiw>q+b;e5`z)PpzJzkJuErR^7)bA32-(nA^j&0rwk7EC$7&c`ij-N^#+{~iva}qT zo%MmQcJPK}-3?nx&Le`~|&b2CYStGron?x9Pp zax9m1Ld2X%WLQ}4qepNmw6H4b>iS0JLOm>|G5yE&ez{H4aZO}j=5ceL{B$Va{|w)h zFQuLaM=(13$>%r4-32O@+pd zRKl%YljGyOf>A1il$uxGV43*n;~yyG#dL0;kovDx;t2MJ_w4t_yQG(Mml#(mp8%C| z>wafiRhKE)`G3%>4@Nf*44jqB+uE~hXV0pK)jn=h`ikk7O*3b7AwSY8pusyOo!*9x zbs)GJq~*5$gK_iIzMN{!kJT+ptR^X2$}{ugUtHujGddsefYUvgrT>>ZC7^buMqW0p z!pV~-q5Ff%#h51R7nvkFHaC-JzNdN~#(4}1>=M`(8+tf8uCCzom>OHB)-;V`L6{_wQZrD;Bq za-X4#Xw&KD;|k%Ve=o(Xpqs(zw8SkMz#%V~2%rv7>T?|o4=0x#ewBNS>~a5iJJd@F zNlA0)CF{Sw#}%H6(LeXEOX=__&-7Eh6QZE%WgbgiE%@@051+BFM@1ub9ppkmi|Q)0 z(UKC~rNbh|S4)&WStSn8Hl^mRzt+8b*C%*4@n+ChZk{1D+w0}qjiN3Ofr)#<)YpBg zddZu3NA^TYi47l&8EyJfL*t>}(&;EGCKyX;bAjY1YcrYa^kmlsJC#BahMKlE@^by-oj z{`gQpkZ*;;z{{D$c)^#cT>(@N)U5}BhJir0N~ArI?}7cl0lM$j@Y)RA0VNJA;{E;0 z&Vgb#6&sr`Xpm5+r>V(vneG#|^?m`<-A2u9gaE-nU~U~;Ga3bS8R^$;i(LPVv)(M#{z%KX`4 zBXxAKAK%;LID)HQzQk7z_!&g}AQ>22uV24@^G2!lC3BxDm^w%SY8}kaAh{sw0PgFM zA{cl1d%me~okYXBg3JI~Jp`H^U&Y&Pi8BAg zLzRk|4RA|{${%zAd?L~%#1&H{u9zgUk<^rw3KT?qLBSra@?&A7?)meh1HKkzV51c- zTne@j58E}#4iO;JnH-dn>HqxssHEi2f5;e+FhvF=+0^;xw$5{(i(xfl^*3iNHXlgS z6Z8p0btU-e(`A?qqEx0Yz1*(m_=2loxz$9*~ZEw#ny99OZzO<*%%fP&jGP@2qXCq&~Ui;cBfyI@VAbyvhBBG)_ z)z_pI!fbPB70}Th3LkWrhJ*&4UQkdF0;__jAR(7Z9ID4&EiJ7bJ9adQVh{T*j&3=- zn5*aQ{rkv}ZxBsy`Q@}nj}8rt5Yi75Z|P3Pe~d@3g<%+@SmV>z3U-`*2NQ9>n9YJ$ zRX{)hXmAG$H6b5}8oNvwnwR$vqG^~H(e>#QVxRduoXdMCLjbmLbeWGjpVQUdEt9^b zjOqt||J>83Pd|M40M$s>XbJf)T}2r;<0`L3+%(iwZ+u1DoWP%fL>26Su&^-Rgq?i^ zZ;rE8vSlkZGw;=+M9E)Yk}p%UNP#nh@yhwGI^!10FaOoT&N2MNW<%f&UJ3S*n%^>b zE8z5py+%Ae2(`isGZO6XDma7K$Tan~08J!p2;w`asOGYgM5ziMKSn?e@b)s`JJ=f5 zbwKs|gOjKf$nGGcBfGw!C}EW)>pr7@v5k~afzJ%E5u}fprwPy<{F*=~hjGUUtqbv6Z!2*JIgw9o+2ueC^+s;RXA=mu#IyAO<;SP7uQK%x)_59nwvmIkYa(1G&D3+RPHeCHvaQJDc=WSX%BV>x5o*b$yQHv z0a-?fO1JaCQzZf+y~Fm;sS`mX4sFfN{5(9Nm7IUWvoU(K1q1(wM!|hEy6emE@SV6g zE+%!Ngb`h6f{K2yx3EyK!j-+CCO@B&?aQJU=)SkIRoooP#NgltLS*_3`JKD%jv!*R z>ltnOKC5o~nt!|sG#g=2(dY;F;^Mj@_6dV0go_Gx`=5D%dim3{4ja>0Fdj)8!2=O> zBf!_LM>iYI`3+JFO{haVK7M>{Q%b9#ka35%yZiUvw_1j(NR=Y+2_-LIME|Bl`n3ZN zd%nMtB>e%m>gz=`Dp=hJMe6Qbo63wjf8oM}pY?w`j)r{q^Bz5Nf;jm4uuB0BdTUEd zU1$@(PPU)54QF`_`ZJ<=S0KPsI$%r52V_^a!naF{j%KH$#BF{7REwg6AZzLq+!DNw zBF1;^+DrKZz&Y%T1}Z8l&dv)s)*%Wb5IsTe7Z%}~u5PzC{%@MTW9QB|(;I~*2tN|@ zt%in%`ucY?3e^8lUQ{v+2koc}-33j5|K7bP&*I<(Z0bz*czj(zi`>N^5>Pi{WNNy$ zww92PFgC_#NIdwkZ!S7II$BwYqtfBy$Hq<`{D3mCC|v+a1}Tr=sG&;t0tjjSlv_wo z*xK4+2~mY1Kuo+(Dm0NzV?#=2!sSt6A@4lljFH@8w4-uoT-CKTqSXOEEIao^&QJA! 
z-QNW2v$W&^iNjL7|6Y4679kU62>uXZL`DC9{jpVWM%};Zj()QJjv)3H)CZ?68c%<_ zW#wtBie*|m#dVIlIZu~4PT(wW7w<|UVV%;aFr+2Oo1V}cKnX*^%SQkPvjaGi$a*5~ z^y1d7R_w4vQfPxH#DF01eG|p)bWfh#^7}={+w;X?gQl>@(4QYZdWEQ*tY~H@L!3HR~rqn}d{KB<6y!-K42NZWCT_-0S#uly4 zT<;$?7Bo~>kBp7olbRb;mgwV$e#w~g zJO4LH>kLEg)!SLE*^Os4UTTi}aW=O+_WzIc*|Ss@@UPPMO1idB{cv1K{|KU}#6&a~ z*A}@DwfLY4jl>M=ZCRAOu@Om*CV5d-$FjKGm3tkzT+axY?WO$W4{8#B$P_aK$nb)7 zU-R{)c{Kdz9V>|u%a z-}gdt)41CADztHH>xXk)#uX%Ybhty^E@rh}a+tR-sb@FcB*$C_>h^YW(qbdQ-Ql^D zn9TVxJ0~{Rd&=cu6U*&ew*bN>oR1#&`6=Uzt1{=`;hub0v4vXY+W4XuNJyQl&Z(0p zM|ff%s%()r+-pp2rlI*{Z%+C*k2x0#t|L8=F zXgCwv_o9WM%w$MY{-Z332gMe=J5_uO09(xQ^Alu-sP-RL`g(=r>?<8JazY9x4Fz<= z+1XiOs3$dStYV-Tc!`RN9yyXDEiaFn?+tP=Gh+|k4nVM%3V>5vSy}ngrFr;5sdE12 zfbY7x7!y(Z!<+!F18{&1ivNG{Vhwiu+VU*atOt+pkogZ3{?!>FB(Y%)qM?*B{QUoJ=?iKf<#P_fDSE)2hd2LjE(4R_zwHP+iH zpDtSEF2-R@Op>q#@k&X#hE!jZRFC7&&OE(I>}Ei;8KHe<1vauhPHqr0!D4o^;U6Fp z&B^mhL=f%}tZPi`;FXsb-Zr^|oHRqbal&OMeq-lOV0K6?ACO1FtMMK+ue{zP@2?K| z%Rhc79i(M`oN_{to*1FVCjopN)O7Hi?fx1eTn$ZWfhWy(^vjp*nEkMpWv(ubBMzvh zqSAx-C#;4J}$`}=Z#Xz$7C@}DZtLt|-A*H3I9UUE; z>)+cz{bbl8mr9>JeTwg2C!Fnd^(u@eQhzO30!7k{#~t;3SH>HoS)@Jg#>abmdEJ-2 z<_)_g#_5o`I_GYu&re1`bP*Cq6-R2mp)Ic1sBmg(3YU-KdfJ@6UPMq>`0}r%32a$- zrbyE*BBlv=NTYxs9l8sT`TQ)@=7oY(YjNPTp`1Jg#09J=G0+9`ju>Nm&8cBw^nPa}r2U`YJ6+Y2H|zZSNMSx zqeJixFM4KX_^#ngU~;!_-`1VHym%Qh3MiF^x;oAdk{2rLEw9HV0#qF{K^J4)eUKbT zf1OPS>nTg~R5Z?I_%S}APFmDyO|arWp7h~ex%=|M*H7Te|3 zaM_zpqEu>sBkJN;Sd;Gsx7w6Hdv>~B(XfU(|Is62dUlWns|%~<)wkS>bgWV3h)7hu zD9Xv%M?;g&r{QjGJ@I>aHr(7DawHA|aVV#``E3*%8!M~kT^{MP!O>}==a(33H!kHb z&K;&hrXeLdI%BBUa3*eCF{qFsYj8KHdoL9D{1QS>I>C-$!mhbuu#y!aMaCUy~ zJfZ-XEcHs_>gh{GNriqXaMwY)FWKAQr1|*i(=KZ2x0h!u3!$~3F{;Y8p7d183ZuZm z#HrL0KM5-gH)hq@#wHv3k&by{$H~ufZ49DUcb@YvBIo+{9URY)Cj@92Mva_(C+}$5 zDxY39@OC$SxDifnx0L-77n(_cLp(Z#= zLi-v>VQd4AM{95xH7`Z#07^ox-Xz6T{Zw60BAsNH?$x_}NjLU$v^^I*qE>cz`r*?! z70zDt&%sY-fW7dZw*qUj6zcFJ}oRTttvYfYL&*WZAEuwa;EKR7*yym~9AIHGJG)77) zuL@MTN_Ga-1r*rH);3-Gfyc-8Y$+KTU)Z)7vcRcEy3wrodG$xu8@$V zN5T*pDF=A|jM2ppESjMr75i%&{hx7uBkZE2jHx0sE4kOH%3UI+tZq)mFh(3zLSTdhL_(-4yXyzpHfyHEx+oI44uhB7T@<|l61a4Fzm?Z({qsjKyg{}T0#D?L!Ui+ z1_T5BLEo!T^=Cd5qxDzY*v{P*j=v-QsPh!(clp5na$}X4wYz}{31@tLWz2^GUsr>u zPRmBg>!=@orumQrp-u8Yc@Z@H;5&@-{W?qoh|tGHnjZrzRoi-3%3`3rue z!_U3C4uLxOFZh2Quz`{T5-AbfTE1G^KdQ6XnY@p zag5`I6=pe#(A9HZnAJ}zhOA?0q@mHe`DbX8Js1-S;r@4?25glelTsUdrUR=6P5`Zf zAiV>+>cQw-mbiO~i3VrRjMaf5z!~kFSTx|Ij{pf!j$&nDVv2Y=Tfh6WT(Z6OO;vo^ z6A&W6!!S08VNksJV#_al;o9H^dU3l+vdThq5oE9^*b%0gkyewq{T83AvZp*+o43Ej zddG%G6@;AKw;nGk-)2_5{{yKNNePLELc$d{DJdx(ot!{k?)ny!ht}EI+rJLQM@W6t zk3DWJf=-XP1wuM=3VoB8Ef*ajX)n&%?z<`>kUu;SLHe1kLB3 z-%_DJKsCd!n3$RMXETzLlEw>LnCt23ojFtPGI4sncTY^?u)?eVIM+M=?V|tVoIgxU zV>7Sq@0Sq}h~Sm`pV8GaK25&=a_NDc!We4)lO=XRX04M885e+E1?SIEi+0M z2L!#j3UdjlcGI_2mKjpX_Z(;)QpWxLi=B%dCD&khDs3{@QXVCtzE=PYj9vpF>MKM_ zU*9UjXB~vx!06}?ke-kmIJ~~C zu=PS|55z#^W!*81fuU|tPEAjx^qmkasg?&khyGxq6J;2rpoY^ZKno_TBrc$gu5|NX=1Pbb_1WMKaDH@(fWQvA z77TjcJc?z_h{_8)<0uUQy9vqPK);w5=!WcsnU3fCf=9|WJ!x#?gSPDg+xqnJ;}Yh1 z+hKYTD%V%Xq5uWaK?es1&lleV2iv>KM@|)!kN~VE72J+;4|^D@FTu&i(Q&=+at#vP za(?Sxz=2q_T=)%WLsG zXxtKn9fpw?->L-!1({(;NB`JLNM}W#!B@tqed)D$u*&sr;;`i*`5FKR0BXqWa#xqE zK0mQ^3(dS_^6S8z_zxjYZg3qmPfPtNljbgiC$aB&(&t z@CS!-B=4a^a3VS2s@Ne+EIzjA1$ZsP`b}>0SKHdR8ZFPG;_V@<$=MyzPIr@%q}-;o zgWDl7Vix*1@ttuFK5|PG$^$U5xF`#Z}xtQE82Yl*a*|91N-^&S{62c6Cs7+g{20V~} zO$=L^R=V*i*rMOymTv4wvX@T=@;T2S>Ci2RB89s^Sl6<(%?%8Eg_JAupi{TE93a<_ zNT=N}=R_?;euJlbyQgf(n;JPM$lCE{^VHlv%+@#8PiBQ%lchJ{Ql1E*vG zzJSkzcF7_~3Q{uzYD*lov9{ilm``$!T1>1I80i9o$MTFp<%JLK?fOBQM=;9e>AAAG zJ}HMY%m~H>GM6TE^*Rkg!z=Ix=clKyqF5ChF#~6NW)TAX5s5$HKV(d5OYRv!=6)2! 
zvZe-tXv@UXFW)9o%ymN#HS{ryOb_zh=fIpYK*VoAOz5FFODIv;^Dj_hI$yQ5wY9e1 zTIKE1tuqMDT>LZk-E@``ca6@A6X(a@m+JtLhFn%+SgN_Ea`;V+%p$t2n4Q? zJU1lrrl^&2UTt?y=LXIF_HxA>+V$KShzym!mozSFJ+`MgJE#mD9j z#&TbO8dBMRp@UAU`WC`nkmF+R-8<_2!xrnQgWWN6;oP|cUB$O=-u$?TlJ><1p=4SG zq_Z-mX$CMlpj{9UKtRtQ9fNtNctM0fu%==VA+Y~IX04L!!&w+fPY;WWo4xdwkVOZ& zgs_hH7zu&*2aYX!FeW_keLYdt(=P%3BgOzn62-2uF>Ky>DrfA|35n~sFK5C*LJkLK z4>J$M>tYVp##JdyL03Vp*q(tH^hRc*uCDG@WaK@XsQeq!6h)MTt;phwiHk!t82|pu zOekUA*^PEbok){EgC)Za%jYb0EOw=frwQMu9g7&*J8}FV52F9#c`3M0LZj*a8`ZUV ze`o0%Sg=FOaZCCi`OdsW`YBjp1P!TE?8SRSJZ^)P71zzm!h)mS+lHP8kv}VoJjveV z2|&jkMt?y<`)Mz|TWlKd^o@<}hy%y`f`WIewp+Mz@$mQ~<%`}P7#h}D+t4rxLI{PT z-qndjq66Nk_=W%(!L}Sr%w66=!Tw;DIA`n4p%aJMmtdNW&23YT;J*I~bbZkAhRb|i zNP@sZhld-Cue}C&LJnhL1d@XJeDY@r>Wx%2uaMi`@_HJ=$G@y>C7hGJk(NRA>v;glm9Pcd*k5I7YK|L#CAswHmFscF%wBZL6ub4xvWP$M*wB~@=@IKmABpm1&} zQ7cnH2@B$zEt!%{!pm{LF>Fvn_Xu*i66dyFG^j|XY+EN{zHklHZWEfMro zLzrkZQ;PhY)4yQu;QM@a?5>_4!CY0^-JP_QF5>v=%xX5IK8 zfsk~)_+kzo>2Ug|0W^BJUY+|R?ME~u27?3MZ}r^T9+CO_2nT6;>AkDui@(-Uu4AtG z0PTD zkQF~g0gz0*n%YmUK*7^k6Kq<7en8--d?A9U^pO>J$v*gJcBe-qO?Cr@II1G?z#`Ez}lWo7Uu z2YY)NFQ1gpK^O-~@()SnAMKeRrWU}9dIkgiyqZMpW?8)E2K`?NhT^i_(PRsIGk?oUWM|VAd z*~%4s2W*iAgmzUu1@p&|y-rSsY&JbVuZi;{C@tNGzy?g;-o?`<8d<9*?th(&44N+k=*5hEbEf_VFIZ!wx=T=kxP-Oh$)q|AHQrtT&ceL#aGM04mGuX=3XmUM9^e@A<=Yk| zB_(}BL!^Uqva`84IcYj#;!aW2H#7h@K7ai9eM18eFE7QPf$0Vj^=Fe(|If6)j-VQD zB;eeC2FC0E`7Z2kFcQ$;nU8y87VKeX3vb2De-8%aR;;W}9p$beGvnM%MC%k8;3#O{ zHxa8Lu-{Eh)$}}ILcMvBVzYswo$PYHsEebdDKA~~?NbzS@$tLaFf$JIcyR=P3i^-l zXVgj_o^5RJo0}E<%bz2qs{kYBUk`zLhK0_h^^1?hMA7fXYX+S#!W z3MkQW2nZV-f=#*(`xT?$aWOI1)6BUGwlyH?ot*q|@YX3qvHIOjkFrp}L8M}u0^-cx z{?Fr`6u)?+o9v($TpbBzJ9zM*Qv<@saN5R^ng%eHhuP&ADd?Ez$4Y+eGi`?0l!MfLkC+#k&wk;Y;AQy6?>z?l@LwLC}Fsu!h+<6o|nf>A^1CH zLT9wJgpV9am^aeb!mN|lnU>@2Zp?>v-c-)?iO|tmr(OUP#8W-$y1PBz-4mtr48W8u z`L=DuVdeUIE&%}t;98`N6e-};p|MbHUOpf$u5WquUk`&SS-A>mMMg#@lz{2r8U$Tu ztQY@2=cx&J4nP^j*EEl_`m_+OF&8y8HD<{%MGm?D4Tp})ex&X=4j%09?|*Y%bey$a zJM!A>5XOh`%m=wYKlIj9cubVFv@Qcf;xN|06a@d7rVb5fc&>GrCidI*Kf>1DWW+j$ zVG!hhlarY~)G2SSvnj!rz_>|Ik1-s-j*johOs#tTmruUn$)l5Sit}1O9yynE@7{q# zO;<-pB%}mF>!vY72aIRELk06iD6sGZN}oIlyL~%5mBTprzio9xzy1G{Xy;&(JSS^o zUVw+2K@_3LfHc<5qIR*T!j(sXS48B_ygb9U&5#^v%GTO-_lK({5(IfgnBI0^AgG@2 z8yjoC?fW2$`80;3&gpn417gHg`&D<74W&02y|XyOeDpY}e1oW%B8y+_1tX(-&z=EC zU~CriZWyZ~>YT8$KsDua0z4f_(n+b-i`EfW!Un_B&kphPe;mzbWG}gU-X@oV;U9uPAjSM)2(Fe)#SY7`lm;IZ|@C|VMpbfYkZS#R}?ye;V*!?R8}Hc71=8(x0NDf@05|1tgKK}RH!6`WN%S6Wu)w~MT!y`H!5UgJg>9*uHWzb z&vQJ-@%ZDuJL+;>=XIW+@qWM7r_Dm0Yw|B|brD#Ppx1(zGCx3Q_q7$hRfI5Mmm9@m zd*5f`xjTS&a0RFo_X`MgPf6JFg=zledLiUz!uMPkB#t+RQdHE=h~}`Rb2Lh_b{sJ; zkFPw{tyJ$jaze#Zzz&QP_GPr#6COIvQZjSsRUx>&_u?aZ=i5{IooU#;5Q8{4vj`UA z5p;Rdn_he<9>67PXgnQi8ozgBm+^GqX{ZZf zLFUnX<%WNSne4pwzeQ3N`k-mhyFow*${$3QaB@P_Vx$RYuM{|2FK_P{+;3)Pq~PRNR2bV@s~2IjW&=K6alP7%Z%Y;33-qM~?> zCfABDemSic`*T;-z-iz07oYlXHBFlt4OlBj=641CFQFgy+LNrBABg-SC%i_=%eNnV#me zRv#{Mi+Ea(NO<}!9ecp{EV+|)M)snpj`$vEJrVlF5r_q_xwm%_s7~N*6iZS90_x>z z?HP=x8_wqpnXN7LU`Yml1PgkV!e7{C0ij}lM+xIB;~ChnacRegzQVjs zp9b`j-}7VG3ZFFQd&X`TeS$keHAO2UAb@;v$jx|M&M)p=U*D|J6%5OT1rIC^w{PFx zqa5mLGi=p;x$W)So!hskYV)I;VWhc=f>|pA{9#J`ah7wLA-9CL7T&aC=7{4L>Iiu} z10g%~%_#a3D)$$yR5iWc@`?8Qj~}@(PZwu4r{9sr8anovC}+Iz(CQ4y>4YEyEo0s;?_Q6}N~%9{U<1FKi~u^rIJI*uT-QJr~}qo3p}aq&fo4 z(U640#!0RgR=w93#EZ0h``{@<53O`-=TS}6#hq9VQ0Ka-3gu*U3rHjPbQk8Ds7JAb zPaK{-?q|@MMi;BjyU{fS2RM;$TWBZ?GrPPBf))FCZX}v|jqR1MM*>T>$(DUv{bu&; z!rh$Ia|9gOciw-({ta3Yd?W@IQbIs9!__@H1{FI{SYRS39q@;x1J&ruf$n~2Z+HIt ztqW!|1)l zQ~N^I=0M7Rc9t@kg@tW&45)5MSeSL@ED|YLjDyvK(d+r>I)f z>eIDNETa-t;ecBto)r#|PO7VS`;r@bUx;IP*V5pSoU#4I4uuQHJwHs@dcN6a3S@F@ 
z4EEe3(JhAg<6&~EQS_e%2MK|o(R0@}!zX7qaGZs_g3|2nC|B*ka?tgKdB(KERTz1f zAVC5G#u8l@gr>Blq@=xFt7ChyVo_I^KF{(Mn?pZztr-1&z8e7YldLsHkSXf1Hk0E) z^{8{Pr-C6WMteIu>uRT0CtW*09_jJ6>N&%dqd8~}C@lz>9VIm4j{J< z_ZEi5V)cn;Gy3WMKAC<<;S8il(E>2kFk2($DWFGr6$lb`$$Fi|T)F&buH0>%P7X@` zX+dI9ocFV+kl-*)*Um&X3b@YW{1H>=rBFA?N7|IbooNcI5UMRAxdBLIp}X7u7Rp`Y zH?HBMUZNUz{qRMs@%)g5wTflHR}n#G@;Jw_~V)yP6su^*~%QWDspwEa++b`Gj*&eoizA zR#re%3jb$x9z&MIU*b-B4wuWxNaZ~J9eh{i??go{e34r#O-Zo?{QOOEQ&604zVH#Ux3C54ZBK;mMMR) zfa!;qj5D;)&;%ez?|D*3C7DCj7Up^ zk5&ztlu|C-AID6Lg(_O@SQRy)!g>GHI!u8RgM{{dMl>7P)DPILFe^RBJAwasc_ls? zxccKFygcvqOg&SurkGXwqTQpVqw9{A;^O)Y$cRbGIT<(;ey#ug`!UPt2%E+O=oN}jY<;6EfKyiyy7oZ#Iuw-^OXFF_?%zR^ z;dY}OJbmau#yR_8BR~c~w{{1fh&ov9?!5~J%R>mUz#6nPKYe~tMBSPQ^%^L&+hIgw z7#_Z8I2gpR_Bh-?Z{KVL^;Eb&?Gn>1sk)dbx7kd@JFjA3AvbOgpE8yLC6)-R3gI&1 z=WJ}yQVpI&Qwi&Lkx>W{1;Z|9x+W_Ytnh?D_a=Ju5VWkMgiOBWv6FtBf$3f2XhfZW zjsq}e!wtq{Jz5$0p}V^XW^CB&P>19gm)`Z&3@W86LsKO)T@il&evwTZXNS0|4b(5_ z6EyfEuo7Ys-iZda(5`laq9$f7Za+x(6@+DNd_B`=_ZaJ7sQ%a72lUWZ#z0*LMiE+d z*ed{`%uNr-$;!s5|8$2cYwz*gJp4N4fs~Y4dTx zbsU`)dMIXb`zTl_7V4_1ULY?<#|SL~8yg!jQ~+PyySL41?>0p?AdEdd^GIB_ za*S*R#-&!=icJCFmZP2BQ}>Wto>+1)G1zLwrm(sdOpAn?p$@5y<-g%KbL5cpm$pQ$ zjPElucvCbAqeDY4mhS=x31t==k2(KblRsi4@;jO^B7b@I<|kwKC))gC3%XJq$+@|q zyL9;@grjfmvhq%okd$0N{L|9DhQ>yS5S~QnWp~-m5h+3gdYhODX=0Yr34R`lqTco-A37Q?q z$gw{{gmvWyH+&U!$jsU$D=jYGjquFJ?ZlEN_zzkO~OgI zA)V|y$e2Tv&{>Nz%Lkp{wnJKQ!&&KlV<`3IX)*gKU8JtEK;6rs|2 zn>fEh#|Afbe5e|#C zDVQi=ZNwdufJOil3rNx+m`&Tlz8bN%4-NJ*%Rq6+$jC57UJa0}m3lvnVdV-74Mkv) zHt#lgv)fI_QM+K@XhFQ8+tB`mCr`fT6n{wE+=GjS*uJo!K)+mTEdh2A%ulQ>$@`?v z*53xCAv8IQz=F}kFFqrTDG;oN1GwXr-)hQXNyyM{WcOEqNM}EA50)@-< zmTv8=u5r|k$O^%*qhpl>gc2}23rY|J92PNDvoMcM?4QG5e|hNb2=&<^hiJufmGo01 zBSq&rxP|&pONfd>p31?+6usP5(XT#Dlw@NyJf zs%v70u-9>OUwdefpwX)7{`2tA!rs+OFJG)>zO3-vz3a!Ng17VCZqA7M>4GK>X;(^> zC%#NffPR|A7^uK*dF4lR6u__E+>=;gT>j=8$d0^m^&Wz8^g1rrHP12xA} ztCq%wh6J8ZQJ>Y9f2zt4oQ!do*uC09{*4%2tCvJ4$F{wN&{ z;h+z(-$Ar6?lV3(*qU~d9T8u^+Hm`&JMO`F`+YN3gLF9h7QQCDzJfUl=8s9-Ufxag)EtBBMo@v0FNOpZeW|^uI7w@~3kG+*i>3U{L~q3w;Ixi_2Wc z80tDhfYRXB;i17<_BZ^hw1O4%Teh4wq&7~_@bK~yPLkv<-MaSt$&7ws^$EY!({_?F zF2dDxTh`hv=*ok2jQ&o4`t*g78IBZybQy_q@(CVGvl-a2vbr!0ySfU244^*84Z=J$ zmYx+4Ayy9tyyF-BIgp}Iu2$k6K}=iz*nK| zUQebhIaGp6aegGS&Y+z6%I{-3Ftm~}#XLMDpGGqTb3ru%h{tu{rEQ@Xsug@J3MzyI zB(D{zPy4;%%c)cxqGruk_!i>WWVj%=NIyvx7k0`R#)cQJ|!Q~gp!e^;VV15wT& zaf2RJES4@a4UOHAO~5N74;U%EdFnjeh&~wk5@;i0rCd^z6d`k8!W|$WLr912hKCau zkz-hxnU|Lb3u8gSENbk&2`Qh2ujp(M4{?lV#nLF?wbJFQ!=FDNT(d4b6cFiUWqOyn zl((~TW6F6|E{5RO6Lo>NvBIKClAUQ50s0#p5|WdV(U!4{P7tGc?DJ>D*)AdMj<(Vsl1fy~LY$@5)nCmHf)ojov-+I4KGmO1kja{mDLgb(Qj?u4;3JM zpo8d7edcj0PchS@Q$lHR5`LSme2N+iJ5X%~&T++DLm<*lZl`DhwN>Zx*(>&T^fNn- zZe47%;GS?~duLMkBmGWn2jz5T?})WbQEw_l5Yq#dW(gWLq7U!{@>@=761?@w%F3qm z9ImndnvY^H4|K2_U8#gV1rmC+ZcB@cs73M}GOmVic#?DUvl)dYt@2H&Y?dr>=Zw11 z(Y<_iH|4E5U%W_4Pgn6PD=wY~pMlB}Qe0@`5(l)A&^8Bj2_6(I)g69^=(wN$Da)+h z8{H(zGN%ISx`mWoIZ2Vptb#PrZRqCUV;L{5C8RizW>^XvKHo*9rs{0)dm z2wbYQswjRh3G(cN<{a3O9&L8=O$gXF(>wpWRG1~B+io$SeDl(Iwzr(D;r&?Z8=NGd zUx+e#KRLXkTU$hzJCL zh)<3(-X26V)BOhX^dWS?VTYH3nm85udvZqdrgO-R0+q^=YL_W$7#1@bXyMcx@&eG|=Z1B1K^9jX~3`<>xXp&f1p zj_%G*QAo+Sh1gvlpQ8&X{UMp^CmyvBmdCRYuEGV(9Y87^ssoqiO`z>Za2gg;Y^PFF z6qwG?Oti1hkFapMIm`0{3s!o15IqjvFaiF85HoodfO+V-H*jF@M7QWh_>nHgOPn`F zOkK6CIhXfE3yV)}Ny;s2SL~U;>lm>qYGT~L%f~8wwjPdmC`-;xGLwC+i(Ne>nOoh{ zqbCq4l4Rd7SAexz^zh+_Q-9;EhPKK3^0Q?}+SlsMb7l8LuNG{jX1HJLr^K*~+&MV0 zT~YpnWp8%E7%CrbC&y9`T8LPQv~E-%XpcwLT?4BMKt)u4_((uNHxtvLx$ThEz`S_% z@@2#DceK>$>FGcYnET1?GZ@A)F;SS-uJ9U-ZmwtTelIOw4<8QBsY%x2Q$%B73mpl# zU9?#+Wn+8fn~RsoZq}0CCsZOXUyssh)Uk;Y&wjX(afJq;0RE;M!V!3`xw(}DB5}x# 
z)S*MhNo}nnn87GLO-h{={nP{8L?mOX%b5D_zd8KV9->LBX+2VYo&x*eAEe##cy8JO zUfxkmo>?cPISHy2RvUY>cF1kT4e4`bScd-c)HcO|WZ@UH(>K34pTm8@GXlve`ukpZ zEH8Wmv9Ga>amNmrnZzM2xsiy|K9)g&X>DWt4ZpuJ(AI`<$i`{&&V~;;S{bZtY;zZb z=zcn8=Deel{CNMtgB*u$reTk`%$C8f&nTIJdaAu~8t>#A>Op>^kZ_Fp`;7voIy%GX z$Sixv-cmfB%26xNvT7>H4mqA{6ZfE%oFO>yHQljO8QNfVXPqzxp(hRtI|(ZqY}U8* zt__9tp$&&>8-`u^X_|nWEUZ%+edmg_Hagx}CF1z<@(!kG;W=k$@B+WTr;2mLY@oqP zDbDI`-^{Y81n|o((8$(yO3h9A@u@-~z3});KbXQlp8<3MQu<8HsY+obtB#DA>X(IK zXGtkEsB|)_UPm`Nf_v^x*wiABRNyZGz zOspgNx!;H@st$(m1cw_zy!wb2ftrM?5Y~-ZW$?;<#Z7n%O1ql-{C5O+rKJrk`<7y? z>eLFQPNp@dJw4M>K^m5f5f9J{haC)u51?lROof_|=IRv(^_4E?*p_+|v5qqGw`5dH zJlCktb*5_)1+9p*-|8|1)*uIsQnXAeePv;{mtu+bOjVC7JKJy!ZE81u{+=i8wZCj8&Onu9+r|w9feMH98IOC=OWZ?C4z4g zoO@M;N)#!1aNi*_Q&LpiA`b&Ol)NArq8CvhK*j-CCh`p+T-?~rAtZDGAJ%N&$Bt1dFgBym!f7M zJsKzsht-v7KO(<+*Nz>^YjJM|V_Eg;-=jl8&1~^{J6X`p^%h-t1I)+fGy4P@=I*Hu z1C1N2^}*_Pg7;pIq?H%8FGJ$6jN97$n`meZG5xl3OGMc{PKsW2ReGoPwNAW0p`gP) zWoJKUz*ba~8#rJ#O5`1ft00hmWP52w2A`BvFP1rEG)PHB`|{_SZ7tFcM)eKI1mo|Q|msjrYd)O7vCeLg5|bd{7Ws|Tl--!+P}ZK!1AaK zfF~1(DI=vO%QK1u;@FzpInu+m%8;6Y+!^8M{ajpUfu)a#cG>HN+ZIZ5adC-?-t%OP zb-)K+D5XwaeVV}?sFKnSOCi>K79X|uWv)|qO7?%p&6W`mK$2DtXco{~mLHUbv?$oc z3Tqk@Og^>fJ%f@O!tuhQA^;=BsPr+k7!tST?ff~lZv7?yfD3GsLxiCXFei?wdjX;n zd23o48ebcD;#m*-MAdOhD>Scsaitk4m>GFo9RwK#7TeOYGI&o8dwy4Vq*Y}>A#0fZ z`gt*3b5GlgGe9n&(3e=)Qn%J(_q3aR;IbfH;-ux+v-uC2Ms$TV+d&Z9l3{kZE_P!H*4$a z(vp*Lqz`@w`@NW$8wAqWi_t@f$i6_w3FFh+Y?|EIn{!UXl-5b$1K^DUs}MTHesJ9) zAPlDh5`aHw1tG%lz%N5BEH0b|xCZA5sir7Q#0$=jZAE#G@W9#Wfp~h($R^;t8Yr$M z<&qrj?LjZ1nD0eFiD?aRnnhj(=nl*}#O55}!faOh+8zU>#?orMpq3?zQs&HnGOa~d zm5H0%oXZ>8G`L`!hrWCvV(PG0i=2vjb$3;HBS#>CFT-#&x;DggPEF9bkT%XrW%>NSmfgjJ@ssu}oKcJ5- zli4Eo52z1Oy8`@ShR@lgLDhP<2E1hN1^#Oz^|k-8mg#-{9_8 zgi-^KummD-lv$RCEhdgMym%2XbxecZNJ16vePr9s6OpaW(PuEDB4YY~ZjE|baIwAF zV7oL6>w(s2;`kI{VJ+@(^l|7|(44n-gv1iYa+pI(Zr(hO=KmUeMDT@obV#k9H;pM= zU7+(hn$5hw_IcR$+Dx(|l7FBWivc4lQup2$?@9iPPLyQ+PG2i+^XOR*kVvU_K01=B zd_1JrlWIP4=pJw_v&-A;DsAokEbEmBIf*_r(9uS%oW=a7@@^7~w?p$N?H1#rbu6=+ z){Pw8r>nMhh|KBwtBuk;c4A$r5l#CQ2f`Zuca7I~MmL*TuAC%_+h(e{Ev3x{v45M{ zqzN1eOxrmC5Z$@jX+yVhy59C3RVN^d^QfnI1*>%eRw7gcxLxR*@#wK)Vyl@=gY^V$ zHR`6mn-~JX-ln3hA42$d9LXZ^k>LqIqsE&vwN~@u1++hzay*Me5A`|k9IRFic-l6D zb7+9!{>2?W*3(~I&2jJ`3a+d0Yb`?Ud9_#8eB%;{YLIM+blrW02&8(~NrkNQ**0Y? 
zo!EYWAtlSn;+lZ){yi~~^(^S^_Y)_h4t@mCw>-%(*qE)4C?Y5555P9#I;c4ih75%f zL~*7D2E71!JyBufv?2D0B1LUJ znfAy8qki@Xz6&^I+2JldGg$=WDkTvRDz?R>r}e-wK6ngm($mumR)CH~UhY7=xI-U$ zEcnw9PfdJx_%u3%20i;oERK$VRPSVcM_fGj9jJPllVw?U(!XDU!rj5;q2TCA0{Me$V!(cV;%#~y9X1;*xws|mzIyoY3v zwykC!AJ?O1!;*zXY*a8YZaaa7G|ytLR6yGncpJ&xLoV%}sZhVz`G^mRGKUxo$boIb zi5|q!g>>hozABdNk0Ao}kv0_o3)%K|62 zn{}5(SBW5*QSEfwO&2m!V89x2Vb=5{YN~wO(PXFDb^bX=vYGvnh#d zx|@ibjyI^yObD-Oqa_us%LjuyNL&tn^?107blw{o(Wj60<;xKll;ad7>n-kVP&5G@ z8*##-yZhAdbLcFS6#bphDcv+en@4TYHCc|ifS50|C`2N^o~|x>@a|Z4%YgY3d&R+A zL&><$qw7gBgZ|~0-7QgDSy>?;t-Yav@6e&xUXy>qUtlWe6Lqunvh_cH`O$!=K^IHETfpX?&;L>_1KmH-o?en674L=l=iK?jbLkGG|E6mdRp2Si11xi zOX2YX+73tWt?Z-LN&b7#9+N&w!p6(t5`O`!@Xe4Au?0<(<~UnIHH9+}?G=J>{(bXo zo5?mVeFC{zMa44KX(0AvHd~-sCU`sIxE6%N;gl*bP9p=*ukbqT=jDaB3oCW*4=+0Q z!%+F9gT4|lEU2$%b`__QB=F)H^L3=JVUC%6;jMND9Sm4FBUE+xBUWAIRR|P|IB6Bz zgjWfwTP$X?P8u2-=!0Z^AYKjdZdTbstY>*z8TT5_zG|YOqa&cMRaHP{SFT(^>(}Y% z;3nP}`ptW`{bbsLAq&oNt<8J~k@vqC7a?<)SiJ82*)v6%H*qL%hl4W!rz;<%nqpxk zLSQs12B-w3-{kIPAcCn-^dc+houh-yN%Qh>)@LMhD*7zP0r-i1(Qmn1(^6CO3JU6F zXxkP(a9oQT0|GOPH17;4MJK){^DRKCCLrc3*fR_Sssp2uv`+ZT{r+XKw}Jl@us{m{ z$w5r;j#2xc|4n;C2&bp9E3gun+3a0wjN(-MTFeU@Z^+Rv;sB2nVuwCB82DuN=q!1e7L)1QZV@0cR~L z0@4K12NqZOzYMH1u-G4(?{~p|AoS##`uD3^#0;9mJe1}m?$o|kDoN62;y=_>m;C?Y zxq7&Yn385NLuNq_;Gfs~-Mor!Y4iz&8*%D zY8>-$2^9*??Q2K96Rx+9HMIscE}N)LE5<6qr~6C-zxI^0mrCDWq$CBJ2kX1JyZ^w9 zgrxDb{FZ*ZE2(|UDYHAQDFG=!*BuM+EI$7HciXE;&z^y{@EJ>qgqFT_ij!i#nq*G< zA9#(WYWF|znmQ4e^VXq=I}s8VCe{N{Q6eZ)!jvhlpoJm0*8Iche*uC|Z-3L=JPRCp zetr!C61WQyIRI1;HFCoxo>B9CO@r=t=(x)qB{GT-Zh<*FHs_koy|fi`$wXUQfw~lP zGa%qEOf`;V;34?pfw!TqL_JzoPW-%yk$4P=$Do8oat#DJz#bhg`hBOpM&nG1hl#vPskfQ#tENoeZyI)1E#DvT?xpU>Q0^|P@xf1@7#JDdP&R~iEBJjv z`2i4CUK2DE0PH^S=6p9EV21|yucE09=?XO8vep26V2V4nb@F_z%62F2j`R!5;<=;b ztfas*-OmbvXh6-zPbeXQB7axDr+HnQUnfI**2&0-Wj@uA^G>S)+d2M-#_HQkt#R~Ty|~xv@*>)A#F@92*9?B5ye@UWDdpEypDuFlbuX!w9Y}aE zdJ@IsMq1ia{1K>(a6Wod6C^jF`(PX%>j^BaZ_Nn4v!|B-A~NG@I7a$6IW6b2V9&Vp zLIDo^jO=XD=PLgMU{I-8q!OY~DyoTQpXG%MI8tki8|gqmV9OFnXje$%)o`rp*s*s0%uM*)BtVh@OOcuhLv$_tZzD04USci z{@8+04nhpsJnKFnh~~Ht+$aEuE?YOO(yFSBP?n5_Vb8$!*w)vV*=EyeTllTf35_RQ zBAtNCF@X_ka>1-M(7Cr!xWt6*fW&V^1@DUiiNSuTyLPY^p&K|{1o6Z2DiAfIbU==^ zh#x{)PldyU5#V-E>mz9JwrL4{Ep>Ie6+bj6s|LGuP0Y-u5mA(W} zq1_DBf>#aox$5!bYWnE(?B;YSu|aHDNwe*Ec$s$TO!0pQD@F?zWWCymO-H$6F|!yG zAL|v+E%4k#wq;kBPLS544v-Mops+$q`co%tc0#d<@-uGRud)+=sv`f@6%suxH4O^J zJVxPP{l))d#d-a@zL@kW3L}M!>u;d-31Z)j<@K!qS_9H2Yu)SztDfufOl*va(zy}U zFxo)0rdBUA$^5V3<Epi_&q$6{*pzSUf+E28djk(1 zEcg%?Ob2(jD(OGJS6GVl!=hiRO)sw{po}>tYyt$Cb(YVsCE z%}#Lfuvg-MKaL1h{051XClRC*bDZ-Ia4r4v?dF#MsOkT&F5Mv)%^{{ODohmckKuCY z&ZX0*B5oeO@(5v9lv^6zBVP}84;6vM=h2M&$zL zVg!ZEu0clnN|$fDOH@=ey~`FPrJWu{HYFYf@t%ip6z3Cn#;V3Y#SJB%kJ$3#e``wq z%NIBV`zDSLP+a4IdWWJmvjxV|3;?6Hq7VF*huA@9KmCt8+zrvS#Y-CbdMrG^{_Q8n z{oRPNvgT-ZpE0Hw)-Rk>V}O33aAKWqfC%;P2TJl!=O9|4|9>OpPmiT6uX*psKmr0J69 kz8?Rt+W9MjH1WsUoW(-(o%O=VXe5!2DXJ^v%9~&LKU~|T!~g&Q literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/deferred-attach.dia b/vendor/Twisted-10.0.0/doc/core/img/deferred-attach.dia new file mode 100644 index 0000000000000000000000000000000000000000..9e42967292fb80b8a572be5702241b323e7877d8 GIT binary patch literal 2234 zcmV;r2u1fFiwFP!000001MOW~bKABOe)q4?D6bBL`=zekY0sHHb*3|?Jm>9{G6M#|$0hgM{jdb!?|=BTjz=HEERWLUeoPP^kHTc0 zE}~?4KmPykKfl-GA0BSLUqr!o@MoC?>k<5qB)6^m@v0~`-%Tb@PfsX*&VwS&P#is? 
zJe>R;#BnfzPLuJ&&1m#`fJIOQ_S*JpP!w4-do03H60F1f@hq7Cxy;hXWHGLKZTrpB zIL$^MgZO^@?nC)Ao@|Gi?2U6`>`kx3H0OYyup>QXa~{rgp)ZLb_UCCi6* zzl22*X78%o+Fsm^=epTipkf_l%P2X<%EBP79tK5%zdVWS0cKqT#9aeqT?0h-E7-hji5(YN%D z67Wr>ZW)poGujMuo+imOOctdj4eWF9h5%K8InWhj((qBh+n;X^@5ZHfu3}8Gm!g?%bVFKJ(W zAM~k<>tzXJuirt%Bg`@z_w$;X$BFq%-E1{Y3KyWgzU}g#^f8Md8&BHc9OXhMIaE84 zeJ{Jl)0&`_but&;egO@`PdlI>j+V(fbib%wcqA91IHziU@^?*u;OzqhQ?~+qKyRer6VbrS;Us2Wdr29@8T%J-93W$SsdM5P#N zUSPb8HXQgu7|3qT%DSdPhH7n*1v6iO?EbMRg83@^H`40+UuSXj*Z$|OFE+ip{jJ0F z`ZbDgTlCxPQDg5)qk^+K&ywS4;(EgjX^_w$p`=u8CDc)Dsf#ZFN-9tSpm(CW2ZJC> zw!H^;Il61howaG#mr-Pd2c%1)14D|p#Hue_U-p&%dPHXl`*az*3CtqMS4XPD8DX(0 zf36A^uLD3sdTU?w<_HUEC&U_S(ign}6qled87G$B$~HmVw!eqV>Fv|<%jqq9VmCpg zCAnv5wg|InZ`0lmgNZlV8PBxS2A3!u0HEhVq6?xAPN>(lZ<@(z;MXYM7$AHOx1rf1D7kuoJ{rFpeoVP*Q( zygn@-H1D^ec?tE*yM9IQmUl0rcgwq;MXyiG2hlr--W@raFLHv?|+#kUuQ>Qzh6f2qP+Khs&fmGMrf)f(^E-k0n;N zX>9ZDFOvFiPz0mjqU6u7bn&t7e(|x>&xYkuU9O=ds`VmDcyCzR-Kdz7rrTaGTSv(d zmWqRiaLA9wZZc$cD#{Vjjgk8d5&sMSdJOX-AC9QHFWE! zq3zCKV#TiNcvCV=pNY~iix%bTYlJkb>4#QfjK73tjF6`kQ zhA3uzooQ*xb8evB9N(Q0&*sf*GN*G=4%uuT*5=QXLHY6=dg0`}37Rv5o}Z#OfjZ8k zSNUTzP**m+FrPMotIMh9vg+yV?ar)Mgcx_ZuWh=e#8bM(SZL?o+fG6dMn(XhM8*K# z9TiC1Et3XurbR#L){nuLrCW}aqDJ+QZW*VvMY^r6!8_TyJ3IGlOYGDOtI8ei6s9Bu zOVf*pWmrmZYOal@mQ|p>EUU0>e^H4&{19eYxVWia+>qS8CHH!t9Fn{59^OM>N7lS!8P4Z$A>zArdwII_UPXArkqI zMB-nHM8bn6q8M{qTfkU#D~T}W9Hh0?{j|ucUH-zHhnv0MsPl014=wNK I+-YtA0GiWhJ^%m! literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/deferred-attach.png b/vendor/Twisted-10.0.0/doc/core/img/deferred-attach.png new file mode 100644 index 0000000000000000000000000000000000000000..80500582fa9d2e9da0a309d5338a5c36f2f0bfc8 GIT binary patch literal 9356 zcmZ`GS*@9+Ej@Av-W?jGE|-=FvU{d&J%&+(4Z(@|$UeepB|0%3gk;O=7xgn|e@*V3H= zuf%=jKJZ5ArTWl-j*f0@Mt2$l;e$N9t771vzLo*^K8Gc>Or<3X*}ZxpD9UergR0$z zjbEBoq~eVfvgdU2qOM*XPmb$1^v`Robnkb2c#LxVbprD$IA?#HP9GFuY2c+g_r;7N z%G88v#D-7#jOlv))`6`%r#&3sF+PGPddd8rpIE5=kmis{>bi+vvF3~fkK=xo)rbqc zHz-eIDIg7<4H!zui?lEVEd(M>31x;rTxqO$AfGq<<&Dywh-QTzk^H8D9V*W8(QObQ zi*YS;9`(_}5#)oZW7OKuF&_B*`#*o~=etmpf{wD47=B0mP7~pYYr$>RZ~oyL85v<# zBD<@py&O1CL4PvfMQglqZ(ko}SVNi{DkUSs(#XgN26L;hoId}bf7g@e+djl`%k4I? z>)yS8-*vp|hOjWMk(O6JVEg;PU>MiQ1K;`;0FSh$VZ2yw{X6o{pFhG`L6X<5<>lpF z6Bc&ZT^&)6y@VXS4*}EZi3+2nJ=%;5b#`_RTK@6v)926m`T6Q`JPJ;O1!}LEOkK=O zPNv;M$AQ^8L(9Nf;yO{2o1060=6q9AQ{Cb2nC+h!5~(}%2#bK3+1Xe9`|_JF2#1Hd zy0BVb7cZ}^K3yelF0KP2`Z%~F`NmC$F7j+NR7?8i_@?u4N#k>_7x|8EZc5&ZJ$svT zZAAK4mj;uP;Z>b!(r0uq+e>{D#7#Llxnj#&DTl7~%E^FeLZVFI?kWL`fd#HrtPB;G z3C(OsQa~V85hG;|(ZwP`W@cs~hbyp_=4MluXUS~MM?|n#CQh{Ow8ljiKl1Y4#>GL! z#GcOG?k%|WrbRqne8v6 zxh)?}P}*F+e3=f1%^8DOI=Z?R<>f8+ee2%eStM{jLy z(eo#tYGnJj<(uQFwq7Ix)-d0w(5xb_u<#v=Cvn0L^ymL8f?>MMiaL3SvOqcl z?TKwqz3pUFVB)dVE2P@m|3jYxYGt9P_oO211&co%^1|K(8oDvvhzdUVSy{=hdPO(0 z&aN%t*Duw4ZA$5rFn5gDA+*Ht3P+6l3|VR>iB_)^)fPydrfaM%hCtqxSfNTvN`C(I zC9Y4nZ|+hd>-hcUc`D+PshCS4B}y-87)L?ZVAw(_s-BW!1czP!bNAFg@Q&=C&tz`> zmUwWPrpso$%EKrP5^f8F>bmYE?$Y~E4>4ESLtbPcD-bvVCZDSmw~1^&?ZoQc-na`y zx7b0M&*pz*068PiJqE!c;5%+XzoKfHdJfCIH zV`2iCqk9$tnTdQ^A0!g)zi4v@_h!eZ-8!GZ^(|*9Iu^aIr&#J??B|>Xr61KolAbt0bx9aj;e5JJ z{F_Cvl|Hs!TupQdiw@Y6>9*xkDb! 
zB4ZI=(_TSCdlB-&20CoDSI6QUR#$EP5}|g(@1w~nhF>Tv#YH~@$?AJEbSekEa@TlG zNh-I~<`tV<@3om4Vzob}Te+Z5S%!FA;Y|&hy!9ol?P)fJCfFg`8Jc_H@!o0|ZLIfS zMC3{I^jYoHZ+@B*?R5(f4E+1PHtkC?Epd%+8XLCLbB?*9T}^MXvs?jUJf<16MxS@_ zDzBb7)R04}J%{?6Y8#*DY@RAZ{c?n38h1nb#|A6C2o57$V*X=8aUQ)y--1Hpp1)cv z-vt6j8aA@YpR^+bjrjd-W<*&ql+iPqBA&Wi!>ULVDDh2V_3ak5b!43h!%Q4siGI;% znvA>I7pV^|W`#e^#%^1wbjHh_(})$~(NmM@RuB66jjG>=xz-2G{Ue8x$J0A}Jul(b)<2 zLZSA(FZ&t5WPh_a5N3})ZPmA&UQJrL4At50T zmnK_6%Ojg_Uph|{v5IG=y0Wv3J++QxJP&5KwA?<#$H%9p?zahN>7kS$5IH_MY-f^~ zIcMb6t5@j{J8!Tk-jbGfc>46Iy?v>Q0whSvb=(9LOf$1~nw4^KP@ zQruOXSn^jqItq|dxpgDn;GDUL>!ljT{n%&cra}%}baZsg%tmK^zXnMhqO3NEL45p3 zb&6t$icsK&y%f-M&Hj|Yh|FCsZ{EIZoRMH@qobqCMp+=rEOzx|IeeXME_Z|JHh${3 z_p2r=YikpA!+L7~snEPql0o0X!uhv*W--!Vao7DKozmk$P0sfp0v1~m%Hkyp_UtbU z)-qnXd^ueE2Nh)ak||Wl_76kgGQrc;a#~9$*y&A6!uYr22k#vh1tJ`)|CZDQ9UmRW zU6Qq_5Q9K2GWB7pC*zB~xmd|>H7V(%gKiWm>8cByLQAv%qlHFbvkB?Q~3uRKV0 z4hx1=JT?bIIepf#CWw#4T@O{4@=0B!B|(4L2a(1=eex3(sq<;FUJJUR`}*sL57YS0 zE640Ekb{LXjiX*&2|X0|ii zWg8;Nyoga+(%9U{D0lf|ri{m@x;k=G6#E$t@%f*#Z1vAaPI^iZ(p-#0*3?b^y$)%J z>{&ooA0Hju?y%K2HxFDNmp$@ri3Pc6qLPw^LGM7qy)Rc=G0P3ShM>_kHKTZ9*}=FJ z3!S?;dJ1M}*zC5nMas+14_4$g?{X8E#A$#kU(ntf%>hM$ZUG=D`0-=rKZCEGK*JES ze~oo=3k~FT|$a22v3W0;^YoM&-Yw@VaNE)m!(*?<< zxlIdnNIk$)JN;Lj0VI?4TOU{E%kF7~qRFMYS&DOi-Z{Cr1Roy~J4e>1&`M%rYl+5I zIb*)d{Z~~V1O(J%r)y_-baWVi0tw-K+eIQ278Y`IbA!{3L?ZQLwPc5UV)F?sYLb%2 zP~5R1Y2(8du6rsFUmtw1YdE~pk#aL4e$2IY&3R~qH+V-tfMM>YgzuBiwcDX242&}} zMb5A2*L;6bQ4v^e09OH+#AP9lNNC~XAxAm9F6wy1wfks}_xORfc0C?-AR_i2%KSpV zM-~=xL?(d={U|Eh4XA&&_U@aU?uxY_qtYCJsd~1yw$|1;o=}*H$?)Xlck z-#F6j0o{^K4IMuS)&hj_a6SqZGFtxZvwB?7j~~^m=%DTWM*=Q_5%G0hb@`UYE6dB0 zH*YRk@sLi!E@LRzveK2C3*#jur@p?*RoG9;`?To(D8D$7Me=$7_x^qZL)}vVbzvm5 zoSj9NKsQG>4*l%#Eald3n26F--a%g7e*=dP!^2M*614>G$H=%(D|>t2GWK-qt;f)c zF*oy6%jI=)NTc^Uqa)(2>I2T5J^Lj$x3>ix_YQELPv(A@Q+SYP&BGNU+MUF(CUr~U zih>~5(>L4WRb5Nd{&SSE{`-h$_!k;aQnG|wl<@A z0!vE#go1hKb@~zBvaE^f0K-S3aU?8-`{_G440?Y#KO-Z9Y%)o8tBC;Cc&3H0kj;_r^jqnf^P;VtoeE$=t&u;bdvesk3IizrX!2`G ziG-38`t#@CGWkvY11IgO8w>`^LLi76znQrl5;&!jjtLm(QfK_Xt%Zlck>YNrbfU1r zI_cl@>N9NN|4#kCG*!G^(Mu|B@3i*xiNm2O$1CW_M1LzzhVd`$iw z90a1-V~T)Px>^PX2I42{)g&I?PW5uOv9Vb##1A@=_PQNu(_tXk$uA z#DMzh>gpbB^GULM3R0IP0jsT?Caz0|42^xq2b-kI2FY2{&X$yrkZ^!U!_-hgR#usZ zws0dvWvFOYTG~O68uNw!_I~ICS8pPO`@uRXK;~`;^6oXHPsgP0K&=7mRnP&Zv!7&7+%cB++_GHmFX@$KGNef zL(9^&Sb$O5>DLFV)fq}OG#ACYTB6FzynaVAZj+$D8RJ^Kyb^gW$jYsYN^$S>7rqEa zOivi!$Xz0U9WF*6?Uz2q=w!;LeE9I$g=OgXoq)UBd}qA}vD~+Wzv@p0`(%j&{IT;* z>G0X!#*D7~W>eb-VS4W<^TrX#K3nbz=9 zgYFRlfsTNEpbV3z>&z@C0l_}nUjfI!fBUaG;LE-TYn8@^ zhQWtBLw(jNpnUeMRbhh`WuBFW#fLYBQPC-FqL z@hW6#X(Q)ll%yYvEGf!(#fIuQjGlTWWDs7F$mYJ?qPTck%Y}OJ>Cl!qCnyo=N}JL=*$k|NZ;-;$q?2*=VL;Jj;9d5Rd3G);C2kCWod4pk zN9Ve>Y)Kgcegpy2mk(k!23as1=g))LN=r)<bU}Qw z%n}di`ES~H_=6x>n&KVIs3~v&vEzqXWnr>LB@(4{iCiX8VYbo%*ESzxw(Jze(#C&V=qLqt`H__{X+5e zL;`_e@_uH*Z|bm3H-rF^(-a(+;W<|VEBW%}OAAk6$J}O#QUK~T+RfY0pU2EEUneuB zW{yeHU*g&G$9wP2Ky$VDYkgCKdn7s$=xU>nQB_x4?&~0Qb#$&oFkHOl*p)6@jM{&$ zebPGPLdkO{F+1B7HS1x*`#vF|G>GJxp-54Zfcc{+b;_(Kbi!DD9wYa(P~>&d!Nm&~ zs+~tl=UU>>+r65AGVq4%S*xftf~OTb+T)QArfzr=zDRhELz;rpFF?S$bv=A`*)@P@ z_!z+vpOEm_)RaWt>Mk~`c;1p?kmYMV~)+{`|6w3`9AVZ)^B_@T&ToK!VvS5F5HFrj7Zw&aHZZuP6cY6J)4iyusEUdTa1iu!wMdJfWp&hXv%3KFi)~K?@WkvLHH1oCDhYb^st1uwFd-VU^$iBUi;D|g`ih9HZ}ws})pf za!@t3_>1F(!ordq!^jTkRIQ(zg0uc<8#wo)L}*r0lJDMzwHJdI<0+2p%nDq*cw5s7H(xMoOkH77sVM3u_P~LRXoIstiymRos-At{Ny^*7S`ThxN`sOW=VQ?!m1Elv~Ab^a*0O4 z2A$5UJkKWp$BS_-%fyo4@NeQ2l($o=BKF#(e+_~v{JL0u?F{8NB-2zU!1$W#K zjgNAG!iSFx#@fHVuIKJlJhx+~n}R!Y`c4?Lhlw*&>6q%}2Tm8vkMfPgK$jMs9_y*f ze)1SJyKJ@essJ%n<8%GDM00aUm5_3?ZK*FOjWc2kr`C%_lOmHPhTul$pE>uR$4vELE8T>@ 
ze!ZX4vZ-|3=S1@Sj6>3!%V8C#HM%rdNo6Sc{;XLsc1gV(|+;P zA&ZTrd4Usa${GHzP;3NMA>;GJ=}Lj5xq^EjEZqJOMB(rzzRsCG-H}(G70m>IvO#u` zHVS_CPN=#%l5=EIN3j<9rJiX8wY#Ji$t?X#%X8+`Ge^F+L%~md;EgrKFJix_f0ovJ zj7dC0j{W6vcDFa0g_lILRjyoZ<$ol*A_L`=+1z*iPhPa}> zyL#i-{odgRk&L!}LL7=;`H<@?aUpl5T>Jmlq}6TGz~@;{0d6>yb4)&gCeooiX#n8! z!+;zj*@}>6CSpO@&^8=J_@L@Qy6Eied}^#O$=J655Kc1X0`dm4bTa{f8TZrs-5~yD zua3Z-H`en325ZRt9=y~a312a?ZY2|T%X3Vi=kSS$D~`9oa}G zG|&k;w9Iq#Iwi(+=;7Y@T~HYeHVo8cvdj!WAYgBx<=t8G7T)-M=o<><;i@~h_Bzc5 zAk_wssy2^&RX|Kv1`Ex9I{%dbxXT9qt>EB!H^^G@5~pPOGmizn8V$6n#O1fz2tS!8 z6YOVZ{a|igP`C4^ZAotELn&clS`tCokBZ4Se(|z#zO~)uRPkg<$@=0>`cIhJ+px@x z4{TtS9&rLZ$R0@|bXk)0Z%zembzU#7BTOA14RyTKurG60icDDfHU2hX?%Jx8!wIbW zX!~2ndct^-u&YwCoK{;VDKk1Y#vyJ|Wz+l?2M7WOdhed9YB=yZ1_1bV*&3Fy34woG zB>e0|(yLiou^5{w^#i%ITGOSThq*aIRf3cFT- z8IvuzNhhwuaQ$^ca&nayt}9M)-?9DURTmc*z>m9HTTRLx|9;g(6i6{rwe0!8h|9%B!Bm#V*9RJoWk*EP?1nB9yoaZr z?9rXFus_5919B8TH%-9Q2}b-5aUZxG`)QFEJx?mHt6 z*PVq16Ixz=UOW`|JGeXjLQSGK6Zf6|;b9X%I4j*?uPp1V+yVm+khU$ZRU|LmgGcYJ zX(qFaiejykff#Yk84w^Pm(lXSpVf!@`^QB_Q5nutt{;OV;_lr}wmzS55L`_S<=>tpPiBgo69l4_!CaU5Muh<$c@p?SjgWV!L z(9~SKLc@Q@%uAG)on6hwHfqQz-6w)&b~C>G7TYYD@Z(41*h9JtQVbx<=&7lx#{iN8 zNC1c&;DeE=X*JLmfPvJJyeZG0n>!qq-5RhlJ8OfUDpo;El{s|3GdY^@QO0 zEG)Pl?qBVFzRmZ=BxKn@(kCxx0cqjlYQ%;qs|m8ONTJ0L4h5S zBsA-}nKT&*0~>;mBd|Fx> z0^x_t@LM>cl{mmtu?q{}=cz1gTwHP?Sf?z0ni35md zem>@stR!&M}v|V6+fquK zsA$C;6o-yVkR~KY67_*6dwY=p3)*Z3VE4@F(@Z?_GGLp#6%STp<nJz7Y;|O-d+}vFM@#Cg3 zAbsiRVUi+B6+C8}Ko(s@WrIj9FE0lgo?HFSd2}P#LWhpz_nDa}_?A6az>T?8kL$v^ znRfUromRW{fuSL&Wli}%C9>tuAH}1+IW(!$t+%(g<2HLw{0;$lzS{?f_`0pwyvk3? z$?p5x&y2*tK|71CtFBH-G)+xSEhuGNq+7v)hw~@Q_ZwHxLm*T#CwBoJsu-%N{RXmn zeqNr*@ev>bsEuD|fDMr}OfRyS4rTroQPPQEmKMYtT;$?Hh+FOtn!Aw>kCKyH-`7AS;3&55*r8y!6ntagp@;QdZ#q?C=;3f)!^X`|802^FNTumAKZ_Rl-6 zU`3dgR)=kz!Y(kXnJ&rnJmyk4(fPq)B8Q-XxF94Eb0iD9AM=cRwWhcg!OYGXDeJpx z0)7o#dmz4o9UIvT2H|OxMiVZu(x|DSaViYr+7>1VzHgldTyQwij))CM|Iy-iab5E5+s;e}5Fmv?1%JSLs$x@UxK_-op zeM;LsYCze%Bx(0%U;*OO8UXkGYs}d9*oi$Ik95pjLqSH$o%aY^a14tFrs7^S76 z7Pz?Z=+UFEU%!I10nWEk8fCdMP}m>V)HOVM`_eIN3bAsX^RB0|Mz+{9n)-K~W3&XS zt>Fkpz{Ws+tokIE%2!iodkW4bN|jrs!)RqldX{7HXwPMlE&lMyuRtEIJK=R_)%(gE zbn~?#IkT57C(D)c^r+6ue$Bi*J>MyLLG|;*gt4LBhZHPxmo0gc-vFY<{rSnh z-bXVR!@N;QHcJK$L2DhZ-~&JMZk)Zw*lmdwJnO+B6p~`YGow)UDw=jb9Pa$$ymLNg zMI^xSgPzD`=10C>+4wa&$a*g}%yd`}VZ456r}JC?`4{@-fsk!ry!3=mc*AuVW*mk# z>V?r3mtdMk^zDf;SEST(7+Z@d{!m!*I5xV1O_HN^`CRf=vcL|JZ7%|)QieK`2p0!~ zsjb6Ze1N?VF=e*0uvjG?ta)iNIlp2&`J3P^OH^Uu8o);$9v+HWc%@^Ku7SbXU1aw< zaq;gQQ1t=Nw+}I;fQ(w-1>|4o^5tJEJCY11Sy+{B7YUu@=;`mjOV`%Xan_guBC28( zpCavM6%-^NsL6BgTw-#v<64NF-Iwe-K*FVv+I7XnF0iJ>fm#ojPk@C=q5*0`BoZ9^TWrTcr9V{C@xsBlC>_ literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/deferred-process.dia b/vendor/Twisted-10.0.0/doc/core/img/deferred-process.dia new file mode 100644 index 0000000000000000000000000000000000000000..37c5dd36c7f1388e4b5b68a4e883b1b3a1793c2c GIT binary patch literal 2099 zcmV-32+a2%iwFP!000001MOYQa@t4~-SZU`*^);6?v7)pDw&y8s@DPT}52MlJ<0EjNR(9wIz;)JOrAGhR zu4|8UqtW25*YEFlFtbBDy0^J&hhgAM)}iWq_CnnbCie8_Jn+}vY>>3tw43^_AM}6O z?(N{+ef(!I+H^BI=%=LbrM*-^y4%IlUpcx-__WM4Dd;EqcdyN+)k-(==6CPDsxVZ+ zyX3JpH+TKzJ#3+&aA5~?$IIFZlWRf-!XeVrM?y6@qL?mWz zAP0m?G$eppoUm$Em8%xxsULXRY+I{$v7H#rSr4~JWaS|` z;6ILYWYO^yl#kWXE=*5|+aKz8m=n0rIB*GoknkY}STfOM=t6)Prb7fcmyF#&1|*|W z8>Z29HOpc2R#JA;+?HxnG`g`sJZP8FlYeMF>VNmw!BiE9%8~?8BnNyi%(EHY-!%E6 zEjk*VBNICrVB~aG5VW6E8Y*nnqg-D*g{R+j3|Ckxla&Yb~RQ^CIZ@tLL6+;e?l(^ySCKE$M;yA%Ym-;rWZ20s9XX@fm(RICwNxdJ(jC$P?zltq>-j?j(PTv}gi+Q1Y; zUd`~-_q?(4X0bn2yetl5lZ>n5X1Yv2P2SYXS((BKyDp 
zHgTPQ50?g-QUSbi6;Kdu4XS`Cd-E!Q)m1G-C2iy70%Tl9mMktCK(#-#>pP--9psR7B$XboNijX>Fk*3hWr zOps@-fi+k$ycn(F2c6C^o^8mDv~ISc$yrHZOc1Osf*^ol#GeU*15Sm!7=pOB9d{k5 zjwK9Pg0;*N_xIhLDfJ3e@*oN)85bC zs$2f7CN+v;t(*94beo~^K0^&u$diGi95-5$NFZrM6OuB1qwNwaAquRoUq4g)*$ebn z#}kG;Q5bJsP0}b)pgPu3}cvbA$RA&x#v7H6jCHN1W(LiN#eCs9A6WFjLhesaA?lm+;a{YG-5yu zP2^xcee_ogID<3y`DY$Z%GYNeDK9+vU~qgr3$1EUqIE)v0*PgNP@+Hxxd$Z+o zfZ;YJUTqG)Jv3zttts1;2uiM(etZB^bDO>L>X+8plCbX91QwW6sdo%f8UtB|XW7Z!`*?i!WBNQ5%L&P{4Un{O z17zVA$QpSF*2+U1Fd@leM@MLa~!c!<3y520Fk zXnT;MEj`2l68n0H10veiL&ZLqG`<2oWLh6|Jk$^maWftgFUmu>Rvv0g4{dp9tB1Bc z)TSOP_PM0-73iUkhh8ZU2{Rs&FUmu;?DdM)wOgGZ!T?bV#`X|_H(F*e#I=0}2!yO1 z9xC>^!Nra%;Gx*gDtvNP84;0==wAB-6gLxGR=Y$;YbCn21XoOSJhlfVkBE*6N+LbO z5TxxA-ARJ0u63x6=sKc%Ei+u)OwqMWbgb3|^tKco0)k_)ujmkkv|UA4>~je#b}N1z z(Ot_F8Z}!|MC}q?y}MZ3QgqQzh-GMB(P@Pz+f{VMK9>+(x1`t+-L=foQL{D6szjIE dc|M<+T*kkA@ve99+ZpeA{{wmta#*!^005fz_;dgO literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/deferred-process.png b/vendor/Twisted-10.0.0/doc/core/img/deferred-process.png new file mode 100644 index 0000000000000000000000000000000000000000..d4047eb5ded566c532c2289fe1ca3d0090d0ca4a GIT binary patch literal 10809 zcmbt)Wl&sQv+h6`Gz1Io5FCOBmqCMja0`Us1c$*PxCIYBKyZgagA+VB!Ck`Ou7lip zzwe&9_trU8x9XhzWB2Tyo~|`(_3nQ9Su0XqRUR9Y3=;qVU@N?r(L~5o007A!9U1Y7 z_a3l-5HDP%6tvON(U&&V)&T$-fPxH2+bioR+uQ54&g^sNpOpoIq#8k)S;;r3M)Q{5 zl!SP%O;SPXC`f6f3B()jBp^H_St5{zmC3iy-(=N6AM|smqq&;%Jc3b?gM)BW0T|{O z`sfP6dpi$Lu!%iw&OOz#*4Faz-8~7`n^obPH+QppT1}2CSekJhhK+$|PDJc_5Qx-% zklbD;N$>l}^I-q`z2cOPGE%0tt64)ur2=tgPV2A)Tq3>#R=tUsGBXfwSuk+41oMBW zkSd10xB}s#z^Nk#m!{jr`q;LvC5Gqlwvq#1^VwqcmC*)#i-T$6MP{BM1+iNDM7)1P zDPTTj@M%rbjbV`Nqwy76D9^6CwE6P%Pw{T~;1N}x$rXemZ zY8deq`MHGM?emg21uH(*%4hn0xzJ#bU44V&>Rf`@6gboE|Ax9qhR>{@mmpJLCd zs^#l4)0>-LW#im3D7Ifcc$GIUQAb88zqf}9kGscj?Ol}^Lg(SvUGJefPEKYSEsexbrT+FaQD4*TzOG$fsd-(NH$~A-6MyAw|`eRQH z5)zgHPepK}9=oCl{!9&oA%q?((G{HflIY(eR_+B6bm||yt93vikb^YK<@N=;^^h3| z{*PFvSfn-sDWUF8JOL1n1b0r+^d=I!gRlQDVcyQNO<|z_j~?&e)>!|K_>bR8{mbM3 zcKjdyhZLRy&#^||2Y+ML!zoGo2)2c7zX6p9T|boJqBvV*W<%Pb+W2{;#4QPRD#vwG zkmTMnrKMt8zGW$z0(`_Ehqos{6~@JbDK2^;bXRn`Lz3A$*E;X)@+~GK9hJRvlF*wv z>y8R-2+F4oG{|qRDdFYrW&y34+ueR98>~3BM6LS~gM|xS3KrY=P@Um8x{wgMqos&A z*;$JCgG;2bug91Pbih2T4T33O)q=X&0*}WY$pG( zHkNQU2Jd_>S;v#*0o|Jh{9=lZx!7{5N5gJ9U$J3x?5ZlM3XXeWMjX|GW~LQysvPnX z*+@kALBe&rr~KMHPeOLrUib&b2)RA>)HiucWv~3fN$3zu@OgBF317s9kyqTXAQ9%| z9WsaW4>eNycqvM^UO+#!f7CQnRxR^vZeU#SkH5rQ)F`;FolbJb8DNEL%eYnRpB&3H zlXce+wvw*ZIIYWmC5mR6K8Xskeb3{R&!fG9>_Aka3l^D@D zz`iUlDy)A_-$lMMm^e0!`nod|Nk!&nv0#|_geIYv@G>WB8|Rb%uosaGq77aoE+me*RNldbH$Bq z211aD-2A-YM@`bw(gIGaS=reRu>(~GZ}n=rEO}x{c@bVG)2L86f6YW=5HJEr_-&Ij z-yw#CgMl%*=eDr0AnwXOxPfl0R-~3oMMTtSGhOuix3aDSB7d9lk>&avxBU3=#`KB*MO;i&8%e5XG zJ)t@}I`r~!+wF&bQB%6wW*3Wg)kq}bzRe{MQ-w1tK{S4r8gQ*)Tn3$M&c{R%w z2sv+l4UPjcQRI;iLv5z%=;;wj#(R4Z&u%DRHu@!M@99Rbh{s_`g;RILlx{NFcVTzd z!0r$?!w*qe0v`$mZfX;kOj9+ zsd|N&w$Ku9UM;OR8hF(&DLhzy_V0Xi+qVH*WomPxP4AywZI}@(tp4;z~rN zmYzq9SF$1%DaPHwH-G;c-SpDCzshO88Sa?%a2*1zgk=DahB_A>RSX};+yg$oXb%qQ zIDisqBiwbPAPU8QDvC_=j0Rq-CgPt-s?9(m-AlDxHUnd~=ho=|*p}RNM=>=ABr_{i z!i7fj-_5$*q>>7tG(co=V%zWW#&X<691i_ec`3F>;_JwzPKzj1f%}YnU~$@^a-q=} zD2n^nRGezZL%lWpp5Rm)&DsJT-1`)OA-Ybwl9qs>3oN3tze;+B(2HaWgmlRsH)_9boi*_u{M`NDfk z)9>@nXbqzD2NcgDq{VLOJU3(gV<-uOCa^!2KBQY$9F>LLx?11WA@-dPmY9`E*500w zslNgq9*TNARdOa1Ac{Bv)D|L$R)E&Duwx{%yh|DU=P6 zoHZHoq8;TBs#filw7JgY->_PXnJP-887Pb2bGzs6tYJWvswD^wgQ)J|@nJ1b{=Mzs z-{4VvLr(S|PmdTh*>O*IYMiRcMgi>B5-4Cj2~ASIB=3iXvk7{34*Sm=s-?eYdo1(! 
zrpvYVCksj|D&nrM)5G3}Iwqpsc!!pY{yNjs-W;yTw}}CxIP|4J+6h~SK7zU7vM{^^ zqBE6A0nO&W8I^`CjA4NN<@B-K>VjIfp;o7LhZdiZIyc#x6V=%OantTAE*`Q7rGkw(Jpp9Vf=(LlxycsA9pw!qWYnd%gk7fP;v= zYo>H;ZXFMWDZkcf?~;1|l3iElnxU%I^(4gSv!(3p?FnlsVTzB-VaKpaUZ#sY>uM=c!#g) z$U^O6J9wPGq9UE)KezD{t>9BYxMGZ4$jxf+^-=TIEBnQ-ChcFYE2{l!idRRV7{MQv z+J?X_%^2hP@fI%xirS)nQHAYh7G=g~d7*&$;FSxJbp$zCbo9ll8j6@muc_61Zt#U- zoSI2Dzv%3E4SXmD!9#@A@LKY4d9GKwP`c9Y>tC+avK`2Xmx5&Gp44W#9UF;#6k&}aLJ=NJAbWrD~fD)qMU@JM5ho_KW^i) zN!(UvAiZIra}4H|AwN^j2RJUKh%aR{dDa)*8KpNx*94yTu_GI-afH&b245i zaWitLYspGBYdt?VWLgdy1(*a-!msmeveYJFetT6gDy9o0v8(!mY4ShAA4~We=^gv;<I9sP657%9kt@Yi6FGr)_$3Uv-2e#yTK2L-8lkfGa!lipCF07BeZv^ z$VlV`g1;7rJ{R9gba*Orv$B=yaw5ad28Q%}mLGe7m>9I#aV{>eT~tkw#ieq&v+k~D ztGO4ycb{$AoS;T;bi>ek`8N<16;_%jvGSmRheNpDF!wDZYPPc0xxmY5`a>m`%Nu1z zc9V*4uIKot!K>;uL6U0xU{)ytbB-3EG#=h;`{dhxa$J-Olh|P7H6JDcBnhF?-pN-?Y^wfA=PH_BQ*k|%BAA{q zr7I#mfHm+)ni~h(fzQplATQE9QqgCh026bfjF6SpRuO98b{KKIQZ7L$B30ZA0nN9Q z*MHtx@l3hUA=<`wt1WF_)_fcO$tNz2`Xr!EBG8BKSV+ft*np%6!$(e}gb(s~br9Wyd3thK3;c zbwUERXUnUPA8`rjeK-Nz1ea`|P4&GU3Mo=kK0dA-(0}TQm6r1&xi885L*W`by@N*7RsZ%jEr#NdWQ*OG2Uq*99Q;r~G>miEienY}-v!3gC{Z7n(R%0qyWPV7ms*k?qlYw6HKHD^Z`cy8s!QKMtx;#^}Val`_<^Z74qv27R&!bR#^koIq|XW zPMOgNAlf)fk$j^9p~``p7iO}r@Q5>`ICYIvmn-5m%orjX?euRZ@oI7d*4n8xYNBRm??K0W;^t_h&p>0Z-$<(Y0uQwt@i$x$u32a1 zutVes0}L^<7ZZPZNSib)Sy`Ks2M#vr`FW0g35?~Q)3c~Q8pRGU=DU8r6GpD-Fni2T zMN6y3#eB-ZwN2br?JowCY3`n>8||_fZ>3n=Y@$$4ejO zxiYK!9x-Um9XCi;yVd_H9b9TYMuBWS%f29zh>_`LXQ#KCEG-s8jRTOm&3y)0GpDi# z$qnO;D=O`{E{wZJ?emrQRj3>ZC|K){3Xc5nrdT8&%R%5_y#~zPF$KF@y}MeRhx0)&7?}L0RonIU5EDIr0$k*fdfGDP zt0O80I}EI06tnqJMyAW&F(RF{jFpFQ1>Z3xLrLhJPY}P4N2>Ik@@W(24pXT6M2DFF z9S{Q}BIZ|>vDdBnuYT~S^|CYDMq2ndcfeC)ZxcoyM%^osGTb@1_nhc`Bj*mwv-@-v zCE(#4D-63g+mnuW`XW2#qU`)w#3MH$rgPe;)r?q%sDfD zZl!(feZM;G%;y|b3}HbfF1Lk&n`(J}D;kPnfRMlR3$Z{0Ag;lxmvM8tPXj-muC4Ukpp z&#vO(2fH7fPDl@q_O;)9Joo?~F4RrE7G14JbVb?)#i?)J;&lZWwv_l^seej>GLno4%(AX(RVRhrHN+}ZJZbVo<#;f;sBbBo+} zxqa$|N8Mb-A5=qK_rsZ(hPo>BBckf;SI(#UeW=G^chjS}8L*5PM;-&4rkPNh8BrFf z$X|30;^0Z^>^xIh9YuJ)Tm@&Pp1b|c`Zs#$rMAYOtYx8|lHF9CySucpPI0OgMAq#6 zsIkb&zsVJnj4%{X{L|E1H$UUKZ}r@KDl$uh$JSl7?cikIMaH*r+X21cg4ddLXJPWn!Ieh+yO zIlzV0b;_x5Da{V(u*=-CYL)Hp3_hrtHC%L>#AxVPJ1;*U$R+P&v>ENX4SL+X|09Zi z_y=oY0UcXb^%bL9Ke1^9lYeqbo~gvLH%mwmNTD)li9a9i5JAjuhf5e5g^73pEqm(6KQ1-yqbjAY2R!S(YC3zoflZ?Uggs}KZ^H~)Z?@1?JV5v52c|*}r^5@nme}Kr(JZuLNMQD1!`G=q zsQ)yz-X)jnQsADBd_NNsgP*Agti%>=;uQpliY{2F$ACDlSe|FWJig@!jC&vTgd!2- ztl+;7X3W>o)dj^T$Go1L6T{mfagoxJZYW*U+9|f&_j8&wfhA-mD9eaodG2evPi&n@ zP=!;CXZw$7u@Pad_;4a!{vtq8cjGH8(v$`E4G>5&sSa+7`rRNp)e^jv%uof_(S4Wl5!&*S#@HY?me7F6j5RjC>7C81ABpn8wlt( zA)Q&aqFpnDWf)CNASq1gux6rlJHm;ge(~4x{O{r^W-(}yDCfpIiT{o*h{+o9gtoB& zroFl=h%fQ=KlX*;WstQNTgS_XeW+#x(Zq&z+~$91>67wY%ahj?fx)icY}_+(vy zE9dCZnGa5^i3MRTgjksJfbU}SH*|_`pj(Dpj^6KCOeI}vXo!D!cCW>h=f z*Ff-1>d72!E9H-q7N)@7`o|YiKer6jFd4R`c0v#O4_x#bYwN%4wzYW!_$KTfJ6qiK z?9zpW4LsbITbuzseC8>sigxB-8S~Zbb3tbzsQozhq>FXp9`*ILcElb>J!%r&UOtUl z7k>vwLTJo(Ro}dMe}N^Km=lLVP!^x|R(3qc#oe;d+g0D6cnKu{UcADCai3COUC~z5 zvykW+y2{VxxDJsEx$nPDv45;e#6MgLH~v}U$%%>iqjpdCcXo6-Z=nF&VhVEL@Kkga;%UjCi_DdV2-lzlX>7ef++M{o8H zL4fdDtq)M={TcrGC~Lz~S2xkou4^amtztrTe(?!<^4=Yv?#gp-94_dVCo94 zpcucbzwI6AU2=SW+PG$!S--e~!eK?<(1W`We0L~n6LI*5MS`$|z8)Jg#_Tp>wT}FT;W@LFbu!4c#fgX4Tu|;NNeD{G9#|s z!FS6%eYcYTbI5YJ-@ErNbuvg70KnM!=PUrCK1Oat#=6Ju957T&)p^;-S^q(5^Fogo zr8AWWyd$z-UcCGjbOQxWi`a0usJkpqWjzW6ZmAbD}!ck4OWrw}Z}!E?>T0M~sX2R_N=sksydFN&WQQVxkPpG-G{K zGrRLyA&%YOcKm9L)FI#Y_X_gUCuVT;4SuJ;0F0z=Jl@oLaJaY{i*?iuyzg(6 zUtHx|Q^r&S^BfQ9b65=>+j4FA`dO#e5x*n}tTH~kUcyD$-f5wp<=`UZo8(Vlak)lN 
ze%S@v%Xfl$8yaz_(~TUjD0^zmncwyo#Bbj1nsW&@vc4n45fn0;7y{{N^9z}u3$Uy$ zle!e2Wb25rTeD2qBCXG`Z2v`|W1CI$pdIbL(%(_PFTySW$qSD9pY(EHEs6uY8ncrx z45TZ8DH)cd)UuXJr?I?)UX_c{f=}`*eDlvzin~bU#oKLfMFqes77}rSX|SV8I->rLGEqbNlt+PTDm$t*r_>PKE1s4Aa1L3L2Bi&;{HMsz;-30tZe%tebp}w z5HIf3$x%Q)_3`A|0i~70b&|kI!ne_7@k=X$v9RI^m_4p9qE)F3MYdgD)q&a}Rsz9M z=x5hUeZ@^*{_baU?bC9iA5Yw1!gp0w+E_+B)^^%=#Th0**1_*xehW?INXU#P<>eYb z2Hn_Io9|8SZ~ZAho9i6j=8cM;EW%b)Oa#nqVA3t)W$d|4XQTG z+Ntf|ED1eXsKy6TZH>oyR`4VVG0@8%pBjB+jeO>(0{XukCAR zpne?oBC(@4G~%5xU*EDwk!0daGqOw@g{4xZ?Tbu^Z1I z0k-@E_lSxs`cR1u(I_axnya7<)YKfbPaS<+0=1e-IJX!eAW%L;EhX3GVSro*cV#OS{d``5l z)O=2DW_}N66iv$z%bU-)f~lHkV`?ElP-)1RzEt@l>MI;VXo6+4aJ* zwpmaMlOhBf)x*1hqS9dc@L_^h4HvkFJtuHWBg#2yiYeAgb-+qiMxPIC!Gg$dnUafG zU4MKReoYF1HVSpH!w|Ri zu+J$Ni$p(&|8iV{yiA+Ah)EHl19{MGXJ*1SWaI~PQQPnu6HhG?l3bHoz^(N718(B zHIHt7QE8qZ?u-7;izXkpe(QX8k5*}r=eoN;jw7+2`z&*FEa>*W$P^8*>eqDt$PIAn zjJ><~bg0B=Ki=w4KOPAfWPqtv`PNl?|F z_z#XNJqVJp^))xQi#yW#2xqNLl2Lzl`~C+zLxCZmHi`YGptBbY0(NS-^Wq8{3UwZ5 z2nGS+HhN~Thy%~r8Ng+Up5raI8izRCkk}R={G)KldRc!sFUVB2;)0^(PleU?hkB=y zcIeyA+8z{|xS{?d;?BO(VuufIlHm?=I}vg`hyHFZoPC;B$Y};FXw{zwP=xt||H{gf z5~|Z%uf_yxAZ=>An&l&y(5_zRWZj8n`3@)qce}#rIyfzniT!JON}>YwF=NbsMdT3U z2LVS>Sl`Zu|4k0T(34s6d$Ep=P>ij1B4cG>*xTz59KHOi^@B;P7~RI-u|FQsdP)f`qiIcJhCY!f>-)dA;cNySj=CDe7Xc z;3WZMX8HAEnu`~L%VfI9BW3)u!4&#m9C-ZZ*|;+GP>AS!hY{4Qpn->WkV%tcUnBwM zrJY)@r>CHw0j5NoqzQ0L=pXXwVXE zX-i8kN!Rhq{`NgXQn4kGGDXhNRx%FSg!FK}8SS#pPX*M9cUld4GBtPyf8mR(H$UX;rE&H(jNxEc!c6-=BVW zRsNiw)zzGB)U&7V+xRxgwyV9lUF8cXQas%56ltl){{Kmtx>OE@me(J?`#mX&B>S#v zR(TIrv>h1KPMlW9?R?^6*kXK^-*evE!ueLj6O ztoSrq%-)}V#G9{YyF*YJkk8Khxm`VeM~oOHbRrQESjg?J?|#!Zn9Iv;lnnY2FR#<& z3ldco5(cDmijh{7Oo&068`cjwNt2t4=_*_98eP|&ETKxh-tFiYoyJIDYz6?#lg0JC z*n_~3L$Si5mh6B#99ev z<+Vo2w*zy@mN{Q)1Ml!6&a%~gyFAl@HD9F3@_K*c^-zds zUr$E_D_0=CplSJ5Wskp!=AfI_?*mmVR-lUPG=Oe zRUyu`0&15^OXgQz4doUIOTx`L1V%wd_EeJwkTvKd5gm1+CDWhQc?+*ac9 zkL0hrBro9KCpSqkU(H%_n0xXOZP};QvS>oK_v<3RuI{o0DCgM_oE_zO&K&ds*_5|~ zWUdcfnfPu3e3}u*<7nODMKjl4ACZO-WcNQDlo)H>3|_5C*X!ML_8;} z7%&1v&V(W<3@aHzSX?Ih;*LoW(>P57LkwexW=ZqtLrmqj!w@Z*H8nW1Mv+$1Ol-y% zd_bQJ0r3S9WiE|zEYAViP$ElY!-VhLT3bKc({RwkP%R$flI zPOY`@hjx{j@2@P~$U`7SO$mdgA!UBel8JRUIOQWH<~KXpXatoSI&9{IJY3|4)gkgr zHlHP~<+sH{HiJE6iFlc)B@p2n&-pe<1)G$E_SQqzh!R~{Bzz1$GCUDa=gCXB$UlC( zOc#G`PFFi#SGi5(Lb}aoFir4L`N~>qJA4FDH345)AgzV6hK0iQ>*tE=q=@Xqw39b^ zl0>}%@P+Zwa*D_ zM7f{}I~UyshP-cJ*mcs0up$CT3PMmjr$CU9YXyjOR_IEzdmKKC4%FeId?kiaE9x5x$p6NRYszV}3HHycISXbMFo(oFEz z=52Hczc&>l_%j=gm)`vu>C>MP1N<2#U&f!I*ug@-GE^DCpW#;f&7(gf5!Qqqk3X}@ zf-f_8tzWaSh17}9EGr$pcby)MC8XUXzR=H?Z}J>_(cD@#e@qR|A^P`(foQ6pQP~T68@f^{CSdxk>j2| zO`FVxRIbVV{&=Md;bMHa{BQy(zf@tzhlvsviBevU4{Jh|y*8AG@!@Z1e7I5If>Ys@DTdEn zHDKba4=0IVrcg*>!7UP2yv(TrL+Dau6c0JoZ-y$`BrZ4=2T_HcL4UiU92<9ftwf>f zc}3afl$OOBgxMX=7*Z4u+$Cs-U-o$;x~fKT9)VXvVEZ1*PL`&chU#3@#vxkwirl$vpApcB!(N`AU5UCLFDw5 zaQf1Tc1S0TDrq%JODxBu6RK)dVi$EYWXfxBVFtJ2AUbi^`R;TgM`9DQafZq(CzF;% za^6A|kL2Y}&5d(4mx%E3)m#EmK2>vNJ=O8m+(qYunvbUDF2akE)x5grBZ6Sq85h_C zo`f7bg5dV_Lird$$hs9HM-VPMA0mhlK^$oW!H6?D5r_~yb^u`m1^pO5NTlrL$l-&- zo(H!gL=Uc2oVr%=af63i#n-Vzt>Pnx4i0-BR6IluFA_O0XFQ;gBBKWm3Q74SZZK6F zMh+Vs_B=!lA!<0*sDV4A5`#4#J8V!$iyDtW#@xi5h4fIYA&3s`M9CO zTFuw7!&=Qp4jx=~KB#$!9u9~ec5gI?7Y7o9+@2HIg%%pd<7neXG5b2*kr3L3Az6i| zwBYCVIL7i?BRL4ajS9SVxP=6Ey!oLJ4eGz6{7Uo(X;u9FYTpZFSLHj{^}~?%Df{!P z-Zm|_NzHqxta7Qc93#f?cq$8cRO3gL<#qxf9#LfncQ~kQP}xIORy$Q$h6Enb$^sr2 zPpm8#h~Y7-qPar4A6wkwCssE)C#4hNO>qO!Y@@YkcV z9J^H6(H!GiW!H}JT4l#`j9qm&sBBQ#LsgbIRoSr|Aq8jp!`_YJX52k9YJuXO{$gZkWFc(7UC-iUC|R)mLE-MAg0 zhd{A$Q$n}O-Ly5~zRd}H*MHG4A7tMKg8hLyys!gs8aFF^e!If$oz`W8)bHD{P_=GY 
z*t%t*L#ksgsgBzv_jo+2vv>?Yp*rvugcz6V8v4Aa@KARqP+g$97o<8hjOz4@QXO|f zb?cpz$MWhllKiPxXU~!!+pBBub9>>h!K)h@PGYGYO4F< z)vc*+Os}rJ&wC1g1zugCx^I^1wz=w1sv|E-b#A723M5C()d3#22ZI0}*Tx>lYy_3o zX;M;|aj34n&mM=+0@Zbf*j>D}Z|PRx($|;_RKwgxJT8|ims~oU+i1Cz@)D0@RdOj- z)+sZZs8Ol3o7d=SXfsf0pwe$B*{FuOr6W_Rb~?FoJhzl8BA&XXLTNFsTiV{|J;WP^ zkOP+nF72Gn6vM`->9}0#bfWr*ZYd**K6OilCVE`Iw7t)JxHODW2QCd<+Bsb+h7E%E z<-V|K@#`$VOqno&Yf&1s*vPH+jddj_el8daI7^LJEYRa0BK)%bV%h*kxP5r(yQj( zm}Q?w(+}d=*V7T9F;00dN`U}=Bg|ksa<(T%5wbN#X^BuAIU8&+gMi7Wd_pk93SOfT zYh##U6N99Z9DI#(<2+@NnUee%e2wBRTjs&n80c%X2wZSA1|DE~*fMM3WOD!7G%G{} zmEKdtR3jrRI85a^6NqTdfl`#l)c9Xy?W()o)>unKC?)>aSyRN7lIGagCbyYLCsVr_ zMOa7;KB$s}5&x2I5H*+s9^ssTsB@$#vp%q~*ZuLEt>Lpa^AM}qn8*W5Hf=xsdoLq* zx1SvodVSCut>^P;y*^O8HuNBQ><8LpN;?|UK5%UY^*NKe-*h*d)dMKT&yKpn$&_nK zkp`OHjct8V5a7`Yj~K12!!C^}kHV6ij9_eC^|za~wJ9)a)Cks=aXQ3Vd38JfVSIJ< zwK@+@Ciqf;sAOzn-5|4a(X5T(4z&KX*U+6+@+HJ8HmQ8^FERgz`N?4{QD-fpGLpV%d;4zbW1V!~2 zB3wo{Niko|qO7`;M>7arCida$1g_Rx6jh(|gf=}3O^B!SB#)+Xnp(4#+vkhb_S4ZD z6whYAt_!_E=1Fs`Jkya5+hWe+xeYT zF*|3TlD^0z^VEOl$ucVD3D8r1mln|?hkwPhhe>q3_&bRfMNKre$S&Ee>Btg|EeNuV zpvd~xTgztKPXYGzHGKByjeYtIe)ravkM0-6yaan}3{+>Y{uwXQyDWM4&i?v`XmJ(A zUx+Zu;zbU`n0%flw{|3K66H|SbYAruT3)y3@!942R`kPcmE_Csiyx|59xJIH2KcNT zo&mhN4eQ!%#p@!DNgc(H^k zs&cp4YMSKvD$6SbX!zZ-8Y7&o+f&()8fCV+uUl^4njIUq&8a(^Z^2asq0V?2r>kq* z#5l?mpzqv1^?mg&B81Sp@2j$x=?bbr(Z7~?0U}b?T@|W+Gl}k#s^mNd(OJLb%7o=8 q*rDEbsF}}#A?jv`s_Qg0RaGw8fTCI literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/deferred.png b/vendor/Twisted-10.0.0/doc/core/img/deferred.png new file mode 100644 index 0000000000000000000000000000000000000000..069d1d5fe00f3335add16af0e80bae409a0e81d9 GIT binary patch literal 33282 zcmd?RXHb*f_cj_p1VI6nA{|7fgMfh4AVn1EEtCL{RFP)r9Tc!o0wP6v?+~PiZi7&y zcL*TF(2F2NO5p4~zyI6j{qWBDaAwY#GsBEAxwG%R*S*TMuC-R)>1wNAp<unOp>O)etluev5#jHS z2b49?#PCs4!V9MFG1$$tcbD!wW7eRWh7En*OU}9Z$hD*~44rElSL}G_Z3<(;G|U`+ z?Q#r+1o6(0F(MBC?ajcl$M$yY(E448UQj~DA>K=DZ8!}CZXKhXQIf5O0wpktP;~y_O^OTp+A-CQksgzZ6Bfv2dV-Fs zz?EspZj@kAoL7NcGaz$Cct;Ql;o;Mh_Jy%SvCC=Dbt80QmxzcD!ot z>bOrH$6hPIv#va0;FbE8oqAA`noB0Iool3=zBoj(I)G$8z)uBUwxRti5DEEv6PN3+ zmq-53wTd1HGPB<)LCx?7Acq9igEP4I<_0rN( zZ5{;Tu~vSr@b_ofYUj(fq49C=ewpQb;&lg(>$gfe>6S?9pxlQ}YAw1Z-;bSF=JE&}W$U)<-0!=G zBqzP1ve1n3S0lvlOS8z{f+%?#Dwl~vAXCY`vLc~yAZ}Vz^Hx)BUa_nV2080g**c6Q z<+$;gve0F=sm`L=`^uFoeSLk7m{+!hb*uB~kaNNaGIy=z_g89l&LGcez0YJ`a7g#G zV^IdGX{{8jL)9?rJyUJh3)ayGirT$4UAOMHoRREJZ?rJcekRE*-HUbyUgm$++G7L> zt(f8+8OCVwi-wRjbm*I1O6w${XgjSY@?E5?$j@(LpX!}goCnIweLeFWWVV!hjuac0 zMab@kMN|8J;+0XC^|YOGf9S~zGwm}mMg}n-ev^4rt2Fl>0X>}E4wl}e6Hs??*$ksq z{OC>pzL*pHtMLU_o0oOK<$UvO(l~Hc7f)3L$9%@ z54S_kpBqZiU1)7u=;@1T?qBBS<};=a8Hr#q-*G1e(HwUNb&NQ^wDhF0cgvlgQ0KWw z$`Bg8?%B!g0G4g5tF8T|fGce_^fKPD81nJUScU9HG>1WeMazoNvHDw*@aba6tqnND zcs7;dcmLvIMXlQ8H~B(aZsJG>B}2|m6%Mf;pF>G>6!s0POf==#xEz4idU|b4>QU}2 zL(h-$KWMMbG;FsX&Nch}S*i^qbq|X+FIYjSQW|$E{EAa)^4@H6o~q_N{YlXpaM-pO zDX4h*`&+FFnR{q-nf{B%ayNx#5ymg9c;apBuw=CIFZS2QBpmwXce{9?XMb_5ZY_uj z3mAzZ#Y{??!Yy#v2 zgg)6Ga{iKxqCZn1ZQdi4SKQTbjpb8NQV=s{H?zzsX+uP#71i`(Fd+Q-KVt}Kuui7? 
zulMTx=`tQu)pkr4o_fpu8RvUN(2N&fFP&^PZ;zH3q@||PtEo9FY3zOPwdrMae;Lgh z>Tpm!Pq%3DS+U{MCeAugooiThs)lb0yDK!nE)>fxR5l`bKsoj~10meJSwFMcFyCg~ z5&8JTo&BxZfStvT<;;LBk>(9|AVdKTK^H-uMy;I%(wjGN$AaghUV)_4Gm1kUI}3&b z`?I;*O$U{{67$r02VHvFnpeLR*Vjv}N66vK>gR(`5V%fZtF}{E`|()L8;&OXzSOL< zy`p1$GF(N!S0MHHip-E;NoMgIJ2yM zht6dDJXzQnG$e>W$>)c=x?Sv-9sc^lE|xdg+r3Lkf{^2Y@MtdF#h+*CygtM03FUwJ zLnHMB5>r!C_dN2Kp+K~`S=^G6lI*gcpQsFl>Ro4>)+^i3fOs5TUHfqhVyMfwLi3y8p>!h=$Evp?QA#t_N(;9mV?9#Yvmk)}2mP z@Hva8&Ss@bQM&I$-&^O05!qd6flP-+xmI%FFCM6X$7-9VdWGBK$2Feb(6s64X;Pz` z3QUttUQTyV5yVbLaZTus* z-QqE2m)!SJEYDD2e`N?a?NmvaJ8Xl&U=vtuV6SikC3ipc{8Y@QlTV}O++fJav~B58 zkzY56D~D?fOHHD!-Yt|C)yt$^8jlW=k4YhliyZIMlyVOhK`)PS71sij*NDC z)DdWQ&nLgGZvXc;T9}Zo5M!4A8vmi^I!o4|mxhzZ$-$;zbF7RSJ2c?qmqM+y=g*(_ z%dVFL6>fu7TH?u5)6(49j_kR=$A+BR%#nXaMn|dAQ$?uI$b6UJk&=?Kbnm|H<>eI% zJwwZdb$Z3nUeo6=I|9+~xT0b17%v(!RrPsqbp%#!g7{2A5qL5)Z;5{Kltqk`S*=s; z=GxkttI0!yRAm^2G=?PG|a+_p**XchJbo44D*-w6$?l+dP0s8Ftv4F7nH z65if?>TU&oVrzBn=gBhZ_mK0^CdJ8E%+%XuXJ5;KCu#KF!70~i{ct%aq$;GAM z74_d-WE#s_a`BB=5K7G5TE~iXvPQJ!gu!+;ZsH1GAKw;G0YT!5G?ptI4Wd%@pFwi2 zuxICw)!k#8Jd(?AoM-q;hjW08; z1>uQ~hd3>PpGQ=Wo`4niC#*z^f0D9vp&t+Vg+D^8N2rEG3Nz_&i^M7%Ovc}SasZ58 zqW?Y**hzVw5wp^x4(lrTmx?mnvfNJqE4cERCG3SPTYQ0Wo^ScIr-5VA{H~!_x`C(K zvP-rwwz8_UbpNx|SGZXEyu(^-lP+7ilbo)z+_Z*K1dd)uHD`K_o37nv5;n6{z;G11 z12FWGA*us>p#9hAh12urvYspd+!dkZzQ7dYz{oQlf_&4_nOjg$kdu?s))ovrVk9{W z3k%TJE11{lAE}Z~lb@b~*lk(wY^tK7GQ9vf`kZJ0dBvZofu|B#WgJv5tG*7iczDlk zPM%xU$!Q&6GW%IEYtB@aKyQRPc;olNw}|znpFqLby(gb8eUo}Dw)@WdXrs1G{^-{S zx0apGWWE0@?*@>p7F*Z ztny&X%5O8grynoBp$AqiqU^uZ!Bnw^q+2BVk5}Z>Y}7xppzT2?2g+{&fHu?vDXl~V z9eAv$cq-S&@kgHGWy_ueu`jZt*VEqh(kSfda(`#{!RDA;486?E^fCy935QI-!m*=? zW}j`Kps&K62WL#KQS@RF9$oU`7&I{^p#zETg8>s_dfM-v%X z^+mT2z*#IQhiV)K0K9lJ7nGg)9&uI4!{ZMKNcA(Wa@?xGiBHTZM~1NQ5`Uz$nfnp83qNkqil-+y&~i0L3Rydf^)Nm!r_S($05^!oa*YitW_ z>^I8-ehP$l;qdza#@N5^ZXFQ3kOIRCL@2S_r+MGOI8^&B&TnLb7U zIGA?NQ6IfDpLKq^Y^)sw)V6{jND`cxUK#5#pitSe+w=2FSg+rqI`nu5*rN3;?X$+e zi&4GoO~A<)W%`$TJ-|u~$n%7+j91Kb#;}ecuXc%vi{t}+9BTG)<5tblz5nNj1QeuP zd<8@&ru>$S=Kklk!Jx%=yy0|K$HU3>&QsRHzYp8bgSz6l$1hEl81U|*{6Ju5P>yDo zJD`C6eEto)GJpf4x zZg9sr*}?-$D=LUccedGwNqKZ^%;Lota;FJYOpqX0MEHnI7MqNV+abao4-m-xzRD@11&=!yDayb-g!MTpuf&$ct4p6JoqR zy9nG=1YKQS-OKS*X}3&;Z6vESvW2zz()Bx(+^ztAbch3Sp7i^)8MI}ZN~PX_G_br3 z)#!P!?2%tkfIpaV+a4(z0EyV(;NXiRd~*BIQrfpSv^vK!UovF9fM$=6Uo=}MKI*lS zqNV>oCe|eyXiI6{O^KkTw1rVbDk-T9) z1>E}S%>0@6T#Fwt%3*-|c(qb%XIz_2_h)NW$}X%$n+q|zHLVtcj4v3tE0COg1c9Oo z!{H>Q#5LcA*008RCeKg=7WMXA%~DXtei55(`()!cFe%@#bpZCB>E6R1cJYc??Xw}^ zGeE%D0Os`^MD5=eVsbRHE7`ojozzWOURS;+DXEny?*}3@@Aku@E;R5aL+=2-0`g*t z>JUVs7aNx|PZpWlpUHzeuMN|1<&|HC1#f2ZncvrtY2$9>m8F6)&5VwE0xS$vsj8%e zsIdS8;X9bDu!Dni_u`}f?k2Yjh4V2+;rT)OBEQ}8r!$sw9=OiE#Zo2F*1cg6Rsj~Q z!Xf3(z@_paTOL7DcHLe>bI6a_M&c=t>df%gK?Z*TGC1B}_t~tQaGk2gM@#gZRNHok zU%IaUxZ$ye;0v& zzq>iO`+Z2i8svd)Aty7y@Z4Nn*rCC|;-T|puu8*!UC{a$y+k3u+0#6exdyf=frAkL ze)sZ4nITsYK!Fe2oNqq|j`T1>G;pB^N>~Mk4D#d3hxesY?D4Zlf0l_c!o1$O5y%lh zGumC0;>mUkNHq4d!)W}0k^=_gn5cJ)-`{~mhrg5P#9i$K7^6mtbcnVBFtVi(;{eEK zZa+y%_nGs1u-)CAN2D^w%S{1}fZ`Jjdi)MST)4S?EYKh@4}!lsyVocH$N`g2-?Sb~ zJI$XiNsrK6DdyU8RsJJN zC@g$P2{OI=z|n&QXa2Ld$J$z1C8M(E=Cl!|9v{63Aj@9RI$Zeq_^^rf*VnbNGST+a z-PMsIprRRIo8UY=52N4u4jFHNOnVw-(fs=LMZtZR-vc3Mf11`&0pQU@c39{99vo7q zokL*Q`<%_xz2~xk$l8HAD9g9adWWFT&rWn?@Cmg%qykjT4a&9k^>aYl6L62dkn@vy zkW*WD4yggfE|6?882>ncP9mpfJpIKBO_6jmE2(Ym-dyVUo|!$04N3o4X$dB*AT>+c~9ol1_v#?q)>fBGi+CFuzJsVNmI<` zHTrb7p4GMZ^ep(Z{KL;(4a5y3gKP*F3l#~U1iNwr*~4`md9lrG@xJFa(A8U2-)J1) z#4AKGUgB1@U|(f?bb(pA5=29B$KiaCs{ul*57@z?@wSfHg<-sitf6!5s z0d5M2u}GJX80tZMzpMp;Fspa=rAW^Q{sAy9a4E$hoQ{I+#b=Os66z|0gg3Jxkh2>k 
zJkp8)_wL{@oG(6;49Ne{oIqI=X{PLgyfDm@8shhWU7A9pU|2Nds28|x;O49A>xsze z4CEGBC2|@hkhU(g=^-(z{{03T2)s@Noz+nfYzv^nFdA7t;%#0Im8M%bjhD*+;6p37 z@hKUgo+zu+MHP^qHMA2dNQYg5v%07udU1%F;<`Goo&^4&P z?chO}vQ}P&WXBrgfA}p#Fm*;V*ZlK^l>7S*hBE}i?gP?M-p%aATCeqSPznj9*TwY3 zF$fw1ft-G{L7IO0=L_(wXTLt^^ruJ^eI0g@&b8K{8QA2*v7K5-D>fvo7>%!0~Dk(MjNYlc~A2 z74}E}!=}k?W|t=OSlP>mk#=1bcTG(avG5;KW9mzhP^YW{1lcSwJq(+N=JbQxB31;n zalv4w=cbqHz$%_;%TU<7LB2QjSxAKh%%U1l)}4c~X~-+AQkoy{J_WUdEWkEo)JDHm8N>{xM@f<8oFK<8FuLQgCBkWH^(18Hi^rv5rNGa5Yy^35W*E$I4 zVf@`{EVI|7r{Vph&eEgzdK{IwtNt9|?1|?F;72%71CageAihX>t_*_j(PqfsGQZR4 zm6x!Q`2$k#@$StE#FHfS>^w&e6ukW*XF>hE+RbrnZ4i6S*5e z&nmwKPS6fycFh~rPo^J$3)DMf`aq`d1u!TvalWp+w?45AstmHgt&h-9LE;z|DvcCz z(Gss|If2Knm;mmNuf1Mp!qgscnLsve_=IFc%fT$FoGs&{LNXL2RqVYjd&5sa+C%_! zKFrE0WX7eT6+}$naobx1`&=RdHs7iv(#eFc-{UwrxgYrV_jd4MbIyPWq}XKZuMpuI?&`)DSS!qcqeuQNmi`8V4le7_v`(z!+bk(qQiQ~!~bX>sF~nY zlWgTCzSmNnAG#6@a(=*%6aHkKrz-^1Tc@BGt9G00Q}pWB{hTG|<+#5C4u$1-qlagJ z;&NBgsOe>;h)YMR@3~c8*VfhbWh%@Zzb4-RtQrx-!(h^0Yleqdptbb?8-qAhy1_DD zS7y?nU2e|nJ;{Cc6{tGG<&YP86=o$Id}@Q(tQ(>x`$=DoB+@lzE@?Xt!+HQ^#_9X> zB>EFG8X3V{x zqo?MpuC#f4s1&^wl4O;vaa;EFOE{$|HM6|$ZxGTPW~oT;gX3ld9ZcmtjM0#b``CEh z{IN`S)iSB_^ltLJ0ai{+-xU3+V8N-a-IFPOL8}A|po2iufvB`m>I(u>vkDu0<3rnJ zCQM2X4V|GVxhH-a5tg#zjTP6hbXszviaV4+PreOFD-4X&??=MXVN%rDUY!o1v zpsvr5>Tdxr6G{(mD57sTMLon16S+@EUK#gh&5IKMW4kEN4)Y;xP8JP{40D267%l4;lG{=ax z-5$X=Z0=nE7=qZSdomBgzMuBQkyC}!`MOCVpnE}$Zq!YR7_0!1axfnf0<0fYB>_81 z)VuiVc96`uI6KROa+h-JxpKY*#-jGoAam|`YxBww&n8zWl`+){sI5V_(71Bea8l-b z&jvxtjyns$A~2?v=YKI!fu+|cM8>o~Lrz*Z9n?Pw;ChA-01V19hYOx0?^?|dm){1M z_&111D-?}^N4rEe`}aKHGqSv=9oIganVYL4*^XMbg9bcMiO*PBSV&tqA#ouOw=qG0 zJDCR=4R^Q^4hujKAmJ!5>mn9SCI%ZLn@=th%=zg1L7@-)89@8_4GKZQPK^@r{i^!t z#6-YZMOm5Pd7Tb_yCNvct*@_Fv`1d+F#wi^0@xMY-|6BJz$O#^$Ic*Z6GbQy5P}!G|jMi)LhC zmhSi#zdt}$G=$fKoIpBh(r#znI>GwStUfl$?ufC;j(Xl03mOF&q#b1UM8W_pH*C5N zB;2-InAY|?LKF_j!)>YdlhLN4c%;XKcQZ%rYWBjnH-~Fu>!2E3XWeaG^>%@~HKN`Gj=s`ruHWddRZe{3S!o=1?9c zN!T37K&sXS{X2vD1NHVu-`F8>TOdQ6nVv4S!T`B}cw96l(~5SidU;=G#r-R;Suq%- ze19)?1EcuKD2}7ykVl&cfdZx6Aif7A=H`v=1+DfT-iI}E6P`3oVbgn(Bhs}lbjq3; zvXal(eE>@Uq#?o>m1|7N&EmwO?xXW-e7LWpZ^Srcddq)*3`S9k1C%1ltgaN00P=R= z=)q;Z4e@Vl;BU@W@?!63e*9^UadC2Dm3(e!VZoj!PpXtM6pRJhu(vTaQi?M}t$Jx~ zd$)e;$UaD`Yp)V_eJkqYgVq%r(<_VF5`XEh(9}j<&y0 zfvc~ps{k7=D7d})dE?$Xda9k13G(Af4rfD74mbAUw@WaLi>s?t`rWVlI9CAT-#x@< zn_>#UMc8OX&WTc-Kk_s=otQ#@yt@F3CBs&Bpo}aCLkTfPb*tgMfmn1t-u==f_?SpX z18xO+yppGy=l*F8eVtd^P}DxAO#b;OpM)NXl8EXW=1Nl%!OYG^pbojdg^~PRiaY+V z=M@ac>zB~qv|4O5Pg(- z7_D_PJJ6!Q75@7WN5%bWE*7wG(}pM`)aVHUhJri({bv{$R}OP4X9EbzevQU5OkQx} z7u1#SB!V~|zJjS+Lr-mEEL!~~3Fu7HeH@Y_qwAkhNSp!pI(k6y#QAM7mVO;gQl z^QsYJ+Ysi*;8zu8c^O3$6@=^b`tG9m414+@D!}et&txHYO%>em63Dv82V~B=fjaG|GTIR+mGk?>_rT z%|UQ?(!@M!*=c^WCrMo@rHyl9XCvFm7=B#@5;TM{nwzZO`RXI0c~_NN^|!36lT%q9 zvboq%Zx~_RNkGebW$@YJFZ8_S@9CBH9iOW-nX|%(7ELCrH|aNX9;RokU7bz;Hnv3-F1CiT~4sMX4DiEMg{(K)o!5WKO9Q3P~O`%kw0i}T9&+J|fz zuAaQEPWOxgwlX+jkTg^8dkGWDgIi6vRgpS z%Vk}QrnP;xyd82E64gp@2TmIfclwDGFaH5{6m{tQ$n)NUY@33YWwSX?NB#U# z_3DA?7Qy{+$zO_#qIGMch8&IUFXt?>m(RqEv^EK8A9@2>l_dKZuu*v7TKt*z z6;W@tW}Bs@r5#Sq?0ldolGOuTsod3X42vtdBl9Th*=)o`Gs~(RrFLbkWWan=n_GIM zO__wk%GXBUcKY}^5eDpKji+$DyaMM$_6h-g(eNQK>HM1!!C3exg4v_Tzz!SBifm1h zPOGDEy;{|Kc9K{ufW^ZpQw%TCauvu3&6!{-oS`n;1XJtE|S2A)9l8jDQv&n~G)at%!+mS*=X8 zcCbx|DV>P1^Fib38$Wy{5WN5%*y}MOFD{0KBwHgN7&9=~Mo!qncFse9g$4jRYu!C)4%zLsPj0M!!5#WRkgp%+l2nLI^cZz}!jo&&qS36LYBq#4nzX>?dZoJSj zSArXI3z&$-{1N6d=V8)Eb4mk*2~bV18hi2y;@c(v{SW1wV%Udy$(+xQvhl^ROILOF zWL@D1(vl|&!SFu>G*Fpjr9&K&MY2cJ5E4f%&pDwY0=)OXuwWTbomtNeX&#&)inP+~ 
z?|f2v7O8oaf^#f-8Ry^CQP(q$?s-jXZx>zm!zU>6{@-GIVr)=@x{747CoI4M{6_oDMIm%KllpKYTW1~ZG!Y5NrXffK-6FLWUC5aS2uaW&`$emBfqp1- ziVLtsC)4W5R;UoDD{!Y$=7}U+4BQf1paMHHKF+upj2$>ZB#SVdpZTCGCw@K%s3s@><0 zE^Fzl>j9NM|3iS9VvqgT&p~i*Xf$rDH{NJ=+aRIh{$6T?PZrz5d9NiHrf;`75umfd z*jt@urK4^R5N6w2PHA6swFI6rbHU7)dC!DLV+|f% z2r7(oOJ68-8s>cRgrLbi|94icB$-w=HPYd3q?YQb%t|x0Ear=%bkCXc??AXt;|&ji zj%D2OIO2LMr?h{C;ei+KNN2wW5%|qReg@NO(ed#|q_HXndB48Q<89AO-FAuF>Doep z?G7b=#}LDB{*W>DruKllXX>8U!IKdAC-a+~laYb})TA9aQ!`*<9ed~=c~00JIMh{Y z9o@Z;>l~&mH;|4|HrCFL{+O$|KO+Bu%e*+g?6$m^<2i#X2I2V!5^q$_D zuu>N|ezGzFG5RKudg%1^?iGDK+Fnwdmv#bm>%BuKtgstf3O!3__F1bZ2#=xIlOy0G zhhobLw;HI;5>A@)@^>~^_MLpzb6-fV`)R|rlbfX{YDi4SFI3#8+#>DW{BewU-ih;* zpDkN|Q~Fl@YEd(rk9?+4!`to3H2>zU`*zD={!I=uZrQk>iVF(8+_TTc34vJn9&X7F zj^BWCN_R%D&rC6**wiNsSF}!&^4>}e?nYiAKW11&O`VMI2jL9a%rjOWFww9(4C1{G z1XX)35!xtHQAdR<-}C-FeQ8F86BvnIe-{jg6g%@{W1p~3TqmHJ9o*V_muLe~?BwDi zw))KCwv+>y7#LoG(`RpG$Uav*&-R12(4062N9b>y0PHsb?N@@cKM%$NsF`}{GNd{- z7*5-`g6Z3>>)FGRhMG^~1bAa*U874IIi*GT59*uT?nOGy8Xni%%ojUz=CL(ocRa)z zC%9Y7WLC}Jvw8|}+LthjH!A?@1!G?j(60w*j}bBN?0!akt>2r_sA`bxc{sM-SuIyW z`!>gqLVK`55!L*Wl)Kf=!2b}Jdh8j2!m*s4zG*f-8d=7rUfT=C_5jW_I{33s`eR4- z7gKa>e#zJs!?o41*I=uDs~9guPH8`e!B|1oW{vOCuaW+mHwVP>vAqGoiOm!jt1Am6 z81)nWDmajvO-gVC$UK&AJZam`q*fP#UYJ)|4v=T}uv=!}AG~{vQ215ADP7}*6*|QA z1Uogai{zZh+2MU%&FTXOn_iWtXM2h7BVG)|2PHZYy}Z~i)@N#GNZ$3*n2+6?{?`2d z2Oce9*oKUookj<7$;-qT;uKQ`2TfvA6+Pm5YYBH6(rPt3Yz8zRUr6fPwS!L6d%WlS z_hR7BF-EBP=(1wUwY%O`ntPgbD{`_GZh!bTueXh`d+mhB8@sobw#A4Xe~J5~^79DI zv8xZ_A9fIX#pyO)!s8ybUV@DqYIF)!Ze<_^YLLzAFoIeMQoUVEAIdQw+@nO_F0fI< zR&j{zC?9v9LVRGqFtBU4-R$4AeCHm{$GL%iGX*N3+6y@AG#r`4YOalr4-!qdQ*83uLF?$g@RXJ0+T% zcYKX_+;Nz4V-3wJeTSWwNVBA_24gc70)qW3b|nTV$%Jzi0v?v)yjO1mHsn|CiJ?Wt zTN8}J8sBw<_m(D9giXKYccQF8%i@}hHtIJOU3HPm!Nyd`DUMTR57+-u3Km1d>E2h;?BxP1JDOE)8w^YK7UqUr<28-B*cH7vA>jtoH*d$ zYF}&2I`wQWK)Gx8AIY!t7vMGp{?WNR`k?iK#eg{hsS!_^90&N*Spn>+z`NhaT zpwK`pQcYA^%>(NMEXj=%R=zG!m>{fx)`>OH6bs6Rv^Vbl1avtu{i~P|ZTbC*P#3fX z2x=FtswDpQ`lPD!x+U43+Ovz1BYkVe&z?PdYeED06*YuN1oe<%(CP!y>S6f0$5yaQ0QrDlC=pTBcH-zPeIJ5 zWxM|g^bZlM5J%ggBxFE%#+k!_k0K$vIGJ3vlyq*^Yu-C~o|Igb>`2&;>0^QUjTY;@ zJfMUKn0G0DJU%`KjgLu5Nli_03SWx<5f&B(trDOxnMvm9{NG#WUvFP03-yfnkqn|Q z=-LDzL+^HV2*8{lF_5&oF`xy14Fo(UqnkJ)%rqQ~Wli#8wce|LiSFsX1~SOceJki5 zh62NDRnbB$Fd0+*|IAOo_E$+DB73-px_sY3fGJB)&qh*b%M#(6;D%R^d+DF@XFUSh znNCwR_I^#vs)s`VVMqbUZ~)9il1ydiK{GJB%ehO;FuWpfVq`hFur1+P79Gi%Z`t@(7Z8Q}d z75?ul;(N7v7;@c=(n?OIZrl-IW&r$OR0rZh6FJR)uiC?Xy+^zih#GG>iR8_HzvMqs z&f#Pd|2G75ZGQ(12#TPEayi`t^b?hoIR5>;&{nA)ePiLsAG~zP3aVdrGEHggz26fn z*srwSdB3%#y?XLh66c=pa}k5!AW+;Qmda&7yTrE)3y5yF2FVq`6Y26f1NADXP{QhCr1QqwQnyY?N>k*E8WAr^pG4YtT657&g*P*mFuD#+<(eJ44`Oo^E}kehC6hoB5I%I8I56%q z_Rh(iI5F`5;0%!0`1!-=8}D7&iaGvmX@qc*N7RxjB|Zlx=<2F1M+LcCM6^>axr>H* zsVx5YAco0cB1=Q=A_OA9xqtcpG?MfG-4Uk$yW>{M7Skk<#B5zY6H$!<{igSjE+KId z_rmlUp@YjXK6?Gfz6YCy3}m4jJh+=WY)3!d2i0U73cU9u@x6(=iNQ`yWQNT;>PStb zy54@$u_pTz5_%ch`JNX)%9mDlKa1U^T{+%U^2x{)6=V`oA-Y)XLr-(H3|TesMYv4(GjU zeZdjJ{h!%3v7%D7?^ikK^CZ>$?z;FTN$L^qDh`PAxK4PWw7X@ij$Z$2w7~F?t2irM zC`(O?Vv&in3hXS6SUPk>cjU@{G**2id^{85^OR5=7@|_#;3Q5Rk9e6sR2pr&O8j(P zt)IS+K&~6{%;aR)E0=ud~&k9Q9f5p zGatVRd(bPzZXJ6s&pmmg*@D}zXq4%xT8iMz%~0PRv9g1PzxM)iryGky=`oGKc{}*;s{6r2?0;(QZpZOq1b&^ETDo?{KeruBt!o^vgh5&G- z^~J*1+Pu&n&_*>vLjrNdc-AaM}F zGECn)A&rHb1ZEP{NX zig=}fn+`cS(e^Q<50q%A3-qfPoH0t?{-=V0FZe_Z!N%F)t#J6HHPWX01!m+yRXeiI z7BeN$^%mNX#g|1Ty2uV5aS1}dpF|9Hp+gfKq}(0wyHLBSC&p-@m*lgq*}Hc51gE2$ z(-SD`_R%uFJ8KDyo*@zPHU9Qi>T!C*6&p;^%ZNA^*_HpX%;#lWD?b{)_F@f?j5d{% zAUbeqrXJJpQ${|OF*7qU@O@|U%r+YCd5CayVpRm>VJWwmC>|(jBzzSigh^Xse!S+guOuWdzcDi?X|dz^F~e?N+An<*DGUSmcxvKJ!V+g~KFHe2-Fc$IX$ 
z*5lrux0f=NS@2*)SX=C+&B}i4V9rI2l&cM_6DJXTXgNwSK3EL$Jm`FHpEA7~zIqyX zkk3}pQ}0#H>;9LS*_XBjikc4=Bph92-+7HpXXN)L&))x)$uSX7%Eyb^&12FR1FbBJ zHZmPX=!`S7f`MC1X_^T!7hBbM!^>}nDM^D$H5=8nuZadqt{2$VnZ!!#$v7s??4O=i zZ*Tzx=F;4ZESy7{s{M>PalGT@u9e8Ad5_j^hxa(*|{RQTDEc%-|$ zO1!XW6m(?=p+n35Kht#xJ$eosr+VjPGQdBu#OuCfhWQe_d%vd8@1;>Ph*q58;Ctq8 z;_7_mr`jpvDeuFNn2+MieG^j<>}*i`pfvw`X%$$Hu!s1MxvpI9-$l(o*El{Zl6=g_ z5P$vnnV`U{cdpXUWQHU(vNhwzr_!vOGzuDw>TfyDz>I9+6lzx{4{M`L0*s~amCRh+ z6hliqJ32PX7>7>5%i`8ff1poVv2PUMGGD#oHY?CCdk%%*#@c?lxbM26+jHU0P33Hd zN2@0(`ri$v54zQ=l}S6UyM>f)z#^=hGc%~G51%N^{`u>hBpQWjYtn|HoI^Z@KF2jZ z|ABTyWX*$#*UEgl3{CD}uck7Mv7@T@?71du$jF{&dxtQOX1yd@Go*ha-HvJ{uJc;a z22aW!;vfSRqE9Xh4cn@;x5(`=vgwS8H_YX|lG#e;o>%Wk5HH~kzWSdiECnh z-&YJtS=s!YV>I*THi~0$CBiVehtor2R^n-OTbYTX?gT7*(?4lrtC3_k_urI=wCsm0 zYEk*pKv^7pGXFc5lP9C=r6rXKLs{!4!-%J93GoVGSo?b}sRYXH>+jYrIJ%)yKM2vX0mGnKs`SQvVl%xkvLj}q}pIUNq$5$wbEO(y6A8g zjbSX+28R5Ve(aPjlL#-lfK{u-l-i5x_eS#6RRaY*)HhT*)#^NAUpaCzi24ItY6BF# zn%^DAjGvDiLLb}0xe)K_3uQ05&{Q!(Bmma8&bu)>C-P3~0z)w@u{ZXm2>=3hA7bck z2G}E`mn3Whdto}FJLNaH8i2k0bl6^Dx|SO_zk2-b(_hyK1-7)a(7Ta(q9qb$|E(9` zk%Gez^2%W8<^K$?H zx$7OzT)DN*d~dbj1AjJE)JG?(3_kAi~L}WV54WR%EV@{FDMp0Xkaz;UPlxAmK0AmK6ukt z5nCU_>4?*CV8(I;S)kmdNGoQnIhLVtHTi#-eCj)c_TMJe_R(_q8LJz1&D^^Wgl6_< z`7v0&tYo^P;{H{3oOjq_FkB`6Wat9KJ8;0(7~F>e=wWmeUK#7jKi#cR#4FscuAg($ zr{@M3eFP&OuRbs^8XH-L35}m&}lz>=^{9Q6D4QOk6~ukuCv57lS%2L?w;gC90#u`TSu^;KIZzfy)4z( zqQPG5R@^;n*rgrCo}PC%F6~Bv?=d&Te;7v7vmoB7V@|RbLediW*Po~ycv}Ut zZPfv9dcwu$rt;j!VRpS3cln5xj;@+2Gw(p#kui+vc~F8;U75-JVbH**BiB|!?)H@}^qtg%%wx@K3IG=?RRJ*_aPUnFAus=|sej`o2>&}*oNl8{P0tfcMeT;`r+ z%3wfP8Vgo#9me zr|kGiOu_DK_|grdwJH=gZRFjEf&$+@Vq8Y(fSfb~_5gFImkZp@7OQdd$-sj*4`>X_ zj1$;wHnD%G4ab?wCEWA-85wZ2g2sB92?Zo4P`lpVl?0x8E!ye~{RCWQ!F|O{%md># zU3;IPHcTH!{Y~wuVBkYc7Ik2F2kh5ga&Fbtt^@-Mc>?<2k7iM^@)sMiGCKEmts}vK zUk_&)UU~MuYdBSEmtUMl3C-|}H!SZ)#@XV-ohG|VvfBl8jX}*}3vTY)_)o#p57wY7 zVA$aZ%AOgu^Eea^@8;4K3=(2v(854B1Cx_?gw%b<*1jh4=xcO7m@(_A>}&7!gB^Ln zb(YFnY6W1?yRhi;5$*HV`VjLYV`}WYYi9SiS^9Ejhoh`5DHXu*uqmr*_dUWvGI^(d{qkVMQZ_xf- z`C}Pe(|W40NUe^jgZe-OwJ6gD_1{AMvN(6W2L_e>>JtjENB#9mtY@c{d2zdin&rZC_cQ$P!O|%D%UUlN+W;4i?F?ltyRhUW{!FRYyzm>ocGXTkz z5o=6anSyc|o{^FGmY%`{2^5bVRi!J;hvQ1Z9rm*3h8x*@_}iD(=HYr0t|j?v35mj@ zmu@Q|J>4cUddV0lA!7mrS(L->pA82uLvI!PzK_E$*^P@ef<@M#1wl_i>y=R2upim? zQf!kenlyR-*qXzxiW8P}wYPh?v8@i7$3x<{fq@awiMI?=_)#AsKU{yr0=x;)h8u+}%i!<3&w%I$)cTFw|LWHr2l*s91{MJUD_TDz_5Zr zM&~V%Jpy)mI}^-E`3}1o+yX%9y27bLbO zK>Sn$17Kii_jkYCw&(Vo!scX-;5%7VzfgzgLTN{jK;J0hW$o{w$oXhy32nK}+W#7y z1+;MFZQ^VO7)-L>n5_E1t0Tq8ksF0Ky;qHApw-sX6m86u$@6V`y_f>snt&`4o3N`) zHo&|LXmi{GzvuzRY^>@h!GCDve}4vze&F;oU@Yn$GdJY3{1#25m6lc?_^kxta5Na< zPD@L(i?Z;U1wUD|3MLnC&yp%hW+T(y>X KA<4>OoR0Lkemtr)AAoM7YW89lvGr{ z?c#fh(?EZ&l0q)$fvc5s{xR|(&I5pvXqorG7C<}g)1aSFse!KryRbLqvM-z29kN^RKTX1)W;1&q3!QC~u zyM;i4Wsso30>N#9I|TRO3^oiB+}&ZD_uGATcmB;Y-8I$KRbA(td++IgKtZShUMh$2 z0>rcf$Vde|F94Sg5!fqr!gB_#3*(B}n82XMwPso-Bca`Oms95gy0y5<1?R-d>4ySv z#!q)XAE2%#gWsu|;kix+(7l1&dOFzV4}WabnALcip;O*S9(OLyKpQPt@Vi$0{u-Jq z-FUa}{JzI4v|vFCAU_OV>ND8MT>j&y+ z_aP7KDFPY4z8zBgpV!vEbZ?ynw%x{GFY~^kmcYE-irYN#f#h+>@w3{=XOm*M$rIce zW>eUO@C`%f;rQA%hJLew7iFeFK!*Kuy%P{;L8D#Z=^PJ8h4xJl~xB-|Pug+2dX zc>B#jHT!OPrBK_w8eC9pctQd{dzdv0^(~AdWc4MeH6@GUr^J! 
zRfXYy%4BnsEhW71)t6E;>EfED``N(+(9;bdKys@PP@&?vF6lYJTe+V>Bi-kv93`;+ zC9(p$KSGt7r}Ed!F?@ns!97kOPV6PRs}k_o%5*l`7`*{=Dfl~vBqwCGRJ|`=aWob} z2!9G*knGu<^FFwAayDU-UL6Z*Z*_s!c1H~Y=@f@8Q<#WVCN{WGADwaj7ks0$Qt%v{ z6WL^r-e4q{B)qoBZ~Scto>tAi&C)m0-bDJ3C^$t5Qm=6=wndKilfCJxcA%UGuO>gt z)zc0cH;hA8g_?hLX?G~r7a=#Ty9tPv7Nr`EQ6={7%sKn{-R-Kgv>cwI=NO|LHg#0* zfUkcEu(b@MN_*c&{1asTTlTZR^ZV_+TR&|8d}i@N6cTUjjYeHc~Z6L=P)&L`Gv zC6q)WeP07`)e?x_k@_sMAG}*T9^thZNvr)VAGLo*rrX;kZd{{0)*IHxTt>}oaSU_r zIcgto!S-Bb>K7{3^HwBRW-H2DyAHp5)wqT1-yzkLE~!7(m?az~9yEzb!=NVdFh>NW z)&h{czboRS90=ss4U^o|Qf@{9sepCPSyC?mWJ=Xh4WD$t1-zO5uT$8&ThY540GKY{ zJ>~W1{Gz1_=nyUHzu4T|qoZC&Y)Xi6NePSjYar=@KZriMn!c7i>xVmRYzU~1&iC*RK$4TNO11-DOtvdx>i0E8>5r0lx_;v&oBQ=T7gvJ#llNEVI_ z$FCJ;iTCgNtVLmH09}?$w~xB0kZWef)%fYYh6~*cL^&G0{@Gq7e;Wj}V*(Z>CK|{z z3iSCGnO8|znILKx@WN_MIM&Zk4j{K&1)*cR2?^|gyG?tFqu<>XxLIPXp?R}u;@vQ- zuv>&KUY;fU{Q1wjvfvKHgf>nv*Yw>66Of!0uT{c#rZ|2!q(UAS^nP`@|5QSZQ^hqE z&@fhv$Dd$Q1vVdYI=VXyws+Cjc7IxTM;o(s4^Y^thbaxlMDReM zk0r~otgV&S1MbL3Kwg`+tr5!A#YVfpDJzHaMcdMJaBfzM>%5WI;BGt!#7wiUC{#J% zv-FKI$;+a*d97xeW6xH5OglAmjp~J8Y6&4GZ_x4JgPdda!FTxC#KptDUsZx1cOofl4Ia@S}AxeGpf;;ScWp>XWlfF z)ed=mm{)7;)QbkX#5b0sFA3)yW91+?86HYi(Z#1=xcic}qnk})Iv|v) zg>QpI2a4MWT^V^g7vjP)Q=2LqBlR zf5wBM$7F4c|2G;SLTj$)1GD2u_v8YF0=1-Ha-YihFX0{euAl*C9*!=+ zy(y6|+E*3oB8l2C(WrG9*Rg+O-1rJ(Uw4&P!G$SqyI=dHO+{s^U3koEhQ7=t4xS(ffepcFSEn7f;*a zF7@Ia;7--ngqI<*c*F5iv-%f}l+-Aa45_S<<@Pmy&UQnI9hkiU+~d=7C>gl~A&_L@ zKhTqzHJ}0}T~c}PK$~ebuGR4Hraziz_IB*eMoS0Hb?{ElI)}vCjn4c)BlD~2+5V^u zzGzb!?n`4o{(u4D&p~R5OtuMYMfq$QEJG7xi96Mp*Jyk2~4UIeU*qj)G$Ir`hY`EQMvmZY~Bon4yjF zZl6ZfMV%^IL$_7}adHOnbcH~oK(0Y}!uEAmfm2h19VD*rLYd;l*d;=#(d=F$XDik1 zrHV+Tpd;*~diNKOU+@4|11Kte!rf8zv+oC$(;@%d&#~GBuPl4<7yUlXx5S*)U+l(C z5qfC$h<&CQs;m?iB&8dmdlvU@KJb>EFux2R)3aUuwN8vYj|Fb((#Ww(RADhYYVMR5 zRD|;f74wbit^XktPp=q&6upWHwTKnhtt zsv?nZ$461AUbYJ+UaWl~Qqv3Ux~;!p^bxz6Xrpw}dvtL-9kl2~ta`Xy%l+EZ(4Ooa z`YIOPA0fOuSrgU&@%vVsY2ZC@2CX;Q$=q@rA3jM>c0hEhDPd7I%PfTG{nu!RN}OJc z1#_A={YWaHpHafj8jsg-2~1<2+LF1q)r$ik1C-0yr-Q{ITDadCqJ}w%oSfEr@Orzk zVh$SAI5nPS;U#I&y2BF9)a(T#M{!8M-_?Gw+BaLnu9~rE9NM0K%9yq*!j7A7+t4xJ zG{7L#P8$qgr<4$P9Q?g$aTa0Pdb|ufx=?M9ij@4^v(%Iq5>n+^BDnU7+aYD1tLoc6 zJotxnc!X4^N%?wC?r>CsZPlO?W_B2b>oOHFkMjt{IP7+I z5$25txU>BgeD63%)qa+xwwR8fdG6whrUfnvQ`$~Eo8-~u)@R&C7#k+Fq97Jf?a+IO zsY%J2F|cpjRDv!|-!W8p343B=Nw7J07*EeeXE@_BW;4xJC;#=*J}N=3~A>30J7{^hk-v^HxH zzjq<60U$MnH9!sx!ImxAA_X;{53j|(PG7Q{UJ6%U>DIR%xp_ZTqIqg;pc4xq3$%uG zH(vD-NUyfTj!36Gl&{15uLwgP%H9ZRI4;o1#$vDcPgOX`-YBWqn)eI1?7r^7t3A{1 ztXKQpvkmq5L5{&4i<-KIJFp@E8O3rDC+O7B9K4cWOJ0W)&*MEI_hLSCy#=UrIkwB)< z-!7a_&Aej|21Uh*6{eax;9IYszM{HO%VR~$)?=4eK#nr~UeAtrcKgX*Al@z+3Vet9 zmKiv}|dW&_|LN~)od^gKt7^!?P{=dMBnp8V~gxhF2` z4_Cdv&DWB3r7eG5-;DM8bdWlF-OWSJIEc_FV)P?i6G8k)HF4EktwMBPbvC%7^922G zwgYsQr`ii2Ty7s35y3j4o?Ixz_9Ch8rn+SPjr(z2tU>zxx~R9=XwNg&T?Y7Eu}qb1aIvC+lTFCSO~!wnu8!W zA=1C}a`Vp(492n~3FRKd%VX`!NU(r>ujiSK;p+CF}M8 z7Xw^UYCE0HDDGE^QUkq-Y@=-JoJwcmdRwD;hb!*`L)3+K3{Z0MsbMm#ss7l#Q?Xl; zk0|3SJ2C&+db-H;!Leh9?Pviok8$K8{my!HJ6LqefeiZ}jZeWdT{=1*4UEGaR9aS@ z?+A?Uyepp2kZkYxiMSDqb&_-3#3G7M`3W*q4csR+_p%qNockp%?xt3=Fx6~;YWY$K%+Ot+-ku*98y`)fliTGugzpnqv}GlIU&HFP!$CAN?_;CQD7`TU)nc(1t3=W3xiK zfLf+|^z~Iouc}B?KP8bI#mx=dn5t&KwA@yqDg)5UIrNltvL#Eb{P0RuE^@;YW~$h@qtx+^F# z4Mgo$HzR^b@5G@;OsDDs$7phU-YrJIpu#$N*vw zQ16-HJpnEjooP6;mwJ2pNDbN8`zD!t}iIXKPR27%CCwg zwQJ$FF`DHtRh50R;H4h7wc-MTIcb4Ym2ptEQ~s)yJN`!T-24+U zRQ82>4_<2RUz8;6J5J07Yr_=smg6}BLtV0ook*d(k@;NbueG$3v$G5wEy_#&WlYIE z`nFMbhj<4-E{>+P#<}J~hFFl{K_Gf6L1K^h%w4jPQ2X76or5+s7pPF`m)r8SM+*{9 zI?r~?uQ6V2Fx>?z1%aqlpF4#P7tbL-As;(Qqut6^Ly7wZZ__(tINYo9Ak$mAb2N?m 
zuaF$DiC~KBiJpXS!$b~30Yk$LULmqYdw58T0DlL!-Na8mb_Zlto@O%L4RY$6bvQ(MbzDinS6Oz(|)IuxxdE4uvc zYuOiFjGTd%Cjzq#6_oAk$HW0+c1&;6usCt}_||Usddv0mJZw+OOrs!yGOcG|&sC8l ze3w>pEBum zBO&WsG1nHLJAG#mI-=a^c^fdk1E=N`5O-HhYz7G zhh3RMGqb|tK_pi&lrn-cotwRvnlC+;=%$3DBMVnA+?IP^Z@%@CD*blu7PKqfC#K`( zsJ4?W_+Fv8yXT0n=`g7*FlJL|#$rph)^d`Z?QY#Te zuJ9lxcBIf~v{;geR?#%&-v&bcr70S>nCY0}JGISG+i4tHN=FvED@PO`J+o{cEqew`a0+uv`R1Av{N$?*-z-gzGi5#6xDzj%owsq{w|OiC?FQk(!2lL&o^+&v$#( zUuEfg@3s$%!6ZL^xQeG^(+`Uws>4hw2A6)**H;m46!)@d_D(fz`IofixLf^EuiJd% z@-!(ksY1pOK@@u0baSw40^txKqLrf&A-&kmZ6!N7kyitJE8cJ;?#-?iq#h5k8hq75 zZ)6V-|2_K}?H9$^S=L52`>nwqV?KTAd*ki_3DXm3t1*O}2ckq3p`|wG;CRD)r5y$F zu&?e*hn$8ws771`R|-jlu#sD_inSs?@l;x!3(Xbjd)C0ehxjPRHd}P?hz49=CLTb0 zOlm3?>jz-?@2BSWJb;&3*#jd3`kW^kwODUj&$q+(IG|ML0Lk*ofLlFkX z`txK+z7PWS1H44`Vs*D*s;+3bU%O$5lDm3$eCzCbnQ_H+)Rg+0dqeq@Jz4W9zrMQe z_R~p4(E5*94|a%4c}K}`p~*=bAo_o?!fLW=%U2II_^rp~|MqWI_fJV6I${5{Pz@#Y z=1g6`M8Qs4)tTI)!PGbc77q2luPq}mg+OoAl;b6Do1Cj#l}(cev@2f-k2OjVJbCuH z7ruA=nt|)$E;7wky1G>Ffx%^!VvRXW;XUuK|NRx;@%hwT6Q0we#fGA^_S54D%;%Sb zZl-WVGAkcirMK$=Q)963^{sxTe{1%U!CE?dW@gUs%8k`FtEY>vC;l(8k644p&^PKt z)AsOw8=D3iqnI9i8G72gxSu%)G~Ea`a8ufq>^s^cYZ|(c+#-ga@GaeaKlJ%4TmNdf za5!V~G0XenvFxi2R$Vvdiu_H23uj1~_0LlM+EHukGJw%>$S7en^sQ~v84-%JSt>vCP+CsYi$Nd;)$P4|_QeA7gs z!yG?2C934%8Sju7*(^21KsNFrzr82}a$7#7z0qT9+$6~q`h2l>QY7(_goBwQO`L3p z0*+idaTf8|e7rZEIsMYI%PFcMNcTKTTaSE0Y~8<*UzI%lZMChDniH*72DYF-e!!)c zSn$I|+&(&$Fv;NHHWaHLY~GDb$7l%`Kd_)jY68EtCq?}Tc0uO*_}vY~8tZmrh*Pma z!=Ba4MjxcEp;e1d&Mrerchf%^vVZwY@#b!)sLC!r&MQdT^V1Qfw}DRuP0VXYfDhvidK{zXD2S@*N}8w>f)}< zYS#WN)UB-Fsd4OSkZ-Fj_-VuBtLuYNX6V*el2Ti-POR_#75x`Gl8>nO)9ZfiIeUcm z#ktw#Gj}SdbM##o8q=+h2I0i1Ok~cKR>9(&qN$@E!AmdiliUnJ@;f3)?HZ9#Z}N@o zglqq{;{<)Nu`9~_oZFxXd=0TKOAK4c7K(n{R&IRQ`Ri^qy2tg2`-jcj(}$iCmyt&4 zgn>Pj4}a7`BElD(|BR4OJ8a#eb*B?_ZHLZk?6$(C9*z`4(%hoDay|c^&AdPL*P1Zv z+Lr!84rC8@wy(V|G2Jf8UU7IOpL*2LJYM3}9jk;-VhdD7kBr`w8D0%QnF@~QtVSy+ z#;`AVW}4v{;=H>zUY{Dl-t3wLB=7kHsN7rl|?y%4{=ff^=lUz zFa0#@bvocic;0^|0)j02=+VCI$)!QvCDc$0mkvw}e3P#km0EJY;LD0pXF<-rj;F$( z3Lj!;k!dHFT^_NJ0u|#`wh21OrKP3wk!_4!iY4}_#dDI7_3_hdI=pvoo&pU{9Htf~WZa4jj&#^@^LLb~Ska3p#Y$P)1AR26< z`^Q&`{Yrb_kh3HdxS>|TIxo+?+HHAWc^%=KZUt0E?k(7H4P!M`{vrAe3$42m>Q_%T z^vG@JxNdxG-#w}GZ--nEMI`~lOowHPj&m2HSgaCGro)u{waUAaEoQ-iFJtiHf@dcY z4e0OmN|Umv)RDnp8iCc)W5K3&Ny{xmzmrDUU_v) zg)~9AXQ-|&Q5&~nbxl}(z`OzN91XTNB%iPv0bB2T6S;}*Z8%;8OK~jgfv2D8CDEj+ z!ugg&sFqwr6;K|IhY#arH1or_%0sQIIh?PA$agsl?E8l?GnO{}A%?PdG~1X0V}_;? zdbIBEZ9CsW4}5tq$ZE#5gNuU`5$&LggM(A@iTMAyC9mS$$l*xttp7Z%X=w63AU%4= z+fHtbF#q?Z4iQ;E%~X~A8XmEoFY2lBtLbUSZ)IMuN9^Hk zp^D}&F`49KW{nD{_HAQ4HqJ+t;aEcp)`{Oe~<+hAzM<3al^ZL-tOAR`dy_I(P9rGz-X2d%AP^5x?H)M2$cBV| zeG;q`&~p(_KhoTr?l}*qr>WpxXEA#UCKC$ljWMsA&Hu4qy&ua;ti;K|w>at^Aqzaj zQ8V2NvJ&^+N2J-ei5&Ip&djxus;keni^W!r;%(vqsLyZ@-rPL3Tb`}o>O%_H6wSI( zLT{*OCJjE6i8ep(Hpz+kaogjA9G4=ewekxsdePp5k*f|=43PWUnXzh?0XsxKzp6Hu zeB0WAAx*SAnYNCGBbzcqtpibDGg7>ryfPO99SJV|Z(CXEe9BLEjbsX#aB?5BD`(Fg z#1?YS_Q+IIS;z96DXbLok+KoeC9~sMXG2JIdo=I&sK|4=cYPOR?`;q;Qi)7FfRz|gma!WZYn=ZSabl$YLs7fh6RY* zk1#cnz@Y>y8k#mVze*ktr?gmEP?|WoVZGVH+j4^qCXQ+XvB#r+R+)|NQOZMNsOIpi zs7QGQgTq-}qM%dMyu9PbJJ1I$Eo>JDvKe2?)NgXEU>+DQ&JJk@d46U1%fLsWU1$SD z|KVRN;ft$t_TPlB?YWpL{C>%y8T9ALRqiRfjWW|j zzaD9{uR+_ZUvs*UxUo=MCWRY!Y!o&I&sEFp{q*dw+E1c|y5mv*cVH>c;^>PSWK0oJ zN5h>ljO@V2`W{2>V9_ra-4ahFUUvRHMoQ8Nbm3XAwIXFv01$dl#z(9?crvJ4XG_+`@dgsEsk1W@<8Px z+N5&19J~1G0tWJ0WwO7XR@`U?XV6d))|&plyM@8lnzhN(_|S!f0Latv1QaN@Y8mK! 
z>HJ*fTP4Na?JVclbk}Y30u-|mmsrh{l$_gYRKgV*d9I4viP0i+T}mXhAq2znY(O>S zVpRJmv{B*h!i>{*B?HGArLa$m9flZ-GklD#F?v_K0N*X&*w3u@C^Y4R4f8tj*V;#% zZB@BaPa18q8?rTSQ8+Gyt7gT2|26RX=cI<`c4!RIXJNkq?7*w6dv2yqHi}+LzcOHR$`n*V*u+QgaW79*c)!ltrvQr#W%xBCH#{A8E8gfllWp$=d3kSFR zN?&H=lLo|R5Tq<7LM2%7 z#6CH(=*M*1jt5EqlFiGL%3bLtb$Rvf^_!KA50ank#@~S~?z{^O7->phc=D9)A2;ZP z@GADv6QyR>XL^gA@22Y_NgJHiZcdC6*u%KUqWAL?8(Oh8TeR@-f81}a zKMSkqwUUyRowl%xUs;ATuFaRh>{OpQwnIT=$@5$iBg! z0%lk(;BWhO(rvwyRH>X^++n^|KTTPCDwU0pld)W%7Btb)5cny;HKTTfc}QCNbYiiX zr_|^9vJtQE*S0`?0>my!vEW2S^`<(zy(D;{=g{`FK#fa5@yp*0o{)8A&Y@x8`7~k! znyY$&iu#7AP28!CgHzzj*We7Wy5lzXw=da5wX95i!;bH$5&)e)Q_u${05m0 z^SbIEHexa@kK1ser|UzvFUjS66A*oZeC( zZ@sh3*`}cP>ptwg*UR@eX?$h7TkMA89UU4NLOneLPX=Ur%0gHB9*@e>`Q%PK&K)Dc z0ZLb;@XASL??P{$X}daLv6Y*~UF^C2i+oKW`*Td(BZq+jeQOOoQYVd1 zocGpo(Ta;W>E1t4M$fT}&>`{yPFjhIusRoIC;bZtFGGcF!OSjkR{#qWAW~_e>>I4E-}gRn;Cr7F?*UH@${u6cQ(r?WdOMWVI}%ZW`+TPzFGy9v{@8*(-c*)YcTICl($KXp*RpFci1qBkZX z1Al&!~o@PG{an4>J!29=Z8v z>d&C~RcEZy|DiZEd|c}QlrEtTC*VplMEeYIn=3?HqfOmX>qajG92q(=%lAVrD6YD+ zMIQjZHD2icp?XL(x{S5xPmQbvNP#fD!c2AfB1BrcbiJ5oC&@6FAkQE*HB#OSfv2)o z(><6u68LlxQmvnsNdlaydgb}3#Wy@h>$nNCVOc^O!u2CyPMNn`Xm2vhl!3kanJXk2 z)jYSE(3@_QZXLyx_>!-5KgCogq;p3LXoGqcz`g*L`rlnIj@{$`GIICK$a2xRJcbe+ zV9ra=SOh161%N{gx;Fa2RsUG|e^2_o|t5!N4^GZX{Id)?KoeGCH|V4+hsKT=ta zHgBXDr8`71A-Yyp2UZ$&MxbLOej^<=tU^HkY{dk;QPPkdm~YIkeaYU6l~@E8r4_ii zzXJs79VIjP0sKxYw|2VmWBLlR76SY3p;l0Qzo#7n3wT&|Q|TKdUwg)J{YD6P>o2tsag9xc?ssjy{!Q>`*@$6c!@r5mxXb8NkF+0=k<&yA#XM6^ zALvj6t}NE1TaOXL|MXLSJ>TaEmQhNv?s12bK71$MjS-RBj{ z?s2LG0^Y?}poCy{Xe9b1T3!9yOp9-VoFf;>L#6{>J+RBWC^w!+ZA36dDVgBm?4WJ2u+7fS_V4+;kJBR4 z|K4HKd4p6?KZ)Fj9L@BMD|1=8Mk22n3N**&?}v=7=q8tOjItdQ~_X?z6F$-1G6 z0pmdPD#KOcVPeoxRtQAK6CHZ$>;?n2{hvl$kSxE75uN(|1?zTIK`n14_>Ydw^QXf5 zdk;@7{Gp-1*^5P&H(Vfzd#a<^S2Jm&X=00Fj0cM-pfX)r#v%hh4ctOCT^3v#8e{ZU zL(>X?7>ihY-j3b_Xgg%;CZmNER`G!j=(5P;)N>6%vPq|V0zqIrAJ8$L9LStfig(2^e`zZ2T-2-Ku| zBppqbh6r-y=5?aOn76=M9Bas0Kk)GOXi5$&flDvbHoM7(YGN}~&S~`IaWV!Flz@Q! 
z3;!^-Y~hpXyDd118BdzGCp>{HXJ2&MKBNrJ-qA6=TcB&up>ZIToNNapA>uIG(NOkG zm$3GupMuK`BKp1o^KKWa%ktW<$tgie zI$sFq)#U0HbO0Fng#(wbp5?8zlV^7673CZ4Df0aSNN^^iZvC@)|CU{aoyp=%b$PkF zhXZsL^lKuBFeNba$UQxTd124{s;e1JDVbKHgL4OxZ zT4XdQ^@=;N5d_#LEyrP!F@a7N9ESSdN#{EtOcvP&f18VC4ZjBT26?##)0X-0(Svaj zTezffA{O0ZgmDw7@R6Tj=S?ziX_wB#k(osPdS$l{#$S@mTE-o#-oc-xX(GBkebGqh z(o$id=L{;%B5CDVE)p5%O811AEDB|Urs<~JamgyjfPT_K*Y zN#uZTG52?xqm0c#CAh978xRnLY_~vZF#me@^+6(5&S1|+TGYuA9bOgWsZ!Ol#Jog) zd}l#RAxoT;nq=G0rSt+sHoTjo?>{nA1Y*aDhRs%RyU{rE-#;9o$zXy6VBxFX>PE3Q zXE3a`Q#6orx`sm^paal+e+nlX6f=OddV9?5to#)eda)xfl)1)sQ-5suzM0zIb~;*# zRjSx-jte335GM2SqxRs*It`1Wv|J1xhAJ#!|rf;OIa>;adrzAJ#%o*u^@KB$7ZNFoO5RC7`|?XjF;M&7Pbk^k_fgN!Cj z1t}GuP(FB6!i>Cx8++^5e8$?)&1Pq6mN1Fw*NLFPZrUT-i_s0E!W+T0Kf(}~0ysx( zL?7Pv#kxuO!@RWRwno46dI~thmJAj;H)fHf@e@27hb&|9&n(J_5L*2=XW{@t^4Fb1 zNL;8uJ=&c;*!A%4X zAEO%Lp9pjyFe7mvy0&J&HIj+?86hV)`fQ6JfcekdTxt#@C8A=Ze$=m#ckA0}@QKP?$+Ys|xA)q4CA`6v~+B=Q;;&FP7y%&D_)% z_qM1k-%)KmD)g;}Fa&t1@&@&5ouDypWGp{ITR%?T&H44!Z+3 zRD^gcRIQdYCuJx9{wr^BgZ^5~52?LjHfy&X!{6kZyvK{00*EMz5+mo=YKy_0kOBaSMY@F$Vet0 zq_nbqe#7YRMbC)4n*flNBB%fS57uYD!lcoNUmVgjLizk3ZukFJuSS`w!;tstkGl0F zGn8!@^LbOhbW?~UW1da-Om_ny9Ds|1h43mCsd#Y`2n9=(o4Z@;fn^r9yWY&s)9b~> zH0hSdgCmm-X!TAx;v6UP3E-RLNE(fSmWwKYYD?pP`4eO$7wyGh{2cqp<9aEgJD+X` zLrw?!&h&=*XjTq@+UBq9H?fFMhBZOxz@_*wj&)frm9XJUjK%@e= zuAdvuLC^5O+EWSRMSSm33Y^c*Yn0^Qru}$jEAZYPVMH353%hi;RAZw;^S!#oLY28% fh39x4)5|BUltyPa^cW2e5b#luQI)QgG7I}(CIE|G literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/twisted-overview.dia b/vendor/Twisted-10.0.0/doc/core/img/twisted-overview.dia new file mode 100644 index 0000000000000000000000000000000000000000..fa78656688ddff5ab1641c5d8870921d23a4216e GIT binary patch literal 5984 zcmV-m7oX@KiwFP!000001MOW~ciXm>e($f~a$jkf051TmlbqG0?P=%acItH6IWJzc zMBAFsq=us6cpm1r4>l;r76nQa2|{EHcQuh6np+SzzP-PVefitpzuYF)=Qv-^v-HIY zM$U;9r_*dUPp@B`{PFRZQ+D$Am*4$uHjjSb|6k|Pt;K&MUBY)SPHu`~@x$5K-Q68Z z?w3)K*)<TPcq*>cV=Def0rm(<(`{(N%H`l=O?^K@h|UA^gMeLl!#Jqxj)7Ib7VOUU7+Dak*Y`^X0`N%Zogk7uzB)vn+|Dw3J%p ztGKdAFy`xT)cdIoLGZy=? 
zf4GsjUd?Cma*wiYUQ=_YoAqU9`#$yRbxl8*@<$buEkq>IeVngv|LW0LTk8kB#+@gh zLU#Gz@w8Yk@4Sf8S(MMLQ|nds<>b-#;GV~P_TuEdv-$B<7sPMI@o($%S!7?8=#(I8 zpSsBRtE=8djyegKmsJ5dUqX~#CvjEC#DkZ_LvBlXaT4ENOtU=Qb`BJ-N=OkHvc0kg z{P29_IZwZW4zh0I`SndvFN{#c!i}$X!Y;FX7U%nx=Cr%{@+P~xka(o~&8-|1&K~!} zlMMnlUu?XOx-IiD{!&Qx4BiJ9+q-64!?1?}-wqM$U_-X{j(qoE7r7K9pAZxdZ#E$3%OYM7wamzohSD% zPNIAsB`4OhxCgXl-pTsce~pvRaWS7pr?0YP_Wm-N|Iela*zR3zZR8r?BXl;uNO#dy zm2IxJ?c{Bm{faVeazw}JE{;3c*XeCs_t4{Xbu00U_Q;YN_`TlqQ%q`yK|~z}aWK#U zLKrz~9X|wQ1Oh%nAfevy z!Q=P%HshDCA4oKv#>?gNgAl*M3YZoM2@Qm_hmht`(l^`>l7LE}B0?daqqpIY_Y|Mt zl8EgJbYUGOPO~(35w894df+mKRZ;XLsB8 zf7~0cg8N@2aeBRH8I?nuO60Yz;Qb$8E4j7d#*OXWirdru=25o8rF1y3ac8WS?DJZ> z@B1Kd;$nyzxrla?ENA^aE>#);kmw9kAf}0d--rjakyKCEo8{WHmn6)Yu?n{pDv!7~j=8rJS|4}cA9>#w=dX*tYXPcIDNwl@fhrJ*3Z(SH5hGxn zy>X(|c?@_l|KF(K&&Hru!$^Pz)N&1K^^96uCx^;72X>T_4hiw@mvnG}Ytlgx(MmeH zFV&EaV~`Gvl^Szjqs9lw=LXzIjxgo{H|zm_Jx*)>DoXi(#*7b#d~9p7G2=rZADAcw ztUxPZd4N$^;FW-@aGYrMHp-VbQ33^J2C|03JOTq*<7FPMQs!|=HVT+DP!BHATXPRb zGxt!iSVKOJLOwjDw)_-pRC-^O8*q`KKXDy|xjjF1_&4LU=pXo_S@0FkKgZUqJo^;q z#;^~Ei%1mPSoT4X5nnBUIa&cMEHR6TghxWeH+Y=*^*Xz~U8Nix%xTE5IEh514Rq-t zB?snHVs#0X)eNNm5-K@hd7#*kRM}1Hp-GjJEGyY%4{y*UR+_}hZi$tL)l1enEVOGW zhUY+Wp|f$)qP!O4K?t3QR~y|VhFXcCx9VaUrOW45w_ zv~@eBMsQII2{>Flh@%EC3|`1|&)`K*Ra!O28lehO0fE)+D1R@kLd0;(!wkwjd>i7s z0j2S27j98^p3%oTYfdtDony2RAldO-Cm3Wi^w5CIZmZss~6C5L~E8)z#a}fv^;7USBJ7y#8z{BKuFh60rZZoR7)$56mS`yUskJPk&em5wTw*E9vC!OSKUPz zwd~PcJtU|-A~>kcSrO&MR{Xv`df)Qi+p+s?r0FhXUq|V-GGs>_YCfVJha1Y@eD{y% zz<{GXTtHj}Aj0@Q@>;i8YziX}QlMcxwn@iU?t6PWc3xMLrj8^)#D7XEXpdbUS*2k- z3rx?)K6^fP4doF}NlR#9b-6YN{YN^uD3Ao5DLll5GNdDjr(%V}3F7(RYShkGlI`M% zXC3I!W`-!2G$8~L8`BX^S3rNpmnLR+gqWRNOATYz9<>V;Vh+`TIb~WfmxU(;ixO-8 zy2~&J{ounv$6Chu=baS|CRo=q_E3X8{YL9Ld-qfcHhCh7H7@7>NaCA!SS!w$6QGA#!d`Ri)JfC$N8DGC|Q|l_pgd?0uhFUZ^5XZQoFtboXYLjX`(^%a6cl7pD)o? zV`FtY`7Tj}JRAY^%palxZLo0giFh5333z^bcW#{fBRcmIO*N=9IM3cyQW@ZiQyZq? 
zj1feEB;TKS7zx*2N0yF?6&mlpcN?oD=gg(zS%*5b*+(AG#&F_>qUMD-WAyD`XX*6D z`1Z%BuaoMkv9sE}dtV*$fZNnR@&uv>DLD8Omx&rX;*2a;=RBUS^4MD5#7Sa&{9}~X zN%hsB&)~fLK$VxTDOrb$a?8va@bXK~8|(4%O}@_Hy?m<5)@n-E;aVsw1jI~|j*2foTH3+J(ih_7o}kfOCV9TpU78Wtq%z*GOF;}D0Miu$Hi z^=(Ay?2V?iawM0S4IRjT5c@lk=Hu zwX{~t_-d6{SF3aiO;Wkm754_k)+iQKm{y=4#auI_b+kiTJ))3xQQO_3-hHyJ0D2IL z;}kvc{EL8`|bVt zzuwp%vSo3d$M6y%WDsl+vs!C(+d$Z8sEMzXngkkSFoK$Jfj~`|9ctwz-PhTemsHMZ z9IB0|xEpeD@EnLrpwjC5Hr2QveB|16ZS{%GI*+?NHeLAu4+mZuTR#}KBGYJN><7VD zs9NM=NF#7Dn?XJ-BN~b2sG8~E`QD=i%qoF8FAw>y7-wySKkGVh( z1eR6m6uMF2&S^CVaTO&CHe_XxWF=H-@}aoO(Rr{EQZ5k8zEAkB7&lk>CCToLy&nus zk?OLs_(R|+zM~qnpe|G&YB{0Mtx$wn+|r=aae~*6%lmYClV>U9f6byIvaXWd!)Xm~ z(T`)>>TQ5*EY!uvimA&f>0I`5D0w-Br2I8`akQp?DO#{0FGD0RK2hukoMMezBR~!T ziL9*Sr!ZKYjr_djule%E*#04L7>P<7+dl{r#q+{aU*^br`Pk;9KsBW%xtTo0z=LO%B+QDps(`r5GeBQJTAI z6QnVk0^`C7z?JAlJlZ6%A6LZ<*DQ_-P(yDpYdAG3#9)?Y;vsKpb|B|?OloZiXVr&) zilG>eY2ec2j;m<+%kg-k@U;b!lmL&V96h3Z8~=|r^&86q27)(twy+CqwIKQN4l z_f%TTh1GN*tKw0+o7Z!eeSGB*DboI8^}WTUx0ZAmY~xR`uA2|q=HU%*QTf(`$R*@s zFncl&XQ8&wH-GOP*g+CB zLgGyt{mVhsHBwiqUO+$!H3Qa z$b(i1eNC=@ z_W_BAdioes4W7QcIjY3_Ry}=BmGuoPX>NU)Ur=)oj4w1A@J8k9i@&W!mKVm~Ka#&6 zZY5(H%d6Sn_f;Vdc6!Gf2RTIG44n5tvJkhd?x_6z4;*c*S-hO)^97vySqWd>D!Pu1 z>wg^AA3Fwr2JQI=syu%Ol4b%;QGOwdJO5B@5_vih0UZDx~ADsZGnQAWd&ebth z7$X*f+J&405Fs=ilRf@Dj;2MH8~^_p{=ZB~jRn@u0GK++@w$Wn8Y2!Cdc#D_UkIfm zjQJ;7wy?NiH=h4dJb#I{8t75&{2z)$ol^!0L(C?m*Q8?-AkQY;?;(s$T#N|8F34md z8By*rWIF6RaWr|yin0VHDrwRLl_Vbjdw%`z==#k(jU`w@ovv8&Fz|r(2_{gTCK|9z zpk*n@qis+p=loPsuWyMgDXI`uuM+vPYF9))A+C*r5)c4n-4A42QH6P-Y19 zRhxwkoov4^ZsMba_YSSX?~h}L6_=Oj;n+2*YFM`J03US~A`kyd1M*POnUKpO6XGJj zd>xJocz*f#!M4udzy9aB%^ysX#)WOYjnW%#fu1)U?QI9rD;!nz*TB)xPW-8o97xCv zvRxfmIC0HGmF5f|i8f8r7$)9v1NuYvck zVu1~_e^BVDD#CmA?mttK;zlSc+t>E5+<`a^fL`qrvbMpb3+>Qvgx|%VvpeJXAHngL zs;RNKx*dN;)M1X6QhMUSfjXCkxlwi2{kca~U;Jl$X+3H4Z(ZfLQGA!>pN#8&j2s}T z>KY)@Rrj~EVu!h%tN&a?Y)o({g=F6x9e9K6Hs}ZZ<3(b*<9V0)MPXl z4v^pDFs=}7GK991zsxW$g|l>+-*vBX;~|QW`>bV!Vb@R>PZ^tl`M@yNll$x580zAG z{t{1DMRb|OrY64kns|venwog66}C)GTRqHeD)u#mA3>7vw5dPw!O>;N)zDP0`}&%N z)?tFd#un7kQ`_4dx=Fv)NV}ReGQ_z41sYVnjB^Dd<|SUzk(JzD&eO93@@;0Ot=v5Ib{=Q1Bb)ugYJqF#)Se+wu*pq6m zfuX@A*(k{c49fU)|22t$2);wSeS%$1A~_AvQEc0NeN8)StDAH4>@#daCHZ3^3JJJV zLD>IsH|MkzWH}yJZ%#d_Fvs5Lod4TTMYJ%!zkc7pMp=#7)$RRS@&6f8x>2FbAN=Kd#UOg z92y)A@D(KlxxOwU0NFWCDGYLgJZ)S+%_>c1(9zjIpy-TV8y(;R!pLmVA-xEyAP82vGUp1|D9jx8%_mCpI|iI;^r3ph?LkW5t# z@(iNxHe_9ki?~UlekDOhec>&n`-rkzI9giQ0`ysRD_d3VRn@`ATk~ddvunPodfRAK zWL!gwO^Duh`>XJm(`jU~Oi0ip94Ui`3iSdV}J6L=pg@gY_T;(7%H*{W)BZuMh#S8r}LOy7&B5P9`#w93@t<%3vDr{Gn@#lz*r=x_$`%rnx*Ll ze?t3lL$*R`UrNT`P`fw7m3z9VzK6$KAFlwumbgWE(bGnKa_|0Z_fsLdt#|Ea#GO0n zK>tdZVAZE1F9v3b`bIG!7wlkLPrmUUlq{_7BJ80dZ9?PWs@)@`>mDg1@9kSYH6y!z z7Z)z3Wu literal 0 HcmV?d00001 diff --git a/vendor/Twisted-10.0.0/doc/core/img/twisted-overview.png b/vendor/Twisted-10.0.0/doc/core/img/twisted-overview.png new file mode 100644 index 0000000000000000000000000000000000000000..e9c71a6cad2854f95224252347059249ca5e7e33 GIT binary patch literal 50929 zcmcHhWmuJ6*FOp`L>d7lq(cP(>23rCkp@v|P&%YrNAu2UO#}in9D%sN ziF*M)bDnB~{sN}6?0qdk5r^%cQu z3v?0JK49V!Hul#^pIAuO@K38W>Pt4Rp5qjy<$U{=m;Y@=Tu@Ll2(ANxXd{%vO|zO9 zY5x1aU(%}OTz~)N&vY>t{SQWQTulu4`?ecNCi)HdZ!#)ip}#OL6N!E&h>}1?a`fM( zI5O<$zuSlcw9tP?()~aF0$B|*ISr5S*6O!-{+#MTB*^S%288GzyB?a3q`S$3!Br|w zzxcUfQT~)1{R?^6NH9nOe6eIR_b+)*5yN*ng&S#E2(X>7k@6{zU#f<$0CT#pLNnij;Q7 z+y*4POyos)0}UMFWB7k>UNP>)O2)g2v$xT=7?quidPcNxA^!5XqlILNuy~I#u~URJZy{?xo;ca z;=HDyqNe6|yd3x7!2>Jg%s1B!CGHxR{WXnJ24-pLStrkh155<%m4kT2tRK@=NA1x! 
zpFO-ndJ}6bw6d~te{1q+e|>Pk-L_?ZGuUL5Ncn|e?jIw4{i*V2lPwW6trDuLs=t@x zjFj6W(?7=EdhE8=CzGX_m6H?Qx{5qL{W*UpLO$bBenv({nJ#CklB46UKL+O6+1W^e zPGfR`!RM!Pckf;;}2i+7a#LZ`3&9y&WlF-UsFynmmq#eOM8ML|Kt?{~Ff(VyR^JF7j(owng`-x_^& zGJE*&p{(o$;nbz2rO;4(WOkM7#(N|(Z*aZbZu-a87N=HxoRRPGMp50!#=*u&VWO1J zHwDjvu9sT`h-^}Zo1E^h&d#3-3QX@6;}Fv~H8t_rw=qb0x3L)3dV3yiPg^0)Uy29I z_Lo}pUcdLI7|(5cYJGY6Ag=Cgd8X$1H64RmZ_#VlzE4hyOG+lMTzq01_5J(zPx@bF z2lwF)e{XCI-kUFcrJ94<-xuIQ)jU7KzC@j=nfvk*%_B|Cr^So+7<~2*C+(GTt31cu z_TLZN+Gx+-nOX?Y3bY&D zWGwsgMT^zv_U+rxo^h%S9vmDzn=CgIf)zh(2*eRk`Lo-~Fl7)Qq3`PI+TvM449nTk z*=dCwu`cuV^}V+InNR1E0K!;)1Wz+RKcAG8G}Uv3YQ<>A{d7wBP%+Et{Nwct|PV#oP!6UCS`d+=BdklZw%ikvy$D`6IqpUYF=a#KlX~ zic3rROEuEec`}IY2^9xjG&K4{7<@(CHplw<)T0<3xBXP_|fS zjGbRt7$`ETL#_7mPkPR7InRy}Q}LNjRXU1_io&*L{h(Y73L1Z z&W+f<)jLX5u8@|NrtBXd7k3Z0y}LWh>SQ2G2|hJt+JZW|#X)oW~M7qqA zt%PKJd?f$ww@s1W#8|lf{Eo)@S5{RBarbVB!#6#KNMA z)3JR2WFYfCY|$D|RCd$1#5k3SQL%gWLgfmmm2MN3A?ZJ7&W|6DlHYpNCs1SFjT|qv zaE`|7-ry(p$Z0Y1IT(gx{^Q4wU%xzN_<20{mON1_SlHNn_O_X{cDoDBv9Ym2`oCHU zWUQ|b3=B+7O~s4Ep2ladGx(kHiiw@4!U7Ppk_0M6vrWX09? zj>*YMV}sn?HVN3GW}WXpr*@)N8uiL2CUmb}y}Gz{t(5To{rg4Ldy3IN?6}J5YL<^D z8YrxGW@;8&-{x3}v9Or8zhj1yn{DLJS_;$!%il+%Ei2clIMf##>lP5pKOQoVjZQROyLs~+Ye@U z=RM31N10ZTk&|Dd73phkW@ce&@tBsh|6F({1@jUjzK2DIeW%LD?&(TrHmm`*k+IK4 zVFlC%>EF9=k5Q{(rsOmV3b3g(H3vS(1crBV z@O7%|=}kfw^8X@kPMKi{t_r0T? zon0K40bEz*1LB}a?z*#q#zvE_c-~DVar;Mge||sL){ZYKvVZi5qO{pVbFL>@{L`mT zZ|Nnkw1?vb^p!?2@pcE$D^OKcCdrPFMii)6NG`p|JVoW*n zP?IM9YsO(kbWgRnw~tjEuJiNTX4eks|2}!Oe&)4)otum6q>`vvGmE`!f%CrkB8Am( z-b3gCu+IckxQ%M3rl-#{-b(Q)w1^jR*Wx<#B#CZ}7LBgEKz$Sz!93cTWe{^=Exm%w zUjKc5%5O9S)hIwKx~=qjz}F8x7@O1qLlm9mTeo^=nmMnaZV|(6OScje7LFrvT4=%x zG_6LREQBG-be}0Jho`2_o*Zn9d&lE&*X(P$xe4j{-w7e&MO7|@lvXi5#19>h)}}`} zvy6yRXfsS)t=E|U;bt3A&Ml|$-o|I2E5ri5k5lE)Sa+jzzIQ92>;8XXO@W*L&rl7Z zvcDW-JrKcE9uZNk*8#h;I-IEPL}5}{ zC<~XbT)CD)^PLTVBoiRVOu2&lX7=QrH5LVc*(!&Iw2;{q(5+84io#5t&VKFEiMtgT zRKah{Qi`iR+w08A%6gEadOjCOjC@S_2?GYiPQQN6L$6#A ztB75h_M@zK8QJ;qNb1@cL06!E;h0Zk$1OHCtA+q9*#~y~zw?c^(!a+nbE|7=>Q1-CL(_%qXEwLCRB)*T zt%quTPWkxx??q6{;ZpG!*Td$;Wy(=YO%b%Apvfenrc9$cU!^!Ma?y{nX~y(Jqoc4oGv1YOe##u?Wo6s+>}!Ur-0hLs zMTRxhmoN7h>Qz7s?H4f6)*ef{69OF>4o8jQY_fQxjY*bn4jHtbR)gNl7I|#W4%$=aQ0=!9+|x*DEN4NVbV1q`ZIeh>1-M52MTM z^768P3PEhzeZtTu>MG?tP;FBEet+HFP851}rAYO`2A2Vf@3Qq%wy&bbk2`T#WqAAF`%?`0$~lqXST9{^O#T0&cr;Kak zU5Q4s236m_em&nGRJ*pC<4L+0&N-id=|&+Ux< zK#B2kAGtTa(2(H^fc_8G`mak%r;?P|O^Y4sm^0H)$CPr;v?t(f3XrG_LLE1(@p4sB z>3+|tN5P?8e`AsJPBi7_Q*{-ghULyU&+Eo^^@E#%y3uTOT^Q2ay&WB13$Jg+zpeIU zi&Zo~j5_|M$2tzXc(+@4w)*5pKA?bVyr(86{QUfW(Ej4%lVME6aNjz4EBW7 z^F!O#H*|PRqn`}K{eA=0ScUu9lZoET(8=Iz-NE$)Bvl8$4NjZ`|Getkkl#N`}3JesQ_NqUvIrClq%VoDBFOh29!Vm3O|>@HxgO6*=`#noGT4bKI9I~I&7=k zo&gAfrlL$(Zv!9$nrGLdlCKQ0t}L z)Kqz?nq%^a#;H{Bmz}V^D^lwS+8FGX`uf`}?|E2JDU#2UdU_{6BA%hi3tf*G&G%=a6JfaU#Sk& zQc`T}^2WyLUMDyIA9Hhaz^PbRSm(bM-&Tht3fjD{s;ZJH>~2Q-w2RRe$r~Fj4Pq`o zxbg|!!(@g1pn!p%-g7vy$n2x7$<-b7M`~=W+@JuHX@9yL`Y8Dvjsr~@vKHjue{E~w zWZpVC-xyL=S?Q1pjrO*KLvcmL5!?k!B>YjHlamt`rlhY=d&C@jY@<(a_j0e_@)S96 z0^r?uzcY@TR2H9rV7--KCjRl#Ytoy14>V*$jD@H86 zS&G|b_CUvk@g~_@`$2i_DWqk($?@@RiG?2MmH7GdC*0xXm6gNI@xjWj`T6!J? 
zwmJF!*ByzjWQJ0TxqKww`|2e4ZvIeu&Y<*}=`j&4Vpw4-#@ z*<*p9O>tsx?BOv{QBgtHg+|rU)@E$`e-tExH$__4FSZD?Tdj5d?HT8&40N&ffq_9S)D@m3ib znJ!JO!bgBd^rHB9`1ttb-_mz`yI6UP1d@Bn4hwLlIF{S;+5PU$uR`;&>nK5*KvTu6 zcY5YCUQ|oQgfi@7DT`%Ai^I5^1OZx(fP^i~HE*%A*Zx`{n6NaEVhTFOF5I>G+UJJM z-q_giGb&ZEF#H&50s#5tYERUdX^WY$hldBWcK#y~VJUsMW$;$Q zZyH_7v(lGXv`skHNc$m>yZz+Jlc}Oy${QPSN&)e}cD5nObQ+{wGVY2YddW!M)wnw` ziY|q6Qj+RY%=1WbcRP9cmmpE3{y9#KkEcXt-??)K_GX3sY^`dtSig5sM>Y57O-yq( zEx?QRe1`|WS{eK#92YL~i?Y*83h2@&Sbp*0)HRMmcN3ml%+O*5sV_xyu(7=+DRJA< zMGgvZ1oF{vIFS--QcaON>K3{I_=noirFK{>P967x*XP8gO!pk%;hr1QziT2>uAu=f zT{lxqB~RQL)$yoMms9skn}`k&$hf%C7`6vc7c5`gYGcGP!PRUyq%HEH`>)d$Ld;+- zKwYLgUFE(5EoMM|V=s|9GdnBm>$h(}GPQ3gba!;%6B6zp9Kg45Piyb@S$dj&PnWUI zIPEBP$7mcJM0F+b1E=1aD6?^6kyYWynr2%RzDDugo2^T%^}>d8^`UDu^$VZRPQ;n4 zifh`H6tszQlaDivbmrHL)6u`wu(~oY&l-N7yY|;pbVpo}KgDLcD8n8f@^?SA-Sw(5 zTpJxVjW1nRv=M}NA!a*SyJkSt8^hvOe+uGg;KKD3!=>@IHih-pGWDJTooi~XFIh2y z=5kaT9`QuuugrUFZ72x7+sh55;^$Rmt1Z$AwFs6dRQ|iR7)r1(DqoR59i`*<9Hq@Q zF?1t~i;KU12gm^;99yWE@0kcw=7$fA930^eD@p@{gAdtbHFLwm!zF-co|2*B&c7?V zxO{n(Z{~)soqVRR;^H1VIh812u!ib_j1@ver^Qb0{9r@gIh)nGIh6eJ<;&GosBN>K z861pQs#pIilTWvRm7wl9ARcWi^hr=h08I@+qrWFWnE^jvmMtkQh2`o|p}2ZAE+!^( za2-0Tkn@W2-mH6@UWw`br%#`PQYS0>9fXGjgZCUde88d8)6=V~C3g9yzB=EOmX;P2 zj6IG@k#}YZx`>UPBH|d}?mkmf>3PBb(mdZqG-NK5-o|NNQCPgZe49a5+`}OPBXOq^F_LoF-Ki1bp-M6Pceq??8_;H}}1x(C8 zzZQu>S?lZT1DFDbgj6;lAON_MgoK3M^fz)^T8@23X*mtRzfw|C*ferZ*{nZ+KhMm} z$W{?-ne9BgnM%)^`p%zO02p8mvagCZg-s>A2cy0>>ZR(TV0K;}I|s)m@Qj9r252kr zr?CADAs}>62y=37CIbEz;o{;F7Ji(iNhj)*3D-ZROooqz%9jWt0ZriBH!;~)nz=(8 z2V`=?qobwLMHc#q?fm3j`I#h7cdG$IH^u*Kj$+HmiM87JdTXX)mWOT*TQ8<6zkUU= z)OM^mO)XW*&=By=0eY2z7y>f<+72ol5uI4(ur(TB0C=W0U07Vq7~Mz^=j6Z4#MCr9 zTUTA3M4}1W?dHPnifvj=J)-3nNx;!f<6F3zoLJYT+M?w%j-gS4Sc?ZdfKaE@Jfa~$ zOI88Ol>631mO3l+c10zngb2WL%*@O|kz-?H@Uq{&eT#{SfmK);=QU~B9Q&M+oxL)s zCjHDhs>#`oY{$?x#hB~8ZZA=}dB5GDrT?unlRIgS(Y{deZ>-_3}iOo;FrVJ zpvB+VY_YzLxUZlMeu#i{A!;(;^Nf{OczTDFBCWk zIh51IzUE*4wLtbozpEzTnE^NoA;D^>|K*QVC*bIToim3U?iXH_p8j;9=`~PJC=bxL z7Fe9!-9ZJx42$C4Jp_6EEv=~Lg9rS+XTQYVw}0AMxfIeQ zGP1F){}|4<87WBMw@~%)__aA+3KiowX!4&7YrY+?D$Q(u#t|bdxW8PoHhx>$-Ik@C z8%iwg16^I+-V{l4{AT!x=y&fb)z)3aK-mbFXpNwGtfzO2HyH1zawi&V1(jTz2? zt=T%GjD8YXfK~j^3u6_5i-8h|G{i#`({o(MVA77p?{gx*dKKs5MG{#u{ASqt-F7*w zKN2ibbgDf*uZNKIvWttKLSNn-&L4%gA6%}WWnhpT7bgsmQC2}iBMNEzIBER3 z7wTUaUkEZrlGf;#wC%I5^6vt7HWwYI41y;7$f59s~;{@i;@@9E(I9c1nG&6LO#JcKsG z1$~lhS@y;(y;y~AGtZ@w&Ur8>zbOHz=_R%+WzI&G4Zi= zb$@mjTWzeZ@0Kex`HraJQVHAXD=I32&aJ4}QR%pdM3UqF>$=?y#G0GZjb`pCmn>(j zs3&HS8nP6yF8c-*@^O)&psMj1x5}WWrzf=L$GKW+k_SVuvSX%%S(*v&-?Q-MgM{(x z^t8fm`UD_9`NUHm5-N-Dws-^tw>UUV%Pq?$E?<|g{PwM-r3GL~S_Wy4k3;Z4Tax7A z?T{Dzy=EQbmM%sio2!C?g5_KR`meqe#kjRr{NTWv)&9cbT~+4^l9}6FJ;t%=@TPK? 
z_T}SZC*r1~@vja-U2+|C60Zp5b!Wz(Mrpnt&DX22yUJ}C;hxw|YJQGs!HFb905)~c) zRf#4sXp?9_iTi6+mHUq&tHEp)5TT!@r2mT(x;w||nZWq0VDseRr_^Rmnj6c1miywQTMq(2T&7Ek zL78c&dJiAQ$~HKMsE4M5Pd2x;HC>(b?1GJ^nWEj*6q7rV_;0}f)(&|#`Pi-|tcCn z?!KF*4$>39sJOWJ==#xH3?!+rsceFvBg|`iM z1KRn3TXl(VcXzjc|{Qc%lyRY$^;wl4q2 zu&yG4-Ovp+4Y(2LcYLO8VA>HI|6sSMtE)eKdd+@3bJbSw+SaYCpgmoJ1%;*Zo|ex~RMb-7%$Rc+G6qNB8=JM z?_wm7?xb&)uC;f+L|pw(+M)j1&u?}{kZT~(??huYjQW6|d)RLCAUse<$QM4kckdo- zh#3KcZ!T;c95;fK=x`>OE-ExA3$N#T~GSM8Y2as%;gEnXD z{DAw7aDL!?@}ABX3-TS>j|krS)^nMfn!2nH$mME*(z^{N=$uO9B~3x<2v-8jB9gfP z5;h-9;r=qRsSjgFhjO!K5q7!3F+rT#+Og-pateL(#t_<*bFbto&*i+V`3W4R5xr&mE8V~UIyCgkgge{|;8L7hrM}9} z0fq4v$mH(^A0pI$;WmaklrP`8F(QN6p3ug#Z9F;(We#M5M{w9LUygeBZXo|Lx2%Gd z)jDJupu0^0kcFP*vNMxR=`*E<+izV4ECYN(yi1qLo^f`ym2Yhr*ANiKkJYXu+?PAL zu&&z^q|$j;+WaTCJYIs0=%pbb$KaEI)yZ$s!*iLPscCdnSX^AMXf##a{mpB2yw#*) zVsKPYLl4wMHG|mY2pGPAqoSrU1P~)4RZ&&doL*p^Owg)mh3@;4IlAzyx#=_#rLL+) z+fEYd<|iN6bW&1Mpn&?WXT{kXkT_cdAEo+2GyV8?%`G7zp&K`rfPp0>Bw(vi^_D3; z`P#&HW1vS+Pyw0`)G;8H<^y-{-OC^TvA^%KZP+ynl0jr72?IlFcsQYK zTau`Ab#?WS^+D5X(g;!uGc&A<=no$Ls;Q(xZ}S378%<8R-#bKfcs=EWX&ReKojTF@ z*g%$=(&fTZJQiLD6I_MRM9G&gF3TOUQ@Ibem90TZGcz;$i!FBsvN~b;0ImTL^gBOz z9GYHIQsN>;hbm||vjH3ooeOHWB%oTbCV5&p0i#ZREp1@3L~ak|4#FFxrly{OjRaq% zT(Ye~-Dqn%jCEZtw&AP$X^YEnR6$}%7V;w7#g^i{a&|vs68vj01`MpM*ic2_?!n*A zt@;)g7J;}WX=yJK4703|HCvxRFE1<<^cZ2>)?x>JDRy(Z4-^}4dwWx*{mxHZ7n|^d z)ARB|v#p4UiCJcrS8NjpZzxh2C84ugJjSD)3XmodecFV3n4j8ZKcAhG120i(1uK+* zGlc{xc{eB3H{I%6dHKjMoo&OQNceyv_y^=t(58YS*&bxW5m>bMJ55wyfCT|N_+?Pg z&lTHynK86Bnhg_6$9BXYf*W~=WOX+`ocH?ucAlyB+^p+bgr7vaNaI(ax$o@QSDS#l z8iYp*%$tJS(DtmL!4O%XyDVhhkA@1+n&@%xV;C5~Tgcy3xfN1b zvp(03wq|qsjoY|8_jk9}8mm}`1qYtS1P5awd;P?#h;Z0~VU1G(c~I5vy`jy`&8332nJn&({8H3pe=IT<_X6FdQf{68 zFzC~|wq;YptqQ@eHW~099P6?;I`k66*fj0J9L1|MyUBJT|f;@d+TgZ;#8I%}+PODO$`T_=%Z?sjtcu zH)4TjGoNh-_Vm-{P|};K-h1sgLHL9=2}Y47D;ZcQzYj)@*g`vcdVGH$uTD=*jZu?B zYuTPkI8SzQ3kRfsa(*;h_xr?V#&l`das+${L7Nc)0fFk{l|-m?X{_wGZYkvqHa9n~ zA9(2{$kxlNsWtJU0iHT440tqhDQDHA-T#R zJw3fwO-lO5moKr+uZ?i2BIN?LUNam8n%Eq+p09kximlNPWyD!yNANB2G*$wSA(lRy zTU~8En*?czQ?Gn~XEs%(-aDW-p3khKV%8UOY>iOvsh3bAdiIog7!UcE;1Zp-Z_(`q;6B4KXI}xH;Be(yuhCu z@pEs_>uB0NAz|p`6Adr`eOcC|L z#KdR;;zw6q`!^xfA(zur&|>XTVVLkLrI74bOzBDN^?{z_q>2gY0)z9t_H9;b+olTj`x;ngzY#CzQsto z7#hyNnnAJ$@;0|_(+={bO`G++2yx`O%wgX!Fm};##O6JA781E zQd4)V+{gVKhg+7*^JMUX=n~1iai2Wn#XzAk{E^I1IDs4-w&1NmxBwWx1{9VE8ezC4 zKQ=ZJNZ#KU0?!_{+Dxra6=cQ=3c~3&FflQU460asaX~8pjfVd%{IBJ_h1EYwm&KOoXTOpt(r!&nHyl;1vl@K@ot8pwn*1|HfkSa0}Jynxj^!F+SlSH`;3Un&h8 zHlNTczfht?W8`;m)@;GCgrE|4_0R91@xEX@sj0f_t|H|dBc%Rcd!X5 z>$l5*kAnXRBpcO!GZnPS>!lzV2K$`sKQSVHRG>Y-v%^3^fi7Jj`*1>HrQ;!H@x|&q z3`7(UnR?#^fg*d7shi|o>Gva)=B08uG5-Q5I=zf7o7Xavb8B;B$Z0M(z_Yi>?txKGlB8W)+DDG=N*_%83qf{4 zAez3^Q(`eyeVr3PW9dc-8Fnuwg`2DWM&($Tj1(#F>?RFa6Q-a% z^B04-=spCOS8!>k4Z{?-Q3P|fffD;!F?km{yZ!Tay|vgs!D1e@$WTMvfhSWqkxYb( z7c-&}NfhYsLXv(mMIi14XiVP~d4WzKTk0I$7Zkak9W!p2$8kr*9@-~wgoFIeq)S{$ z_{Fgl!qJM(W8XLcY`p1uVrYHfRTD_pwo=Se<_K~)06Rd}L6NY(z)uBx6Ab)&azGpM zfLp@}O(2og7y`;T1(q-9iQaIoyTUo-G^(SAA?UF^UYets3utT=5_+B$>SSe>{nvoF zfz5Re@MLNl&g3KnF@j|qOna(!z+ivKPR`U+`Ef5q%@1MP<{RQzEFezzg-vQ(;@8CKtQasH;?g)y6lOT08oGn6ji z#ShKzadQ^|{I-Lm1G}Zml1qS29FX9s;d7Pz;aW%?16Wy|mc@ml2XrAp+RxY9Tf+Tl z9X_2XR9I5-u06?p7LXPaufQ`5-jksz401A;#f55Q%goaqI^Dn21*HJ zxs(T?qS7=NI-rE$moJ^7!AOC;#Q(`M_a$LOWUv1ddIm*^kU{DUfJ_$`AONVH zhx_}cE7d{x#?|N0w*l7FXyz&_DMdRm%CPUw=$+cS{a0F*a8G=#e>kNd>r!A!brSo8DUu$-%&BOLrV zQ23#@71bOfn`*j5!Vn0dn^JAs3Gx~3?d?oKv{K$GR$y!}!B+Ob~5P;Gy~TxC0i+IRUVAD;miXAIh$ zu8KQ8f;$MFjYj_P6soN|D0{tP#;X%f;|KY!y1KJHrMq{7WYa)zYlTvWz6&#+t8`%T 
z6&LfEpS*VkOp%k~75e&h@Szzg&DCKRq>GDE%;5fy&NM&dWxcDSU9{jYv&SmBiJx-) z0mTghCYrfgK&K%G#w+r1w^TsI=)Ys@@2-%9f79pK*x2vm;synL zP%A1kyOF|Yj6sm&r(WXrh1~<2Yphk+Zt8>8w}Ju=Mn-?(7YY#`C}DBZ$qF;9+`W0@ zoC`8}Q^l?cAQwXMu|%T#w1I)a;{t7&!F`}3=DjK1DlioQ{MG33hK5+kA!rDej^gw7_$jQ)#{2sCfqx&`gm zRV=x#uI^F(!QS4}-w^b1jGhJ^uJ3~k7>fXGp;8L9HI0uaLVdG+B&tdss(N|aiu!lU#M_a9c;sNdv^09Go!A2#M10{8~%Kh#1Sl+{1vIo`Ppzvl$n7pefquxdI+nv24-GO0Tyd z>VKrqv7+GOc2JzFT-FzsmYRImQ?l1#h5&$8pMb&tUlbc;ILMbsOV=H2jFM)EYNS<$ z{p|J%WoR>w`=5w%7mlK==ibOGVZT!T&pwwk1E?4kVrlFQApfzvx(W#O?%+NYqUq^r z>65kVfu?}ol~hy;3k%I^t(yJ{!CYJrFK-E_syjc_d-m)Z1dAZ*6>qWHl~4-SZL+E` zMCVqpa-r_wJNH40R!tP@p8HVU3&su{JCJl=1_!@3y=Dk|G2d|Fmln>L^e&OH;%d}D z6*AdR$^7c73T*V4S&8I3KcY70gJk)~svT9h{k!vx4X3`1S+9KP4QKAv%t7^J z$Adeig`@QXRmp$)E5-*6XrR{`MLrR=nol0e?Kpw)SmaqN61J zho$0GcAAc^cq>R}ARkrXFg@L@VfH>d3Mx6?~HVum{0*&`@ z8$Mhr5BNv@`8(6&m2}HQID;`(G40^< zk{H@bydZj%lyq+Mc-5_aTPH4o(ZpbHCjQ{CHR!U%r1ISmq>2~e55j) zQFMy7d^w6!@8A~c+n(o~iH0|#WXAk=ng6uV*rR1gDE5MauE2#CLTp?p;|$G}$a?kf zPCV{NE4-Si7#i1*k}wm6?B2hi(Eb9qiMEOL8=r?aP)tDGKUc;2yJTa zr5$5mS{$_0*PRi%VkNRjN=`-w5fm$At^1B4#1Dht^$Ylb+Ycfi!LseYZI{m6 zoaEv9paoDmkguU9IyyQEr~q7I6cHH%aROMv{^A?Qx%wCIyZrspLjY7UtsW%XgE_t6 zJz629Tz_am14@W#iSvdE4jNTL>)%PVH1%eyp5jvbSt&pC#~&yJ$n3#^0Z^4-3=2HW zO70Trv!8*G@Pzl?zQ`=592=2u@^rVWxoO6%=F@m}CC=un{S@Q08MAfTpdU)%>9+z6 zASy~~OH~D9QD~+Nd22%DpvZ!P0>EJ~oZ-081TjF;+Fu3zww_`4aG`q<0jJ>NG8d*M zyDcGHzFk{e3(dBxd}DL1yh|(@K#42pE_f(=K*SKOa21;#Db$0$ zGTsDIgx~(!<)cO%(gzb^XiKJWc5}weo0Als1B0+Z{KYtKBXcJrV&dMmHkgIewzOnD zuM~r@DcW%D?%o0tC*v88GY^(CxO;s9$&ryT*@w-NwaZ+j&Ki#cc#b_bv|BxMVY8T& zf}$L)ZgweWN5XH&kdJ`H;BUJjA>j=N3!LoD;&!d^N--9SUVUX+$pYd1v2`9CO@UlT z&i+fvI|iCga}j5jwQGuNY)tT0p0cpuF#+U-C=2KiHC0v5U0nt3Sx2hV`G`TyhY3%c zNK8FA!|^y*x%6L!hO)zL7J@5CNw{D5@#C+BFm9wSdm3kT)^cH`N2GNzGb`&y$V_Eh z{U@JF3vueD?QO0Y{>ER`juiqKH(%X}oVbB)e+BxVS>%ULpWaNCK|pG})%?B{HwSg5 zB2@u0u5PzX88^462u7@~UpIk-KG;I0PLEj0y1p~nMI`}-yHlD*b6T*=s1JihWMnRo zc>xJ&e>#5TN2bWGfB3u@^hgiahuZ^bII zP|;-aJ*3D}Ee*5a0;h7h4-{=}nrHh9{qJCei9y=8Iy?L2bo!%wBaivuQ6oQ}dVdV) z{Y__J&%`P+yLSQ%1Y_+ke&0vnAHBe1j>*DF_&`1a8)^vV*jQMcoP`Ji2axFbG+@6# zl;BU$c@|X$K|Y1l-6_P;Jv?xiO}uB2jx1))Od*aduBt_&XigKxU27~fQ|I?*$M4T6 zWJF+`_3LYB9fWSel36B^s~@_bbBc?KUY5;PPT&K|U3b1^-_`QJT#C(Y^c`HNgTc1I zqzIfRpb^ylJ}4q(QKsa5dQqH%(r6E+XTZ3sgIOwNX}7slI^NfO``WoOX8Py%QQdeeHWo=o_Z$p1`oNBcDQpO@ z0l@s&Z#iV`3PlL|1AEMCNAkRaI*Na@0EF^Pi+&MGo#Jl0Dc90C|0j@pq~{q8=c^TL zKt@JJe*Oon42vihUzRK~%^R00PO8-sRBu=$8lkJI^Rs7#;BliWAzlN`2OSp%5xTjE z8;&)>WVwCmTRU&g(qnYszwl8V$n{_}6&DxsTtydj}H?=scpF9H7OwdNPYq-5*BajzhWtS_DY>*I@?3yxYm$-2&$) zBm>KIRR&%DI~!b?K>4^;zuCq`|BA)aof5;*FN05rGA%V1b<=6$XQ~PYr6Hxj{>Qfa zU1G4cb$%d{*Wy7*$uO&RPKL1F3KEIT%*=!-2i`y5s;W}OTov<&dlLBXN$d;oX4U)+ zJYPTF=i0U52Oa0W@GEflW%=I>u&Sx`!X^e2;fu{^PmDK*mc-w5GCb)(8zW$d<;iMD zyqY}$K7RZboT~qsLTtLDhQ4oqhi+}SYw|&n6?fY#c4b?GA}a-k2M^EB)1A5sSDhQ^ z*w2>gXVU8I53G6p%JEog*$f1xZ7UHrUKqB9SpM=lU*bXu7c0eR6AMpb$=?tOp|uU! zduIz|fYG!srmLYL9!Q3A+!xnr_mr9theZFvyacT~(3TT;o&;1>ROl9NgI#ZKp8c9? 
zVGkSO{~oW%A1&b3`)FPQA;NWg>KHV3a62LA(+{{txr+ovitQi^LRr*HM;>mp*A;(X zFeWap!ew34$jAtQ79?T$rC-aVi`ynn|(%ldZ#_&JHL*PJ)b}4pGAXwFT z9?L*Z!PTC}Di+-)2@AlyU>^YHw4MHT0De8VV2!rKL`3HHzR!1NB%%4Q@G>odgk(w& z^0ym|7k4cJ>_E??XNtOCfET5Eo+0&Ky_`aj(?o-|f4Z;^)tdjsExr}N-PFaMwK48$zqN9#zAoWN0 zgWn^L?qexMHu`8beBM38xSy8M*Q2r!iRP^(B?FU_5F#Lz1!pQUF>w;cl>!B@UEji7 zS!igw)d}2>S)U_1VHv=0#PkwlU!5(sw`Ipm4tHjyVMZA)9r@YN+yd1i7m3e_5ujCk z@e6kw#YiS~=-&zGkE%JK38DQ=h;1iq71jOr^7cN1iE~he(R=~66KY!uei0l)010SY zO+pYf24R>Ofpvj$2V;c`&$aB5Xq|mF!n7+rdG_xXRyvYI z@J4*X;~}u2?<500?gKWZfxbS(p4B$WgolNh*hyIy$L*q-cCu<~COV1%6LPR75Keu@of8Hub?jAUUlcsT zB^r9~Ged-b_B*{C*G$GT@R&ff1x+_u=$S5rcJlLWupV4nA0CF*P#LMHO z|8ac@-_=0Cw@|kX4rd?aQ^NT#Pbjs4X96QVO#pjDMCZ#6{@A!oKY8FcaA8x6{{*|C z^&AX_zafMVxl1EfaxWdfl0Pg5O7(_TbPom!L4ia+vZI{qjTXhTS@}jxwkUiN#G)M| zvx}B?fdpq<42p{xXAlK(g#2+xK0|5)Xz0i%1DlPKw&146ywzi(JZVQvn$$JSpTHe5 z$|id){Koc$j5Yd59K2K8xe(d*My!yIoE(|fe^|B_cG)Ygkw5FT-kLJ|CmiT&N~Ia& z6vcdKfg`MH%SYutfCR`n06b^P1X2UpM!;yt$H%~;p=C%&oB{n~2~sCJfXjxuhhB0t z&^q{K-qSL?%3@N^(++b#aR~`lN#smR@GzG3q1^IQ5|?Y+nWF9pfQC$-aG9T!_xQ=Wfcrwh# zj~~&M9jZac=GyYIE7Niz?GlL_36_E7@tiWT<3WOU2tly3=f2uJmdS}VNvjCf~!~EVX765 z?>2b5kUEDcA7FBuTwQdD38&Je{bm_0?XVb$@$nw;oDCMznSvNaGsqsm)`yI0Rh4+6 zs68(vCCKrym~H?uU}IYc)ddQyPObM>X0=30C?%Hc|LkN-Z1_(Y-`}Ol`wZ_9Mn=ME zMfsqU!z(if-IbU3MRf~9@JK*K7;sHH;kGZBa-f#0Elv@>gBdSYX69uO5rLf9ic@8- z!yFq_0g!qj`1^gjM~{$2mN}@Yxfy-@afoRDaw-FN^cJY+yMl%`U-|@dtA*nYzF77H z3V=Vat<1tW8~P^h9UPGB?4l=nppwYv*ID+bnP<4{x85h6hd#8XR442~hsC6YN!@20^p2^uOYw)09^ zW{q4ez{gU6FPaK&tn`{GiWc7KaNLC%t{2@jqbpblOZKm>x>GK69BKW&X59o09Lf4# zEG=x~oUo=KtDzrr1Cdtqr=FP%)B7>qTzK;~PuW`0c}IYMK@zjaLBYY0vVq`gio|`5 zA<(|@n7~>^hqL$_vP87#thPJG?JL3*+Oo2a)=CIUbBDLg_1j#gJ)wT=c^ z8DxE4y~3S&r=62T!3?2)&=M1ArOq)Bn3OFtuNE&&{J)t1`^ERFL*) zn4ZAK>*4~(D*>V;aF)SWRKWP>Sv@d0(7bI1r}YM4Wk~c@Gk;bUefEiYri-DANEn4< z$?l?Cd^MYJV(+Zd52(`rC%Nw3*7Gh*z`5XGK{PQcfpxh%H2ou}j~>sT3+rAfjT_v| zNftO-^o2w6-%T$Ja%XsZgqom#{J#x(|F2*-f8C{AE%pO1NjVMZQux8LmMVD|GX<5e zo0Xm(Iw9lOWb(di249);NTr6|fZ+D5ML9Y${C|7>y(Yz?rz*=u;Jbp&;Ugfmdib%!R#SKKh?&Iv|8!e~*~UWFae=w#o~ zkJs=9O0FQ}&;ST-Xnwg{q8p7b^x`x#K$3TMl0T)w3^7N!gt{dIae2sq!)-?$96%fJ z*k6lkDnPb2Na?k<5&jFzoQs(#>v`LrQLfe=$Lz^#o#`BYTJPpD>xfxfTZ7pk7~&)* ze(OH#Q!%bH&h`)0f-w$p<9s8$6MzjcnFr=0s2&i^|N8vM&Rkqh0}_MaNTW%ehQ{Tp zsPkCH8IxoQ&k_*GK;(pm2}k0+CZg&;x$qS{&3*zIv9Bs)2*l@*CsU8=t_Z+%B(u_c zNW3gV0|1riXlK?URSX^j0DQyf|3}%IfK%DFZNp0>V~Na_p;G2~h)Psqp|lK{iXuGb#eZw!TWyN)0e2w7rJP=AfI@VgWJ zBem=^s36wung0m)0d!@o%*+t|s7;@V+i>80h3gg-l?oU=ckQ}UxpUjEqrpCrFFpbQdIK-Q2Z2yGfJbTmCmZy)qxZ{+gMw#5r9o756klT3no&f#Ku(xAUf`z%obi z9e^Q~s6I3kNlWoJgYp{A=!<>(v;WCW?FmVrmn?$&+N4kL7d7V7RpU&9?fVp-dSl~G z^w?{lGukr?6ye3 zC`FfNqJ}aO;yzZ`Lr|9E*U}ruxzXQ>j<)ifRBUIi^OJBt<;;<(ARBBXLyK@l{ZOH| z1EqJ;T4Iuv9_4S83PIXY$mPob+5ONSg5@5^t+{>VlBfgb>K3uhP37fH#8UX}AdZE=#r>lF3!=)wP0_L>+7H6FSU_~&=D zuG1>maV-h?dgbNH)3j{#Rff>Bw`G74Lhk~!6RQlZOkv_VWjA@{)~z@L^Gp!Bz`vnU zrLfv@K$AvFG%;b^RB+#EOSBO0jJM=I+uJO3nfYmNZ{>ejb1J1VW(-0Kv<4Fh82d9n z#;|T3Qa4Otq`})x43w9XLod$7$48z%rV$ba-138y2z}LmAaNj=;Gm36OzNjzY*iX) zxUFMiVq$L2XXtcEn0IR#r(qHgq^G>J*hswGp>5V7_(_8g+uN-k0CaKXGVx&Fd-j)(B02 zFya-ID3H)0p}~3UN@TTKWO&ZR%F9637M8ji@$hR~RUCu)2ezFudw3TLnLaV>sAgQi}*Jt<2 zN7s>-(#NkWAG)``P_%FTzPYBElXB$9)6E6@L^(sYYCApJv{38V_T1+XQPXrVg;zw_ z0%Jz;12JNMi37(kg+jQ#u+ynSsi8aa?OWhPk1Sb~|2trS87O-|yIJzh6*xA_5L1_p&342>gb5`{&&WFbs0@pEXwjGiXwOQ1P$aEE@&R5h9 z>>FBT6x(rwPiHl)rxx`OnQ4tC?k6Bukj{%UqtUl-H{avKae}xJH#fOI_TpFrU;{U9 z+=!09M;DPk-rm3Pcp8u)G=k8q)>O0M$wgXzh0_I~wk9X~O%8+P92mIkXTxV9ZQ98B zFrQ6IGeA4|`N+Net1n8bTr+!LT;LK674^^BhNmy*R^O3Ji|3w5WOxaleuw562v6_p z0Tad>qV-DxztVa4?h2ouS(94x)M4XH_LuFGI2VZ`dgR8vyE)#!G6={V+D4qXu)zRS 
zQT;XE7`9GRbmKkB6*O*pAFG}lPErr7X-%ol+pzpYUQh2smiKF&O>(wG3?5t=cVRaL zw+@B&mB!GAcKdr0N$j>7_g!UV@02{X9DA=f1}KI|e3_bZK6!E)0h0GKGNJ-oTPK9p zN+jxuSRlL-i9es+b`B=joRaGWrkJF3HpfT|KuX|`PY23s2=i~l&AGWb;178Bo#!9L zH`<1COFP4|US0hM;f8RP_4-C=!OQCdTLS#>sP7B5n=shCxgoP{8x?punOX#;D0Zi; z>Adr=Ge{^q78x@{ZLFarszM5@r@ zuP*_*zd$tjfFne2fvATB#CPV;b#!##?GOSW;I?0f;U0lQ_(Q|FR(e)$;tp+K@;~I62+XN*-|PwLOj%&WbMVia~qk-+t|(1^H{N+ zera=Wh3iS z+I(o0b_y1j=q{`P{7U#~EwMd>Ga+a?^WMh0hcYJ(p4U|MSjv!V}LWz0kP%A{}z?O{O zKcOLQeTk+2=R?5vj~civE-v4&FR*Fa%JssjJUKZDGwv>2egyb|cG6_CVE&kyd9ZL-CR0rD;f6;vLA^SkV|Ix48#f|U4}efcOit$*o@k)JWe)9O z+n_IOXlN)XDk5uZUjfD@U;)F_Rv=6wB1&hvuKzqF7d)|Xat842Aw4}n-=B+^f8sZU zgoGe3PJ?q5*c(f)0N=^@;Ac45Z^y^K41E>o;8ogp_1~$ z^neZ?lAOlIYMBQhXwICt)xU@WWFY@YOv9XkS#zCgi;|@ zm0!`(2sN&Sm64HxbHB=V;-CA#UtGsmdol_mS@<@R@0y8qK2Yvr<@xKft5LQq^$jbj z<&g0G{W>SqJ_9WRDvS#pLs3d0Gn}v@K@AumUmOZKiBx!LDT0IxwbBQjOKny&6}gwR zLzr}0f$t&L3HIh!(tR`>4g8zs)GsHivh4lqB|(-?p*0WbnoElIa^I9oi>yw?ML ze*XMi@9)u4E5|imumX5{d;3dE#%x&N)aIbi79p@ObwsOR>msBK$T}|J=)ilK#(VMj z&wGJ^9>_H`3q7yW*REX)3dU$fP*GO)E%3;fFYR&s#Q^a(h$SBo<+T06(1vbroxs${ zNZg$}EwQ2v2+Kqq-{jO(js5G*0i1V^q-#OE4Qj>h@B8}6oTB1-4?+RWG(2JL0gMJN ziBlGyMUR$QYjAmBQL6=V19uMS3VVT9(~Vev>9eircrsQQaUQzbrU2{*#jWwT^1kt$q&p?9&xm^vv9Nf@NJdyeYOk@* zWY$ybRgSa^oL{$Ox^#T_Lg?K0<~?KJoLS$ld|jTP&3>@#>f_Fll#`<@u5Y?|^Jc8w zu7ITqJrQO`#_aPfS`tip>+ZY0S+o7D3^!!*9joyzF>OE?*K1aMxXLE4{RbLGMjOvL!_uv82=web*)B%=jUeOadsa|(!sG8Dqz*^L? zG@b860c~8W@B8%QuQR$XWFFLG?95tiXkfq!2pYlwt-Nfz=?P$%5DaRb}h))2569gU4wzXz}~kjCUKTla!pJ}kC4^9E{z zJd^DrBIK=N{_!UQIxCErtuJ@5&zY$&mLzlV@MtsWkjam5D}h}fV%Q0C0XUx$LIHo) zT6r!2rJS6Ys9BtP{^O%UK2b*A^J1jd#wJW1$c^s zW_^E@W8=K{X=~o*rBfVtN+M*pGqW2<-#DQopB+Pq&?<&17P&hKYFwnox4-j5m7*U3 zImLYmgrhGc3WoyB7y*A4ZU)wjJ+CjtfmMw^0g%=ZMPVT&L>yxwY+mCzd3lmTpw&Z? zJ>cDC6bcOu{hSim*u$F5lVL8kJWSVYIwIX2a)jxks*c`=f`Ts@O}294xhvs2ckiyo z`hgG`|Aw;0ni1NKdq!ftq0uX~g3)ntTs%CdU0gsO>XnV4UBuQ+rc&}uu$ct2$+-;d zf)r2c8xWnoau<{(m2zgBpHrHP^+bSt;kaOCVS!74!|2uW7|Sg103^cy`2HPSufWXN zd_9pf9a-yd@9JuGLu{Y3`+TQsuV-9(vaa9uj?Eh3L4`aCI)un7cr`P!u!KGnW&!3S*4d=wl#rV{jv(i^wSj?}SIqN7R$4L;KEtkw2X;RLp_4;xcS_F8$45nItBOhj zTM@1vG%h&#*e{#`Onf~&X+P_@O)s8tJfMJedYf>ufvck^LBzr8HEW7(0hLXJZ&A&K zu7DuW@jx3HZO~<7XD>QGZ5N>xedi8x2Sll;oAI3hBTgal6&d9_e^IKKG#QTCA^JMi zvFw3-2l_lNF0Ras3_yQON^ura($ZK`1rUq3N?`SaEIzBD@e~)h(ppUNpj!Ogp)9TO zD~ByRf2L8eh|9{#u!kTsvvwIwR!9`~6~I4+WJbbs(65|}%1oP~66_XU2hXw1nKNh5 z2f*-%(wIU^gvF&@AL_AGs`%dc4=rx|=69`5b!b#)5**}3oNOVjVYU$UnRg&IPuXv)!w@oygD=afK)v2q&u}cEssDefbd$? 
z=C`fXYL1*fRXTxjlF{lLd|B(nHf{d+Zv~x}zU_Txmi0g)5W%op+w(-Q(+`%DW)e2t>@V+j#?nH)t;D8UYT~*GxWrO@#Ue z1(8r4g9a=vhnXDGmL})!p>K)+PtZLXl2IF6x^$`ctAWAl@{!`#dx}d-Ss55q{1-}6 z9p}2wWd#fNHsg1ys;NObULa83kvI&E0P;ssH=itTq>#b3L-Cz_=g!FMl>GdSla_sI zX*qdY_QW?n+87j*_;ZF?&`gDG>~WGp;b>;>`1hEa1krJGV+y4RcLxnx3ZgR0+((V5 z^@@vAtD=HcFOcaz0x!vGeEH{DWM1wj8sCJ<0v8GbmH=*2%iMWaKf>Z888xX~>hNuS zoc?gE^TthKfDL87etAHoPekU>5ycy`-yyLM8lPYh5=lEkkGpP^^|APJynadZTi$(t z9(#PcYb3L(A4jsW$~H}Z>#M&q_=bhI)v!&*s7{+tQQMayQ*%;iFJx_I-rYhXr5&41(7<6^dZrR8y|A7$8Wr+CJ_dzxtmXd(D-6qk7) zrJjRkl3gbmb9_i1xNv7669Q_0&J=P@nCHrI^C;{XEb@%}1tB+4bDjO72 z&;ZOW>NuU#VQ>I*Lj=+woA!Rr5q#TUY7dAuG^$d24(ZxH@}uPb3OBXZ(CdodY6JUn z-)r$o-`Sjo30&DvZm8!w9kUj&&B4wRuD67FDy8O=A3sJVC%@`K_=GZ%8qSU?<}@r7 z#u0;9;}g4bBdmP%d2%NmZL+d`3={>8Ros)&I^7Y zIx)Q#G;~ZGI$+SHDSITfz2H6Ok~B9pGZWo_@ii)CMQBE_ykgq$(U3N^FUTOv1uIa&(P2`Y7{0p(6_&_o%c6GeB$TpOK8(jup+z@ z!Q{vYwomWDlc|hi^Zk2&+&iee@tY!Qo}p_NrNUz9lm;0Astvdecu=Q+cLR!ovVY!l zy(XeB$)YIYe9@>Ayn9pQ>(|`2Z-1#zLYIyL%m?=b#6F0KeNlwXUPdO0vH-nVHNdIh zSGL=hpo`c9=?a*8Q0W_ac(N$j_tMjUD>}M}st0IDO+LT%$Hg+~2o29sYVyW4n#lx~ zQ=&Gdv)VA7Yk4$sFES*NG0h-^LYg8Li#`WRTeZa{!q)>RA4@(wGBRb?c{%lX1apB+ zJ9jRs(H6~r1*C>Zj|&fP2F`=sXd1#71edvhiuh~EMZyClU_{*t-73g0xkVKn`$4P~ zlrK0K@u-GGVNYY(q^bZJGK7~v4wRJQSSKnTA*zNOI|I~kz!aCDyFe-cjkrC??_h}- z3Z5~1QU;|!{29%u0MtAXkl<47XoyWpTIf5Izy5EoJ`Ybrev;q!Ca7BB!~;f$*cx>1 zD=0ne?d;e!TT|3OAo7r$Js0N|3V@Kw&0+gUldNw$ExOQKx7A1_<~b-qF^NMNmV}h530lB%TeU9O<>^ z0{XRd9ZMkiWj6*%ty!~%Lla|LJkijh%7q~ksOVMBER$R){MsN_f=#*@EzTDQ0C8YqP;|OZZVIc zM?ForUpQV>R8+2mFvJC1dISa?i84B(_@HA@*;ZJW6hmhd{Ek8c!!O$2uPutOz@Q>T zWMYB-$D@)BQ)6SelZNDM#06miJ3Bwj&;N#}DA!%LQj_lQXS~TbdMDtO&n(So?)h)* z?SC9inY9yZM7Y|g>d#+!$P#J4hLpvWXnXgBRI>J7%Jnu`)X=GmokZ2&^5(6_-=ZeS zcXj&VHwX1TJlUOX?O4&sh~rf(G_gZ0tMe^qm^0x1mmk~c7p{!*$Iaa75cB`>=A5?H zx(b{V(D5mgk93CXN9i_wS=~~Fke|I&2?r75FUrqYwknxf1Mt8efg>NwLs9zGj+-Y1 z8Av2M`+@KyWb!pV5d?f?pFd)kxbW7@!NbFY$_tPeTP=L;PNVWBTo(>IT3cHK!xj3B zLIP)>@Ar>1XvtZ&3@V;Q&<$#9uyEL;z*|45evxuTQd*jrDFuMj+TNbP{9$}5n8lL| z#Hb89<)1&zA9-ZgXd|0U4zxT7bZhAF`^kxVWr+0$cGCCmT_lP&zgyIr*`@1FX(c5y zD7~SR^Z=?~=EQ4p92)ifNRUuieRwUyBYSz8Ug;0!O3(ALWwf`bdJ17!yeR-6QSV}|0!(2P!oiL<6xOwMLLhUt1h4T9{Qh(e;1+a!a08-@koaw{{VL_kF^KB1Ghjzd zNlreC$p}PyzYNx;p|O$ZBGFd6qF_xvv4NZW8@78S%K^Gv#=i4Rz#}s=^ElQ&whqE~ z2@$#YF=;j=SE2xd1?dA;HEx5AIL^04#C8yArFe5tMJ{05@pN8Kn7<%RMls&h(()KD zc4w!1MLCW<5A6SV{BhkPVI5t$i}@c}Yvo(XNSqLtmv=|)L&PCsGRcJtk2TZId3YdX zSXd|n=PVZGmJpmp-e2Eb4DAE_8cXIB5ID@R)91T2?mGZG7{E*~A}Fb(wDi=sCfgzq z7M>t3k*G-&m(Ag?x|x{X2E=o|;Gz?9_4+md7?qHR&i$-gr;gs%B-;oC*6>IM-T3cb zUVk<;1nHDoQ^avm89TMgupfEeRvppPzM7VclwPu0xn+M4mWj(?ser~LTRg9x`SGK%rNy=xB^Guo%V1Pc z?!yhkZnT^=@$;o(AW z6gdhAZYGaHSBe|{QdsBEp&Q}hhC4RIP%yv%zMQ0_z6WOVb5r<<3h&-#^i06VfkeT9 zD8*aLHtGGl>Hr)2b{QF&ojVg*-790kReTQoLR?8YPR3`a;Q_!o#3+RObOxys7qzcz zT5*Iscr$O#0ARAPxX8U>11>D~+Ll+Z@&M3)0D^U8DGF>~J17P;!o)1YmoLXzOr_#M zff4_hncWQBETb!0T2>|{E`E}jnWL&)jEj%u;q)5>49mK8UvfTv8P>W;-^I-JATLi> zS9f(MHr*UtX`ng%>cXn!SFElKj(OD31@(y}r=@ua)9A?Rzs#bV11`mIq{>DWu*|4A z5mVFF+B$`064QoT009@I70vJ7-TtE`D1_!Dc-dVerg@k+8C8CNW7mX&MwE(f27iGK zfdBE&Xop%|QEW=~%(&r8Q7R7{z5F&)6@-M+PysJ5F9&PnSXW!;|Kz4S!NRAI1z;;A z3J)Dl;-0-*wT?uKlr^|>aJyr1Ai=#Y&qA5(fHIC-(i~xuBHDE)+TW{wINM+G17&%s zI0Bk(4kmWsPy?fx5(lmZD#Q$P`OFAD#ZNh1<;M$B-9<`49uU;<+=Ll`u*E`8JX9$< zio0BMr%&Y-J(pTz?a{`|N8ZjpBpgDZ8wK^2PfdlxH)r7NI*-kwznM6mMl*2yJ*uiI zj)g4Oe>nCi%IxL?&#W*@wFgud`W??h!EVyTWDl4LU>!vU8IeK6ybPpXGagkHs znu+oN<>N&9d2)7o`bmgOz(M>+$TsWde;E?g-|!oe*SC&#JxDfq`*& zXy6h{prERtfUQQmt&#LUD7wKt8@*j!8!=>qX$kbA-|QHqv&QD;zSzjHKNQY_fq{0KF*>K!gyYyfy-IuD8XC`(DTc6G)1e+#9kx_;WPp{<8ZJSeY;0`nGbKY(5R 
zWYySe7FJg16#B-0vs`{!qXVAPO0xmG5bJ)M4gcVP_uUib8@}Y6EVsd?Ye>d1;Jv(c zTf800V?;zq#iRZ81^M7K4CEG~j0y!ndZ=(`rl+xSpc^HEQ~olDjvYHt$^fuMp2&W` zjlDPsae2R=RKuQw3e_|T8&dIhdQ?Z(-iKKGR9mNIWWW-#2NFMbPftQV+}i}+1r=uF zuv=hYVApF8=k?_V{~I^yaPiH(klww_SN0vx4J6S%TveP|`&F$xB9kK%|kJ}Ep zeiFV>V)c+=pmcPI%NdVNVxgSsU!hZcI(7ZQ2_etxHU7c=t4W_zT*}CUPJ+qW*Ta*Y z5Fx?M#iexmjo>r2KS;a%fp}xP57$v4HlUErJd~V+`8}xavD%>^gk0Aaiwl(}B%~;W zX7Fkl!i9hffpW)AM>w4z-$j-(ATFG-#sA=~V?FzB@U(ld$a|?DJc!p*W~$*^r*$-1 zGn|Ori@yO~R15GO76hB8X9 zUOw)+!)6^RNEF@fSL{wp0=9V2V2C=QTwQn*efAROBND{VV}wXen1!IF1u!Jz=$%fmZ~$DHwyBBVG8sF5tDfv6F6E^0!!0D8*K@-JSU2KbFgR*-=O zR=P+Xe~W+=q;<`LGav>oImsz**^-Xx9VGfHI_KHSW%Kt8)pDB<*I|uXz;urac#mS9?6$O^m2E5SJ0BA)17G z#MzfCuZS1SHc!#Fg)5OFR?Bwn*ZB!{|fZS%S`_J3s&EU%-#2VOvfixg90{2zi;yl1L zQ1D8H-bnN!=+N78r>;@5AA3zEMMfTmEt@2KFhmzou8lI!o);N)c;GeH&XR9M1isW z#-!(-s+cwqo|u((MuE>i6!{F`&fFMF3MWg8ikKJ}u0%$1b6ii=Ig*AbYrXQWHS|oB8;N+!FiNjXzxZ)@m7u zh--Wyi-?KkK)vFW87H2Hv;cvXk)44$$lk)CMVpWZ?A&NAAe=G(4ni~(21v^j*VgHZqs)!T$~yQjxAc}NJz zR|#rNOsAt?IX20tts$fbREM`J{cvzw}X&AQ$`}qdL(lS7? zv1CuQ2V%oHj2zxPqaH`H${nkjm(kAsJ?)V~vIHB~2{dU0Kb1qtt~j}utsX?e-;Q+f zUPcTAzkq^@d-wZ?vT5So5?s@<%vQx%`t)MGr%fp7d;9u|%12t0$039L^ogC9H`~Q} zyWKuIjccNFIHRG@0p^06${izEggf%0z8-}ol;^Kf)UB+jOK0!L;K*sHs~duYp>5!G zm<3I0&COPZCv-QjBcY5{4;1Jdw9U8xN605_Y_yQ_yXpExH=r~CRr;~9kjoW)tMVjiZB1Su#3+3A#*NKL zE})j6K|~`*q*PKpa7xR{c0Tbr`X4QTEc{g8(O3NZsfu}z%yys_aC)E!=Gx!}i6_!d zfLox9!7dZd2GBtpqjKgV%ja~bzJY_poE|(xM9?*P6!#E?9snkQhnmYzw;dvr9r4>B zoans(2>_4=VBm;V=~IhnRsmInRBlae1jr>PzelzXqAR|~M+47{s%3p+RF<*{>ULhOra3>%mr-wCM4 z{`!}eL4yx+894~}g08ZHZ}shdc%$cjbVF-}EhS%`{^1f{9CeGUm3EdfPlje5d%Lpj z5(4B|)jb$V2HPggoG2>pNh0jnJ^qDpf#3UpH9;I!I?4eA937O3FV44X8vY6UZ=xP1 zwnIBT`0=9wCMBHeKCOTn+V|&YM!(UispZd&91&Xk7+gI(ApN=7Xd4_KLzz38X0QQR zll_!5#Nj6ZFx_W*$^LVg_3%LN>eE;YYJh)Gi`L?cUa)+TTp$Z?tPmI9>l z@~XB+#ivhzmjsImy3dU7sqP_k?HKWMD=x0SX(Ip=AU#AfvXlm}QuQB*Bv`jewFA{n znN~-n^78b=S?7x>x(4ai(NN~_?=nIh4J&s3DY3I&L#6qCNiw&d8EXVZaxo?nQ zG_pJM)@6*z*FTU9GX3gCD!GeDe$ivZbji<9{-?$irEI_=%#1RT|Bkn@#kgYXZ-xC(4$HZ!L;$84u~IwXrPa- zjm6pB3R!p^6)5>$wIa$@nY+NfGpt5K3cnNzdZS6l=gOI z8qEfK#9N}4FCwx8$5DNiR_%w+xmmjQfx!;k#jbp-*CuFsFJPu9d<5_a!L^?}dP$jC z)}I#1&!mKl9(#0S4{bFTu9*5LXX}9jaQ6N&+*g391f_?Rj!RIXuoDy8!Jf3QAg8{) zod{RPU%;!qjHE*d7PJ`>(F*|S149Hf8sAxd;kVngbGQRaFcfJ-oq%~gylHZP8Bir7 zH!Oq=TIfl&!wfcUyC%VdVV6^M?oVK&9M;O(Z~391Au#pnz0Q=t z2Q(~EtxHq_B++=8#qQ+*_}~LE7zg{>x0=O=D9MBKhu3TV@4Dn11Vx9iRa!oLdwO1- zmXYO=U;-a~Hu``qC zgrIg2sdP0vxRg}SONbIl4Av%^ACtWYcva4&Lj8{1z#i#{^_m3eiV1pgbcL&r`dmwC?H*|8kNzY4$-5I*K**$+3vImaN?yp#`Ko8cypX(sSfPJaQchYv{|3 zF%1elk@vgatv1rQ&h2^kDW1E*^dxI~xm_Xbsn_U%&EnX=+AWSptD=f+0I;vkK$gd? 
z(LmvfISYFB?u>hm@h2b1omu-Uqf1zC<0_IV*PZf+=88pkh-ZNT+YjA`kq(?oYm#F4 z(zMPo6t~##dYf^sz}YAjG&3`Uz7hQz{*5dU4K^N&yUH$ivDoG=2gGK=Dd~9nG#W0W z+)nJlA8-9w;p^cK<7l=m0<_!*;0y=--}qGg@V_NCb5L!n%gNQDr@tvIlbr{#=5V7B z-6|MjK&D-_D(8)P?D}`gJ33ZPSde5$_Zb*9(|MBL{vs0NK=EmazQ;C&uPbN&yONEC zRE)^*qyg^?$g$ezhp?EKn2L%i-Y$58$cTu}(+c!T8umnWhlP1YAqg@{jAn13CLi-l z;W_je5*YxZK(>I(k{!;S`woP+vahuGYrm<6kg9i5D+A%PMxd@C+0f7sh`DYA!LZPn zS$s$43Qy4VCAz`KlK%Ln)2IxopdgB3 zZeSo7-z@49VuW0zIIP0|E_?$bVl#n6F7;}FK2?u+WWae_ zY&!O(uj(rf-`Cs-iNeAqWIwQ%pv(aYApAdMQFp1t@FAqLtVR4N7{H&?CP_sAhzOYW zW5XU^HlufGGlGP&Wq>Hiip(qQ8&gXiMEO{eQlJugwtr<~#UKmmiSWnA ztb+OpgkCAg{Nj3bqS3aFB9vLATbD3Bh;V;FUPbik=k@V>546ZM8w8({aAyu1eEOpFK-gtX z+8`;_*R3i?wP{-4k!}V1Q9}3Mn0iIw<6)N46cqg$MI!adV1Aj5C^=hU#Xr{0h-Bb$ zA}WVpL}^USGEBqqTp11N4dW&3qlVEFwqj3Hc}ZJoKTNG5$?#uipdrnySJaMeXlK$Z z)AJEv(iPe)!S9`z>hC$pAtTFjn1;ksH>4C|>qDoL95$!Ox*SknM3tD`kvz=JRMvR>uO_~c7#aCGo1 zxT^N{nr3Z&T!ts}&+qu-g0ZkIPA>P|pe41eI;cz{CEQJT{e#%w>n~S<-b@X{b=q4U zmd;Ni>8<;fu`1~NM=4zAEFnVFmjBPMtRr4*a|HsdNsbKoo{lprk7!6wX7Hn=gf&x8 z%?0rhz{)J&4KT1WyYZu>k&FNTzrHeIzg!|qyClhz{f4v5?xl5x0$+C(rjCx&-t68W z+qj<^2%$C_HJY21@5CQ zUwrTsr7g(3Fuq)VMA$y?ruTa7t>!Y1R4-hi32OYf^S|GoAw*#D54}9CEgV%2hKfps zZ^=FszKkM7F$89S|9rUTdJlM3?iyK6w;ZC#I~u4DHkts(*;lchGxd1gTg-pS+-@gK zTdT4fSJDB`!2h^n9Md~D|Gfw2xM?~L!$C8$mPU_;^(^1NKYrG-!ozH1qEuKrzfU=K znor}L4+za}sUznvH+0mP3Ts1tazr9*Ki?yeAShCmEkJkyW`oYN<7zPNVX zu<7qzxV-xP0qfCtspjhaI&^<7TlenurTvCBqqOg)<)qE7(98Rz#rV!|5RpE%MM&b;e}v*n|`$cz(&+l88F`fnNQ1qAXTkY+jn{5^1SQh7>p?iZkJ=>EiFbKv_%eIwBfV*;A< zF$}2Kv?;H646j4Y%tTEGu=l8yf2;+#9s+9Y>*;|a0i9AotcrhUlo?-=S{tBW(3e$? zojH(Jm>3zc4cxqokRS*p-~EG|1A~%Chs&_QS_8!QY;69f`_BjO8f9I2?lF#X006;b zlU!1rgs%V@&e>u;%?5%^Cdy&_5^Rh)ox2SWL+iF_mroQM>az791>iK{_&e1Dv+n|m z5Z(;v2-EFRi@3p_0c^v%he$%gK$jT^Cy*bQAaB36Sq>vwfOJY@WG9sIs<{DB&zX4w znIgWZoJAc1>{TluU>UtIw3NsBEsm{Aq^4DF*)1uV2lFoWev}Qn{1+62KEw7lp^fB* z1t{7;%*$FoAqBXjTPTiEZSKBNd?Ph>eRSCEV{FlFJM_$svL>o@@F|5{efM@RPiLzd zJ~B`Z<0>vurvOEx{6R;Dlj4#l!tbv~hJS(6#Jt#!^>6bFR0H;_)R>2+g*E{2muwOG zfhrz)2t04n)ibwJqo9c*o*OV9Kv|H^A!o4@v!EbXgGdt=4Q8bk6g%kD?3oGE0(B;@ zd;s(g9n}N+BMgRYJBnZ_$jMN_ptZsk$SHkqs2KL{XmH-)^JmNPe4wJBkU@Vzek#$m z5%>5;WaO9a#F%rSfSBBDYHFHwjF?aD;2=sZdHlE^e#g9-xw+QYukXho^#7pWIyOz@ zK6GU2^OGF_cmvS#N-8RnbEeJ(@8>yfC8|(;p5Gif>2!1kWJrFdA<;4f?tq+a zo`(h~GX~=Qq6s{!KuIkl0FEmf)U>MTw|t$nShFVE#zy<_;dLx58xl?R3Kh?He;sQP z^n82oz`w3Ej0J>etjnDl*bTDJK{MeZE}{dgc)YUEU5gg?d7Fe~#hcE~Qa~kWbI^8r zxVxX}DHudIOGiaYO?`K_H=(awseKO#1B!nr-wFx~M*&=Vd6f+^ygER+WF$LFPP_d> zXXT%#7$lz=Bi@0`azHj0*iWtv8<0kVG7#PO$jAsp*TuC^fiBtN8OM0Ua-cR?eUNOg z4mGu~aJOfNOuvjG6@S#o=*`NeGe2ojaUsXK78=s%XbyFKbYPg?x3c^P6e*59*2L;G zV)7&KA9OSj%Nmd&DT1!)2Oa>x(hv2@kcH`tIY3!QtbVHwDacME_rxHMjy~Jwoc!5( zCZ0=1>03f(rrs|?WB|SbBm)vWQ?Egm1uO{o?5bW94P7ui0v?;S^^u+@#6yk;DB+Rt6QHGzMt>VRz|Hem`pq= z$0&qnI;IUKKPY#_t1`#Hmyty$A!k5MTvt~oN(B`18}^QAG}C9cr^moObKu}Xbo7h^ zvq&Jb7uo}0roZ2kEV_8>PAdtTr)d~{i5Y!X=BS)8C!ra#159+Yjxyh8=kgWHPydce zA)fF4BwT*LfS(|v9Bz`>u_M)7KHH{=`?-USv7?o&sH9(%d0nE?vwfnF&R6)(icx1! 
zOUfk@e*74W2Fx!SWqsHKrUdKH3~?&Ie-|>Y(|~^wZfG!=P@dv`gPfzg)G-!%GdA`` z95Ht%MfnrgX7zxUICw`Yno<4U2IJB~1un}-HA;W`J^nZMJdT9 z^y}(IgnrJVod6zQ|Kf${2BxyMzP_YCk+SIG(CZ>5UjgEE96b~7F*WNP)(8eVH^qo9 z&}WBpNkR^aw>i{(%EcuRdDGD1;G5Vkb@0jg?Sl#kfCC3dU0yFz0EBE&dQJ>qS--_J z0wfd0!ew~%U{~b5e>KrQN(|N0I!c6Gdp+koxJg`$ILafmbSRkVQ?WrO%AI0Tr(VAN z5?2`zIiNrE@Y}Ou60tR795hr|n=zpS0$)76-1N5v?jgM<&h4au|Cdu!!yp>Lc|J(i z`XVRv8C`f1{0FV~67ZQ2o7oFSe3Ca&&rU_tJ^OfFC&j{ILC`oUUMl$%yXJAf$x>{Y zShRrvGPK@r>$D%eWHc8O2Fo^i6lYlF3f+7ow7E4?Po{ZGDvzKBh;ffF!ao4VFuP1>tB-wWV9ei_PXF7r}6 zDj;mGOYgK+GnOz{Vu?eph#@8(nHe!kjvc45aPXFLL0UC84}Iz6O{>&)j1tQ{F6tq_ zXVmJEN1P5rCG-<`IC_pQN7P#7ykK9GZ7snrkfAv#`&zJ+=~SXulw*Hl8JPY6#8q7n1s8mC>Ec>$>Ui?Rn^NEFHi>nU0%y87m{++K4e7rW*A3f z(Y?@|XM&}BaJdI^D2vy09AL(%_8jN3_{XOnpCADh5E4+w-dB~+S>W77%p&MK^ttC; zT}LsO25~T8(y}Sp^NrX66VTH~M`NzR0yu-w2QQ%K5wPG?V)TKoHUj33Fg1kkcO(=|QNW&8afFkAT0x9#zCs~UjjlJfEt)C54uvWoAZ z$;K>XxKy!ERikWEd`Hd-hUOb5QKA}?XwJ;`(TH$03(&Skk=J>?xjmD*$X=_W2yO_T z%7^cU5>Ol+C`sQ?kpKI(+O?JR{K5H?hNE=XolVMlovlg&-rJ@6zn$^4i0wPtqF6G( zN*SkRp^R^z3~Rrus%Tbh;yb6Iocj(>%xnCw{Q4ek&RJLW8d@4_v*&>ok$=0W!!4#J z&cQ0AC$=o>R4cuEdoQLWtOyho#^Fun=yxE`P?;1LyuztV>!3Y(oh++~-dFbdg&UU> zje=CQ{qDuo4$w7I7EO4O=C%>7l&2M|TEXyKploptLHLlYE1&Dxn$EdXG=l#kFo1k8 ztq9Z<;EGQ1msdV4%6{M0zOc8&pj1gr(KUk5#CBx+QK&K}>Y1{&j(FO&5IXIio*u%H zv0(%9Q3!G6$PEUDx9UcvG3oqIC`OB7q>)+NMo^64dx5ebc>I!4*A)mM>T@0i%yG$D zoPCsNG^e)_rVlVi7{8tF3r|wJw*51`rG3wGk`62&(YzGYJ*pn;R`?!*=*vT%hhe53 zG8#joaHL^?9gM-H=KdhEr7r$;Vje*c;vHwttlG{o@FGYOm5e$#hrjOS2_i>o^RCl9 z1w!%H|FH1XGV|_GM|^~g%$tFM0BE0rSwF|8%S)G20ijL*_yMn-a0mw2V+4A@X`CIGT_~TIzuuBue zO`vkIf0cxsrT~y2Q1`nzKv@N+9L6SlX-)pT6dY`lfBYDx2Xk_6>){V}E1CXTK7*3c ztk7mP)dTu5T6&f&h3eGrV#FzQeES0h4N2p(mRq#010v&~dO-UNJP-RfI~8UKJBW3I zW4H=;iWrU#?`^6YM^??izyOk1(724`Zx6xF-_zR*>Beci9fWL3N!_5RI<&!h&T z^hR$2eS@uO!HRu$tGsln4r0xcT)_~+Y5^6Qipo5mrW|ywlmUKyG0U%5!1;^$VW?^-6JB*$8=r zd&iZCKd|yxqC~fp`l4Dmw|3CQT9NM_MX_bz9}sd(B98xY)9}hKQ^q+(#F%HH#(*x= z^~}8N>HA-}n-ciqT|_AW9YAM~xBK8S&mrj@kI6aR=jwN-8D$@LH$Juv%N2J!H3n{} z6R4FS^v{U7!6PdyBBHg(q~O#e1FIR%^l9C!qv;%nsPVL-u^W%&{ zxDHU?Adm`Qpc`Zg4fa!puq059+bIxwm_mo_!}?8v4J0%gEKs}i_;0goDBRzeBN*-XU;!_Xzk9XmdDVSWgDSX}9%K{YBy zV8HcjW#zk8-3)4PD6fF(?x3PmHV}(-T6^8f^RH$ir(_AQh!Yh`;fLm>ce3GJ7PH#}Yv5(RZxnE{kGD?5E4$v|?$7Ys|bj!KG+?Zk*W z^Z`I_M;`>oF8%rt=;!MTDV!o6|K!ve>S3msd;ie_#KlwgRSJfNuzW&C6Vr zE2!PE9K|J5^b6nh^A8*SigPos9KPGSWoXb~W*Ew(|Au{qc z_EOD#`$VaLyzmI9_1J6Rs>5=L4C>=I~!D#r2Pj&<{1kNL&_a_=I!aui|?%po_xfGnhu0EIiUo;ASJR8aSo&|8{=|IJXxykzbRY(y&QDGYI5?8sqEZ13y+RXbZ?a zRi%4WnPp#vB+m7377EdAd3D<{e{h+}j>3AFt4I+~^9a-D4>|X$_p#`eb$*u+9FAt- zF{lp9rItO^z0?7`p&EKYoV{r=7>JcebYJwON%1%6F7Xs$Q%^gly_+}r92(*zNO-}> zeEf*9^iQqwU7}RcU&fLN%Mb0P4)7!>3B)%rd3mR&G6j5s36VpyqUt?MMvMd=DZ-8< zA)!a5l)8La#%@*+LCcJ1mo~57gN4WKhUBkcmJ) zvV)V;0}$cS(e1dkFV<1ccs{$HzPtP&0}%&L=bcJ?=6`iO*@nrhf1W%Uh^!j(a%ai- zW*mqW{N<}}OZPm(6yAb5xledrL;-amE!k(wKF?jeF%7PVqeE^uLrpjxq}ko z=2>EOli^+Y?^sHtlWLrEQS9?-407KmJ(>Uh#(=c4nR+ z;&0Ah*|}()KoPJ`Sf!eFvctH3cOm~YpnGdqd|2MJZ=J^T+#AF~LF+u(^?cnP8NKXW z89kaHKDnQ}vsJ`($feRNrw@jkP`zIAtS1N!!ZSAgWChHS!M;+r!(t>bEhwPn!CsjmkZIwUQL?8mAZ7)ERq1wW3u-?o_QLO0n zzdXP(ca(ZW@Gg=M@DK4N)L3pVyRq9@JoM{c*DRb&1Mk_pf)khKA3ZVss!rET-^TAl z#Nv0}Ah2PaRx7aj!MKi5XtF?L0U!SfoP6(Xx_>HmjZ+$}aY&JFC*R z3QqY}JrbKbpJo62Wl5O)gnz93c7tyo`nglncOEi&>C~^O^Zh!M)MXg|F-e1w=?X2a zyg{w{Snu+z*Dn@H+GwBJ@s(?>)hTVVS$p!ew@lB7|YWm6nb?k&qIwpd%7} ztkNy?L;@}bpu{lw$ZgxV!@q`O105p#UN`;2m+>MHq~o}NLkVSxRyYn%7PwGggGR;t z>LK#i6g<=d~e%-+0p+3%Td3k$}M-q@?&mqqVk$>CY@{OXIfVZA^y z0+xOpbiQ`zzOJ5L#=Gkmd$M-Q4AgQ>_;ZaJY8nlCc z4FwVSav)7m_X^(m7|Zr^VfGB7wf0?uw97oBqQVD5JF!1OaJ_!BNSzEgUtFc^0v(*2 
z7(bB@D+;)HV8=b)c(z8z$H!4i!^?}&Bv1>$!Jd(^HA$m{^_ku6=$p%>VH_!EpFl=} z$3%m{tTq6tbHvyL1|eb^0@UEjw>8sxFhK8k{$VnC3>w1xTS)F&#+;?x?ChP0T(q>@ zsDEeI`Ja%hAjujTJ;J+={{y(ZE%QeuO9(HsvxyXECnt%je9%9LC5`hE*C*`MgdYP_ z^PHn$fq(G!bVYB=1J}jwfncNXb^hZHm)@_*z9;#hym#l$ohZ(4-D>ADI~*nE+U@m* z8Rwy-sOaFf8jvcVf2Fbbpa1$s2q}KPb}Z7M{&#|XPQ`_>f{Ku51y~ql8xiif zHAIBZ)oa(tFag=|L;9Aovh)S2Sp>B*i6hD5&8SC66vo=)JZXoZ@Cg_uHqEcThR10! zK3P)u^Z2+fGn5;@7Z!>Z0rGIV)WQYhB)r%KXUF;8mX=i-V^zlXu^KrsCPWajgv5h^ zmRXZiKiVS|h6GEG6Xs~JQ~Z|wO}+bH7CPmYhN-BjjnDcXzemZ=;P0ok<*~yQbC?gF zeb>C-j=4@m_@+}UwuEBY+=!xoFNUb3#NT9g3&PT*?-7ur*>9s%ll4%M?B--cAU31teD zv`IVSo5#n-8opKC;)LzJI@GQ8aC5%ZwMbGL4l(LdtbEZi9> zR8!>X0NvMn@OTJiARU5U3Trlu7Tj^5mIFf1hG z+$AF=F4+wa82)o}W7*`ODFUkq-ho4N5A>A4w%WD}?U#wf1Tuh|%1D#_1>>@dzCNo9 zH%h8Z`yZejLS7P1Ag`Tkg#J!{AxsjtwbF%x)v}+}qrm=I)6{1b<#;)8yC)(6Az>p^ z-9Vysc6KV<+)bCLH#ggD1py2`NFjE6tkVriO2vb;FDvc50Ld2f} zHgSX5Gq)4h-cC&`6?*wjyite}k)P4N!8!lzhni|EiT>FkV2KdN5t9qcZ+-go2|YN& z(d_M8w<4t@R<>ZrfOAh*@7Yk`i0CZ%oypK8M#zMK1ojbydI{&k{8Ou9=~ z=JbKS7b+U!4gZ>a*Zux|HLM{()~{T-a?ag-$PR(#uUc9p#Km>BwR!ejP{}6-&nWFp z6B4bH!RPicDlm-BdJPvpm^ExwPS=o!6@npb+Y}XJZ@15%7we-RdPN^+PR#qAg<6;= zj2Ej4%LfS)=g&X#ngl^`q|$O~c=*L}5m>fho8}3Fs&X0B&W!IL>~r)iJbORqpf!(L93?A}>ii$I6p<_u7ITVPl)2Qy7-@;m>fJ7iSJDVWH!xOYq*Ut`0j0%TA zU`!#%lIvgoN`5j&XuB|e1MqNP9@Xh)=|!!CVpG!a_BDP*CY*&q=n2OzM3LK3O2K-^ zxKxlujUsSo#%8R?POQXx^IzO($MB;)g@DGRvw$BAIX4VVttk&p58Z7&v3VW#Y|uey zar)p2ey?=|?euQg7|ebJFpg!(XmbT-E~jbiX#}_hp|q1XlNyL6jcbC&%g0H%<-u5q zGg&v?gR*z_osj7nPnKa3*M{b1sc=}oDssZ~#mu?{#VtB2rLYTllre|ir=O3HcsN2d zye<&Nv6PVEyz$2&1AzfN0`OeDN8drC8-``Xv29!&99=jI)2%Hmezxd~AQ-?B$*=N} z^An6>mRC`E+uf}pAyH#MQFeR3;PLss?L<|*f-!6uJ~Tgn-m9f$r`BN&4m8v*o|qyX zf+V1#actk$E}j;ax6UfJO9$jKQ&%#eMBg0lTznK$r74LYxbs9rP=5gH42T|+nZjQ6 z(MSybC^NgeUHv~NFHRh={o(Ixi6ODYT|hl9iR`*@-5yZax8HL9r;**^b?qa*@L(a4 ztvjmfb=)NliVZ@mHNIi5-j_y@CvkDjOiX}_5i==jRz$kpL?O#^QE!bHj31Zow2z(C z$Xokbe{GxvjYTNq-RKCjaHaKlU!Tt5w3qfuT{3=QP5aM2suU6y7ABM3B)P^12r-M? 
zQD2NR2gkb=Cvo^Mx$4^Zf`DB{@G9&$HGs5cuX>v2=+rC#3N$UDR`Fy5%e8f6cpp$n zOUswC%~DF7{qS1TGAYy{j^AeHQJ36k4h{~45)31fj--KK2V@jnJowV027A-m3T~kU zMM^LdGe+a1qss#(Y_aK})iXYvVo|ffR=t1TO|6pacI>45!;g;*$B!E(ua-V#cvDeM z>!-ca5!|bJwvWVu1eGN2~7TE0t%zk?{G`Cv8!%9l02 zmq3+7F7o@P$oj1ouW>O!i|>A*R!5@Pfo%@zA<;g}DiRcO1pa`YH%NuYVC7ty3JNDE z{ShNbAJDBifZiX$ZvoO7<7iP}a}G0qbaa^K=LJsNS~`Ic*0D-D#Io*VJDp+-ng(pb z&}x?7r$%p4iK?F$wcja>ze4Nhn*=w`s99pL%@jT{O&W&2aIIuC37&uJxW*0DSXVCx zsza#WR&KOhO`OJ@`462WbRQH6FK*kNop* zl{WGl2)eSQFXYKVqy#5PqMf-;wb4~|N`_f*)%YegJ;ovUm1`HV(KHX5n`coO2voaZ z0SnSW+7|VmPFevZD1f-4+a2ju6$d394hRY|VU0sMZ+jeYAi9g>e3KVD$vxuX+=Ou( zAr7RR0G^N7@ZSA`%a}3l?G5C(^}7}nu?uJ^NJ>#nL7WF)up5stLZWinz`AC~i4JLW zJ5S&g@Bhmd#gr`XMf7D_c7)SX@D?Dqc`dIKX{4%8pHK;kxcx{peAxG4QBe`}96syH zyE$v_9W8zVU>%lh$e^o*Vx1_HJQHhiv=ZiR5jri1wdg{x(}85Kx4&OvR_x>_*X;d6 zGhf^<_CY&?8QN5g`HMYlWvgf_A)6V#NvPD6#AoRb*2%sT#>wC2p}@tSE4c=i(-s93$X};v zBuGO%RQiLMb2OKN3<@JuxZ#<~wPAZkn`=aBpqDBmYYi2k7#4*ok@2Gn z-b@&j{9byrwx-Zh&^gAbhAL6#Laj?Oxv6k@h5@{ooYlSbkq&sg%e#K}?hq&#yKAK` zzy$$A{|FfcicF}Bi*{C57Um9=&|uqzG8M)@@5!6p2kN*|XB35GbmS?KT`vDxQIT_f zGG1AkQMiBIDh3h_5TAH*wFUO0L`Ct!ql1h~aKna1sqbu>xfO_1z&^2xc7=m zo--oAQu ziMy|-XEzx>va7|AN1WBq9M*1YZ-3zAi;=hmyt4B0<6I#>!_Xc_Puk!|Yqwz0_WUb} zDYab)pU_7tc5XF`W)<#>Cucym&0ZCaPq--vtxV8?gTw~ z1~XTCCnuy^;*h#Fy!+GBEzqN(sc9f?Wy{rXGQGi=Z$+#IojJB zrzo7`R>B6uop_L1-XEpbTt4F-9`tOT3NPbZ9iP7)2DYMCRKY^C6XKWOz*8qO z$voNjYz>lAbT43~26?Q+j@+BTByQyq;Tb#T<;y$5LYR?;O~I zS-}w11td}Od0(MufEPe?hgqcMi)~?t7{!LqC;ww*Ad)9u=b4J8`*v}mkOSbCKn;qtB2zx@bCJ(951pVjefsnX;uSP~ zV4h}g4@M*?0X`ryPCD4wWR#Zr6UH8r($deN@Pjt8!Z7{tMT}Na$)pWq1k(tU|oUQCW9?6nw-UkGWps0?_9#>*5c_Y{ltD zG+ohtKYnI{wW+2%vDWK;1!>ReyJUB=t(_gfPo&&0iHp4grja=Qi_~NFh3C_NHE~+7Zp*|FCPV>5E(;)ZA^jTqc%Y0<*x&K4a+ zaXR8O^Qc7kMQwG8TM9{7eG;A@jva}D)6NTaE&~1YF@|-I1xJqiRtBSB(@Su@$xWy1 zZ(m0@daoKrb9{pDoY<-9xpiNT8-_HBgsuK3L3|`e`4#gglg+@l96jWP;)dD;b!M7_ z$XZiwUKt7~Lti0|D+e-$r@F{YU*G$0YPsLq>kp<-e}}4F7=zc&wI*&i$3L&O;1=f# zb=>zNbJbD;YRGklCyPl%_NO`L8$%ANBAscBui3MJol`T^(e%a13aRp>izjm@^KN3Q*~_QmkYy+&(uoEW)9Hvpat2Rt%gRt) zsww|_L)H_Kd%3b~5}5l}Zq<>hGjQxTQ#H?lt1gPvFW7TqV{`E705hLr-o!Lba-+)a zpS{v$@yPAXY*mSkX6R0_C%qoz7w$LAv=l_c5_rqQ;F^{Kkv$w5B7cy+fH{mj@1(CU(ye*)ZaYiM z{7C=+leZq}8yXt=`)?uWlv|vggHUAwK7o-Q2!w!5^wLj&jR8Cagm}pC3C9%tsJhWB z&+VaTz_NxHIHKrSr!G61{Ak9TR-#>6zWAgcfnBCB^!5QUoFhWt@!P`+5L9_U%6|hKt0z`*T?XE z&5wu_3Ug^G=^10w$TH6v(M`L0br4gk!Gu*H1`Pp*D(i$wpa3W(9-!9E0~v4h8e*Nv ztAZCNED?}uHi5^8v<$XGt&`A{WtWwK^;y4baqc9hwL2>lq~nmqnGo=~`!dPdNwgF+ z7eE(J;_e$9^jnw7{vY1AAzph@5%Lb|-W}>K~SnSdYakjoi?(jZ|N=Az_ z%;`Jb-=~)1*H-i`-)fbX&NxGstugUW5z9e#!6{P71e?SCylIV z8xDsSjJZKYTFQ3y*fxKB?d^>X!TO{7(g3#(K+nmbogaWB)#)z)dM5g0@QCU^jWIJQIy&&~`K@2EySTBttg!{5 zyE%@u9n5rRBXp{Os!Rn1dw9SiA}=rAB;@BevHY@Q+w!xsZ+D~oRVqqG9S>^l&X9!) 
z@V(%L(*16XBO3%Xc*=a^5OOsJ01LkZAg*vhS4Ba{1|!%tIIg{r=in$np5q`OrKIG8 zuZatA!Yv0vo{uPt3B$R$nbBAoD#LWb!5j{+h=#*cvna>moA`P%B_#z%1k|1D`S_Yp zUpBq6@>(w}9E{Oqx#pHs>H{G0XhRC+Cplr2;Z+}i%fJcaBO?PRip6SNGwHUKH%RLM zHPNT3wEl$848RPx4mwS6yuffq%(6v`w==>BxJ*oJ>=`Jjz(qpLbg@PLfnlClWT@A) zz_{>}$9n}Sc`P0&Z_?$<*gKFT)yE3fczc!s|0Z;{d4^Wd1mzb;3MnHe$6GQxeE4o= z=KYve8Rb>jT+ia4qL_}PdA={xCMGjbYK}D24(@<50#4({wvD&v5~ltxedq$hG3@yu zBR7{A2!=i$FNYr19@f;}R^*&G20+e%XTUIt)p{Tt?r%k47kxt0w5dr>U40gKRNd(3 z>ssjiK)Z%aB0neR^xgBf>sf6!(T_<fl|1magM{5TTPPPVa@> z+BrC3sc6l0L1jvSaA^C7R#ta{mod0`KvC-}oq2bE%~BNQA*QV(o*!9`!sehBEJfjr zBQ;A;k6LPfTj&zOpBR{lhl1>K6qdmY6^PgWCtDzb_g0pe3;&n15YKg7Zl7Bq$ys`R zs3U19#Sy{FLLF>)@avZFG5q(_d&{r?pPZ)#zqWu={da@C5{12vQgT1e!`&|Zk(Iol zM4*XG?v=`E8*+C?Ntg$I$Y{9RQUYNitO>Cy9ywv6OaE~1kMm1!zCfKF>P!5#zeS;c d{r^2-ZdYP<(s + +Twisted Documentation: Twisted Core Documentation + + + + +

                            Twisted Core Documentation

                            +
                              +
                              + +

                              Index

                              + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/man/manhole-man.html b/vendor/Twisted-10.0.0/doc/core/man/manhole-man.html new file mode 100644 index 000000000000..8e8e6fafa05f --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/man/manhole-man.html @@ -0,0 +1,50 @@ + + +Twisted Documentation: MANHOLE.1 + + + + +

                              MANHOLE.1

                              + + + +

                              Index

                              + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/man/manhole.1 b/vendor/Twisted-10.0.0/doc/core/man/manhole.1 new file mode 100644 index 000000000000..3d78617cd9a3 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/man/manhole.1 @@ -0,0 +1,16 @@ +.TH MANHOLE "1" "August 2001" "" "" +.SH NAME +manhole \- Connect to a Twisted Manhole service +.SH SYNOPSIS +.B manhole +.SH DESCRIPTION +manhole is a GTK interface to Twisted Manhole services. You can execute python code as if at an interactive Python console inside a running Twisted process with this. +.SH AUTHOR +Written by Chris Armstrong, copied from Moshe Zadka's "faucet" manpage. +.SH "REPORTING BUGS" +To report a bug, visit \fIhttp://twistedmatrix.com/bugs/\fR +.SH COPYRIGHT +Copyright \(co 2000-2008 Twisted Matrix Laboratories. +.br +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. diff --git a/vendor/Twisted-10.0.0/doc/core/man/mktap-man.html b/vendor/Twisted-10.0.0/doc/core/man/mktap-man.html new file mode 100644 index 000000000000..c3cf9617da15 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/man/mktap-man.html @@ -0,0 +1,328 @@ + + +Twisted Documentation: MKTAP.1 + + + + +

                              MKTAP.1

                              + +
                              + + + +

                              NAME

                              + +

                              mktap - create twisted.servers +

                              + +

                              SYNOPSIS

                              + +

                              mktap [options] apptype [application_option]... +

                              + +

                              mktap apptype --help +

                              + +

                              DESCRIPTION

                              + +

                              The --help option prints out a usage message to standard output. +

                              --debug, -d +
                              Show debug information for plugin loading. +
                              + +
                              --progress, -p +
                              Show progress information for plugin loading. +
                              + +
                              --encrypted, -e +
                              Encrypt file before writing (will make the extension of the resultant file begin with 'e'). +
                              + +
                              --uid, -u <uid> +
                              Application belongs to this uid, and should run with its permissions. +
                              + +
                              --gid, -d <gid> +
                              Application belongs to this gid, and should run with its permissions. +
                              + +
                              --append, -a <file> +
                              Append given servers to given file, instead of creating a new one. +File should be a tap file. +
                              + +
                              --appname, -n <name> +
                              Use the specified name as the process name when the application is run with +twistd(1). This option also causes some initialization code to be +duplicated when twistd(1) is run. +
                              + +
                              --type, -t <type> +
                              Specify the output file type. Available types are: +pickle - (default) Output as a Python pickle file. +source - Output as a .tas (AOT Python source) file. +apptype +Can be 'web', 'portforward', 'toc', 'coil', 'words', 'manhole', 'im', 'news', 'socks', 'telnet', 'parent', 'sibling', 'ftp', and 'mail'. Each of those supports different options. +
                              + +
                              + +

                              + +

                              portforward options

                              + +
                              -h, --host <host> +
                              Proxy connections to <host> +
                              + +
                              -d, --dest_port <port> +
                              Proxy connections to <port> on remote host. +
                              + +
                              -p, --port <port> +
                              Listen locally on <port> +
                              + +
                              + +
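                              A minimal .tac sketch of the same port forwarder, run with twistd -y instead of being built into a tap file; the host name and port numbers below are placeholders, not defaults taken from mktap.

from twisted.application import internet, service
from twisted.protocols import portforward

# roughly: mktap portforward --host remote.example --dest_port 80 --port 8080
application = service.Application("portforward")
factory = portforward.ProxyFactory("remote.example", 80)          # proxy to --host / --dest_port
internet.TCPServer(8080, factory).setServiceParent(application)   # listen locally on --port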

                              web options

                              + +
                              -u, --user +
                              Makes a server with ~/public_html and +~/.twistd-web-pb support for users. +
                              + +
                              --personal +
                              Instead of generating a webserver, generate a +ResourcePublisher which listens on ~/.twistd-web-pb +
                              + +
                              --path <path> +
                              <path> is either a specific file or a directory to be +set as the root of the web server. Use this if you +have a directory full of HTML, cgi, php3, epy, or rpy files or +any other files that you want to be served up raw. +
                              + +
                              -p, --port <port> +
                              <port> is a number representing which port you want to +start the server on. +
                              + +
                              -m, --mime_type <mimetype> +
                              <mimetype> is the default MIME type to use for +files in a --path web server when none can be determined +for a particular extension. The default is 'text/html'. +
                              + +
                              --allow_ignore_ext +
                              Specify whether or not a request for 'foo' should return 'foo.ext'. +Default is off. +
                              + +
                              --ignore-ext .<extension> +
                              Specify that a request for 'foo' should return 'foo.<extension>'. +
                              + +
                              -t, --telnet <port> +
                              Run a telnet server on <port>, for additional +configuration later. +
                              + +
                              -i, --index <name> +
                              Use an index name other than index.html +
                              + +
                              --https <port> +
                              Port to listen on for Secure HTTP. +
                              + +
                              -c, --certificate <filename> +
                              SSL certificate to use for HTTPS. [default: server.pem] +
                              + +
                              -k, --privkey <filename> +
                              SSL private key to use for HTTPS. [default: server.pem] +
                              + +
                              --processor <ext>=<class name> +
                              Adds a processor to those file names. (Only usable if after +--path)
                              + +
                              --resource-script <script name> +
                              Sets the root as a resource script. This script will be re-evaluated on +every request. +
                              + +
                              + +

                              This creates a web.tap file that can be used by twistd. If you +specify no arguments, it will be a demo webserver that has the Test +class from twisted.web.test in it. +

                              + +
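                              For comparison, roughly the same web server can be written as a .tac file and run with twistd -y web.tac; the document root and port below are placeholders rather than values implied by the man page.

from twisted.application import internet, service
from twisted.web import server, static

# roughly: mktap web --path /var/www --port 8080
application = service.Application("web")
site = server.Site(static.File("/var/www"))                       # serve the --path directory raw
internet.TCPServer(8080, site).setServiceParent(application)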

                              toc options

                              + +
                              -p <port> +
                              <port> is a number representing which port you want to +start the server on. +
                              + +
                              + +

                              mail options

                              + +
                              -r, --relay <ip>,<port>=<queue directory> +
                              Relay mail to all unknown domains through given IP and port, +using queue directory as temporary place to place files. +
                              + +
                              -d, --domain <domain>=<path> +
                              generate an SMTP/POP3 virtual maildir domain named domain which saves to +path +
                              + +
                              -u, --username <name>=<password> +
                              add a user/password to the last specified domains +
                              + +
                              -b, --bounce_to_postmaster +
                              undelivered mails are sent to the postmaster, instead of being rejected. +
                              + +
                              -p, --pop <port> +
                              <port> is a number representing which port you want to +start the pop3 server on. +
                              + +
                              -s, --smtp <port> +
                              <port> is a number representing which port you want to +start the smtp server on. +
                              + +
                              + +

                              This creates a mail.tap file that can be used by twistd(1). +

                              + +

                              telnet options

                              + +
                              -p, --port <port> +
                              Run the telnet server on <port> +
                              + +
                              -u, --username <name> +
                              set the username to <name> +
                              + +
                              -w, --password <password> +
                              set the password to <password> +
                              + +
                              + +

                              socks options

                              + +
                              -i, --interface <interface> +
                              Listen on interface <interface> +
                              + +
                              -p, --port <port> +
                              Run the SOCKSv4 server on <port> +
                              + +
                              -l, --log <filename> +
                              log connection data to <filename> +
                              + +
                              + +
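                              A rough .tac sketch of the SOCKSv4 proxy; it assumes twisted.protocols.socks.SOCKSv4Factory takes the log file name as its only constructor argument, and the port and file name are placeholders.

from twisted.application import internet, service
from twisted.protocols import socks

# roughly: mktap socks --port 1080 --log socks.log
application = service.Application("socks")
factory = socks.SOCKSv4Factory("socks.log")                       # log file name (assumed argument)
internet.TCPServer(1080, factory).setServiceParent(application)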

                              ftp options

                              + +
                              -a, --anonymous +
                              Allow anonymous logins +
                              + +
                              -3, --thirdparty +
                              Allow third party connections +
                              + +
                              --otp +
                              Use one time passwords (OTP) +
                              + +
                              -p, --port <port> +
                              Run the FTP server on <port> +
                              + +
                              -r, --root <path> +
                              Define the local root of the FTP server +
                              + +
                              --anonymoususer <username> +
                              Define the name of the anonymous user +
                              + +
                              + +
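                              An anonymous-only FTP server along the lines of the options above can likewise be sketched as a .tac file; the root directory and port are placeholders.

from twisted.application import internet, service
from twisted.cred.checkers import AllowAnonymousAccess
from twisted.cred.portal import Portal
from twisted.protocols.ftp import FTPFactory, FTPRealm

# roughly: mktap ftp --anonymous --root /srv/ftp --port 2121
application = service.Application("ftp")
portal = Portal(FTPRealm("/srv/ftp"), [AllowAnonymousAccess()])   # anonymous access only
internet.TCPServer(2121, FTPFactory(portal)).setServiceParent(application)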

                              manhole options

                              + +
                              -p, --port <port> +
                              Run the manhole server on <port> +
                              + +
                              -u, --user <name> +
                              set the username to <name> +
                              + +
                              -w, --password <password> +
                              set the password to <password> +
                              + +
                              + +

                              words options

                              + +
                              -p, --port <port> +
                              Run the Words server on <port> +
                              + +
                              -i, --irc <port> +
                              Run IRC server on port <port> +
                              + +
                              -w, --web <port> +
                              Run web server on port <port> +
                              + +
                              + +

                              AUTHOR

                              + +

                              Written by Moshe Zadka, based on mktap's help messages +

                              + +

                              REPORTING BUGS

                              + +

                              To report a bug, visit http://twistedmatrix.com/bugs/ +

                              + +

                              COPYRIGHT

                              + +

                              Copyright © 2000-2008 Twisted Matrix Laboratories. +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +

                              + +

                              SEE ALSO

                              + +

                              twistd(1) +

                              + +
                              + +

                              Index

                              + Version: 10.0.0 + + \ No newline at end of file diff --git a/vendor/Twisted-10.0.0/doc/core/man/mktap.1 b/vendor/Twisted-10.0.0/doc/core/man/mktap.1 new file mode 100644 index 000000000000..70f79b6f51d9 --- /dev/null +++ b/vendor/Twisted-10.0.0/doc/core/man/mktap.1 @@ -0,0 +1,219 @@ +.TH MKTAP "1" "July 2001" "" "" +.SH NAME +mktap \- create twisted.servers +.SH SYNOPSIS +.B mktap +[\fIoptions\fR] \fIapptype\fR [\fIapplication_option\fR]... +.PP +.B mktap +\fIapptype\fR --help +.SH DESCRIPTION +.PP +The \fB\--help\fR prints out a usage message to standard output. +.TP +\fB\--debug\fR, \fB\-d\fR +Show debug information for plugin loading. +.TP +\fB\--progress\fR, \fB\-p\fR +Show progress information for plugin loading. +.TP +\fB\--encrypted\fR, \fB\-e\fR +Encrypt file before writing (will make the extension of the resultant file begin with 'e'). +.TP +\fB\--uid\fR, \fB\-u\fR \fI\fR +Application belongs to this uid, and should run with its permissions. +.TP +\fB\--gid\fR, \fB\-d\fR \fI\fR +Application belongs to this gid, and should run with its permissions. +.TP +\fB\--append\fR, \fB\-a\fR \fI\fR +Append given servers to given file, instead of creating a new one. +File should be be a tap file. +.TP +\fB\--appname\fR, \fB\-n\fR \fI\fR +Use the specified name as the process name when the application is run with +\fItwistd(1)\fR. This option also causes some initialization code to be +duplicated when \fItwistd(1)\fR is run. +.TP +\fB\--type\fR, \fB\-t\fR \fI\fR +Specify the output file type. Available types are: +.IP +pickle - (default) Output as a python pickle file. +.br +source - Output as a .tas (AOT Python source) file. +.P +\fIapptype\fR +Can be 'web', 'portforward', 'toc', 'coil', 'words', \ +'manhole', 'im', 'news', 'socks', 'telnet', 'parent', 'sibling', \ +'ftp', and 'mail'. Each of those support different options. +.PP +.SH \fBportforward\fR options +.TP +\fB\-h\fR, \fB\--host\fR \fI\fR +Proxy connections to \fI\fR +.TP +\fB\-d\fR, \fB\--dest_port\fR \fI\fR +Proxy connections to \fI\fR on remote host. +.TP +\fB\-p\fR, \fB\--port\fR \fI\fR +Listen locally on \fI\fR +.PP +.SH \fBweb\fR options +.TP +\fB\-u\fR, \fB\--user\fR +Makes a server with ~/public_html and +~/.twistd-web-pb support for users. +.TP +\fB\--personal\fR +Instead of generating a webserver, generate a +ResourcePublisher which listens on ~/.twistd-web-pb +.TP +\fB\--path\fR \fI\fR + is either a specific file or a directory to be +set as the root of the web server. Use this if you +have a directory full of HTML, cgi, php3, epy, or rpy files or +any other files that you want to be served up raw. +.TP +\fB\-p\fR, \fB\--port\fR \fI\fR + is a number representing which port you want to +start the server on. +.TP +\fB\-m\fR, \fB\--mime_type\fR \fI\fR + is the default MIME type to use for +files in a --path web server when none can be determined +for a particular extension. The default is 'text/html'. +.TP +\fB\--allow_ignore_ext\fR +Specify whether or not a request for 'foo' should return 'foo.ext'. +Default is off. +.TP +\fB\--ignore-ext\fR \fI.\fR +Specify that a request for 'foo' should return 'foo.\fI\fR'. +.TP +\fB\-t\fR, \fB\--telnet\fR \fI\fR +Run a telnet server on , for additional +configuration later. +.TP +\fB\-i\fR, \fB\--index\fR \fI\fR +Use an index name other than "index.html" +.TP +\fB--https\fR \fI\fR +Port to listen on for Secure HTTP. +.TP +\fB-c\fR, \fB--certificate\fR \fI\fR +SSL certificate to use for HTTPS. 
[default: server.pem] +.TP +\fB-k\fR, \fB--privkey\fR \fI\fR +SSL certificate to use for HTTPS. [default: server.pem] +.TP +\fB--processor\fR \fI=\fR +Adds a processor to those file names. (Only usable if after +.B --path) +.TP +\fB--resource-script\fR \fI + # tidy does this, for example. + prefix = "" + oldvalue = c.value + match = self.COMMENT.match(oldvalue) + if match: + prefix = match.group() + oldvalue = oldvalue[len(prefix):] + + # now see if contents are actual node and comment or CDATA + try: + e = parseString("%s" % oldvalue).childNodes[0] + except (ParseError, MismatchedTags): + return + if len(e.childNodes) != 1: + return + e = e.firstChild() + if isinstance(e, (CDATASection, Comment)): + el.childNodes = [] + if prefix: + el.childNodes.append(Text(prefix)) + el.childNodes.append(e) + + def gotDoctype(self, doctype): + self._mddoctype = doctype + + def gotTagStart(self, name, attributes): + # print ' '*self.indentlevel, 'start tag',name + # self.indentlevel += 1 + parent = self._getparent() + if (self.beExtremelyLenient and isinstance(parent, Element)): + parentName = parent.tagName + myName = name + if self.caseInsensitive: + parentName = parentName.lower() + myName = myName.lower() + if myName in self.laterClosers.get(parentName, []): + self.gotTagEnd(parent.tagName) + parent = self._getparent() + attributes = _unescapeDict(attributes) + namespaces = self.nsstack[-1][0] + newspaces = {} + for k, v in attributes.items(): + if k.startswith('xmlns'): + spacenames = k.split(':',1) + if len(spacenames) == 2: + newspaces[spacenames[1]] = v + else: + newspaces[''] = v + del attributes[k] + if newspaces: + namespaces = namespaces.copy() + namespaces.update(newspaces) + for k, v in attributes.items(): + ksplit = k.split(':', 1) + if len(ksplit) == 2: + pfx, tv = ksplit + if pfx != 'xml' and namespaces.has_key(pfx): + attributes[namespaces[pfx], tv] = v + del attributes[k] + el = Element(name, attributes, parent, + self.filename, self.saveMark(), + caseInsensitive=self.caseInsensitive, + preserveCase=self.preserveCase, + namespace=namespaces.get('')) + revspaces = _reverseDict(newspaces) + el.addPrefixes(revspaces) + + if newspaces: + rscopy = self.nsstack[-1][2].copy() + rscopy.update(revspaces) + self.nsstack.append((namespaces, el, rscopy)) + self.elementstack.append(el) + if parent: + parent.appendChild(el) + if (self.beExtremelyLenient and el.tagName in self.soonClosers): + self.gotTagEnd(name) + + def _gotStandalone(self, factory, data): + parent = self._getparent() + te = factory(data, parent) + if parent: + parent.appendChild(te) + elif self.beExtremelyLenient: + self.documents.append(te) + + def gotText(self, data): + if data.strip() or self.shouldPreserveSpace(): + self._gotStandalone(Text, data) + + def gotComment(self, data): + self._gotStandalone(Comment, data) + + def gotEntityReference(self, entityRef): + self._gotStandalone(EntityReference, entityRef) + + def gotCData(self, cdata): + self._gotStandalone(CDATASection, cdata) + + def gotTagEnd(self, name): + # print ' '*self.indentlevel, 'end tag',name + # self.indentlevel -= 1 + if not self.elementstack: + if self.beExtremelyLenient: + return + raise MismatchedTags(*((self.filename, "NOTHING", name) + +self.saveMark()+(0,0))) + el = self.elementstack.pop() + pfxdix = self.nsstack[-1][2] + if self.nsstack[-1][1] is el: + nstuple = self.nsstack.pop() + else: + nstuple = None + if self.caseInsensitive: + tn = el.tagName.lower() + cname = name.lower() + else: + tn = el.tagName + cname = name + + nsplit = name.split(':',1) + 
if len(nsplit) == 2: + pfx, newname = nsplit + ns = pfxdix.get(pfx,None) + if ns is not None: + if el.namespace != ns: + if not self.beExtremelyLenient: + raise MismatchedTags(*((self.filename, el.tagName, name) + +self.saveMark()+el._markpos)) + if not (tn == cname): + if self.beExtremelyLenient: + if self.elementstack: + lastEl = self.elementstack[0] + for idx in xrange(len(self.elementstack)): + if self.elementstack[-(idx+1)].tagName == cname: + self.elementstack[-(idx+1)].endTag(name) + break + else: + # this was a garbage close tag; wait for a real one + self.elementstack.append(el) + if nstuple is not None: + self.nsstack.append(nstuple) + return + del self.elementstack[-(idx+1):] + if not self.elementstack: + self.documents.append(lastEl) + return + else: + raise MismatchedTags(*((self.filename, el.tagName, name) + +self.saveMark()+el._markpos)) + el.endTag(name) + if not self.elementstack: + self.documents.append(el) + if self.beExtremelyLenient and el.tagName == "script": + self._fixScriptElement(el) + + def connectionLost(self, reason): + XMLParser.connectionLost(self, reason) # This can cause more events! + if self.elementstack: + if self.beExtremelyLenient: + self.documents.append(self.elementstack[0]) + else: + raise MismatchedTags(*((self.filename, self.elementstack[-1], + "END_OF_FILE") + +self.saveMark() + +self.elementstack[-1]._markpos)) + + +def parse(readable, *args, **kwargs): + """Parse HTML or XML readable.""" + if not hasattr(readable, "read"): + readable = open(readable, "rb") + mdp = MicroDOMParser(*args, **kwargs) + mdp.filename = getattr(readable, "name", "") + mdp.makeConnection(None) + if hasattr(readable,"getvalue"): + mdp.dataReceived(readable.getvalue()) + else: + r = readable.read(1024) + while r: + mdp.dataReceived(r) + r = readable.read(1024) + mdp.connectionLost(None) + + if not mdp.documents: + raise ParseError(mdp.filename, 0, 0, "No top-level Nodes in document") + + if mdp.beExtremelyLenient: + if len(mdp.documents) == 1: + d = mdp.documents[0] + if not isinstance(d, Element): + el = Element("html") + el.appendChild(d) + d = el + else: + d = Element("html") + for child in mdp.documents: + d.appendChild(child) + else: + d = mdp.documents[0] + doc = Document(d) + doc.doctype = mdp._mddoctype + return doc + +def parseString(st, *args, **kw): + if isinstance(st, UnicodeType): + # this isn't particularly ideal, but it does work. 
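# Illustrative usage sketch of the helpers defined above (the module is assumed
# to be importable as twisted.web.microdom; the markup and tag names below are
# invented for the example): parseString() turns markup into a Document, and
# the lmx wrapper builds a tree of Elements by hand.
from twisted.web import microdom

doc = microdom.parseString("<p>hello <b>world</b></p>")
para = doc.firstChild()                    # the top-level <p> Element

root = microdom.lmx('div')                 # lmx wraps an Element for easy building
root['class'] = 'example'                  # item assignment sets an attribute
root.text('some text')                     # append a Text child
span = root.add('span', _class='inner')    # a leading '_' is stripped from attribute names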
+ return parse(StringIO(st.encode('UTF-16')), *args, **kw) + return parse(StringIO(st), *args, **kw) + + +def parseXML(readable): + """Parse an XML readable object.""" + return parse(readable, caseInsensitive=0, preserveCase=1) + + +def parseXMLString(st): + """Parse an XML readable object.""" + return parseString(st, caseInsensitive=0, preserveCase=1) + + +# Utility + +class lmx: + """Easy creation of XML.""" + + def __init__(self, node='div'): + if isinstance(node, StringTypes): + node = Element(node) + self.node = node + + def __getattr__(self, name): + if name[0] == '_': + raise AttributeError("no private attrs") + return lambda **kw: self.add(name,**kw) + + def __setitem__(self, key, val): + self.node.setAttribute(key, val) + + def __getitem__(self, key): + return self.node.getAttribute(key) + + def text(self, txt, raw=0): + nn = Text(txt, raw=raw) + self.node.appendChild(nn) + return self + + def add(self, tagName, **kw): + newNode = Element(tagName, caseInsensitive=0, preserveCase=0) + self.node.appendChild(newNode) + xf = lmx(newNode) + for k, v in kw.items(): + if k[0] == '_': + k = k[1:] + xf[k]=v + return xf diff --git a/vendor/Twisted-10.0.0/twisted/web/proxy.py b/vendor/Twisted-10.0.0/twisted/web/proxy.py new file mode 100644 index 000000000000..61446782b0e6 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/proxy.py @@ -0,0 +1,302 @@ +# -*- test-case-name: twisted.web.test.test_proxy -*- +# Copyright (c) 2001-2007 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Simplistic HTTP proxy support. + +This comes in two main variants - the Proxy and the ReverseProxy. + +When a Proxy is in use, a browser trying to connect to a server (say, +www.yahoo.com) will be intercepted by the Proxy, and the proxy will covertly +connect to the server, and return the result. + +When a ReverseProxy is in use, the client connects directly to the ReverseProxy +(say, www.yahoo.com) which farms off the request to one of a pool of servers, +and returns the result. + +Normally, a Proxy is used on the client end of an Internet connection, while a +ReverseProxy is used on the server end. +""" + +import urlparse +from urllib import quote as urlquote + +from twisted.internet import reactor +from twisted.internet.protocol import ClientFactory +from twisted.web.resource import Resource +from twisted.web.server import NOT_DONE_YET +from twisted.web.http import HTTPClient, Request, HTTPChannel + + + +class ProxyClient(HTTPClient): + """ + Used by ProxyClientFactory to implement a simple web proxy. + + @ivar _finished: A flag which indicates whether or not the original request + has been finished yet. + """ + _finished = False + + def __init__(self, command, rest, version, headers, data, father): + self.father = father + self.command = command + self.rest = rest + if "proxy-connection" in headers: + del headers["proxy-connection"] + headers["connection"] = "close" + headers.pop('keep-alive', None) + self.headers = headers + self.data = data + + + def connectionMade(self): + self.sendCommand(self.command, self.rest) + for header, value in self.headers.items(): + self.sendHeader(header, value) + self.endHeaders() + self.transport.write(self.data) + + + def handleStatus(self, version, code, message): + self.father.setResponseCode(int(code), message) + + + def handleHeader(self, key, value): + # t.web.server.Request sets default values for these headers in its + # 'process' method. 
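# A minimal usage sketch of the proxies in this module (ports and hostnames are
# assumptions, not from the patch; ReverseProxyResource is defined further
# down): a forward Proxy on one port, and a ReverseProxyResource relaying
# /app to an internal backend on another.
from twisted.internet import reactor
from twisted.web import http, server, resource
from twisted.web.proxy import Proxy, ReverseProxyResource

proxyFactory = http.HTTPFactory()
proxyFactory.protocol = Proxy                       # forward proxy, as the Proxy docstring shows
reactor.listenTCP(8080, proxyFactory)

root = resource.Resource()
root.putChild("app", ReverseProxyResource("app-backend.internal", 8081, "/app"))
reactor.listenTCP(8090, server.Site(root))
reactor.run()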
When these headers are received from the remote + # server, they ought to override the defaults, rather than append to + # them. + if key.lower() in ['server', 'date', 'content-type']: + self.father.responseHeaders.setRawHeaders(key, [value]) + else: + self.father.responseHeaders.addRawHeader(key, value) + + + def handleResponsePart(self, buffer): + self.father.write(buffer) + + + def handleResponseEnd(self): + """ + Finish the original request, indicating that the response has been + completely written to it, and disconnect the outgoing transport. + """ + if not self._finished: + self._finished = True + self.father.finish() + self.transport.loseConnection() + + + +class ProxyClientFactory(ClientFactory): + """ + Used by ProxyRequest to implement a simple web proxy. + """ + + protocol = ProxyClient + + + def __init__(self, command, rest, version, headers, data, father): + self.father = father + self.command = command + self.rest = rest + self.headers = headers + self.data = data + self.version = version + + + def buildProtocol(self, addr): + return self.protocol(self.command, self.rest, self.version, + self.headers, self.data, self.father) + + + def clientConnectionFailed(self, connector, reason): + """ + Report a connection failure in a response to the incoming request as + an error. + """ + self.father.setResponseCode(501, "Gateway error") + self.father.responseHeaders.addRawHeader("Content-Type", "text/html") + self.father.write("

<H1>Could not connect</H1>
                              ") + self.father.finish() + + + +class ProxyRequest(Request): + """ + Used by Proxy to implement a simple web proxy. + + @ivar reactor: the reactor used to create connections. + @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP} + """ + + protocols = {'http': ProxyClientFactory} + ports = {'http': 80} + + def __init__(self, channel, queued, reactor=reactor): + Request.__init__(self, channel, queued) + self.reactor = reactor + + + def process(self): + parsed = urlparse.urlparse(self.uri) + protocol = parsed[0] + host = parsed[1] + port = self.ports[protocol] + if ':' in host: + host, port = host.split(':') + port = int(port) + rest = urlparse.urlunparse(('', '') + parsed[2:]) + if not rest: + rest = rest + '/' + class_ = self.protocols[protocol] + headers = self.getAllHeaders().copy() + if 'host' not in headers: + headers['host'] = host + self.content.seek(0, 0) + s = self.content.read() + clientFactory = class_(self.method, rest, self.clientproto, headers, + s, self) + self.reactor.connectTCP(host, port, clientFactory) + + + +class Proxy(HTTPChannel): + """ + This class implements a simple web proxy. + + Since it inherits from L{twisted.protocols.http.HTTPChannel}, to use it you + should do something like this:: + + from twisted.web import http + f = http.HTTPFactory() + f.protocol = Proxy + + Make the HTTPFactory a listener on a port as per usual, and you have + a fully-functioning web proxy! + """ + + requestFactory = ProxyRequest + + + +class ReverseProxyRequest(Request): + """ + Used by ReverseProxy to implement a simple reverse proxy. + + @ivar proxyClientFactoryClass: a proxy client factory class, used to create + new connections. + @type proxyClientFactoryClass: L{ClientFactory} + + @ivar reactor: the reactor used to create connections. + @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP} + """ + + proxyClientFactoryClass = ProxyClientFactory + + def __init__(self, channel, queued, reactor=reactor): + Request.__init__(self, channel, queued) + self.reactor = reactor + + + def process(self): + """ + Handle this request by connecting to the proxied server and forwarding + it there, then forwarding the response back as the response to this + request. + """ + self.received_headers['host'] = self.factory.host + clientFactory = self.proxyClientFactoryClass( + self.method, self.uri, self.clientproto, self.getAllHeaders(), + self.content.read(), self) + self.reactor.connectTCP(self.factory.host, self.factory.port, + clientFactory) + + + +class ReverseProxy(HTTPChannel): + """ + Implements a simple reverse proxy. + + For details of usage, see the file examples/proxy.py. + """ + + requestFactory = ReverseProxyRequest + + + +class ReverseProxyResource(Resource): + """ + Resource that renders the results gotten from another server + + Put this resource in the tree to cause everything below it to be relayed + to a different server. + + @ivar proxyClientFactoryClass: a proxy client factory class, used to create + new connections. + @type proxyClientFactoryClass: L{ClientFactory} + + @ivar reactor: the reactor used to create connections. + @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP} + """ + + proxyClientFactoryClass = ProxyClientFactory + + + def __init__(self, host, port, path, reactor=reactor): + """ + @param host: the host of the web server to proxy. + @type host: C{str} + + @param port: the port of the web server to proxy. 
+ @type port: C{port} + + @param path: the base path to fetch data from. Note that you shouldn't + put any trailing slashes in it, it will be added automatically in + request. For example, if you put B{/foo}, a request on B{/bar} will + be proxied to B{/foo/bar}. Any required encoding of special + characters (such as " " or "/") should have been done already. + + @type path: C{str} + """ + Resource.__init__(self) + self.host = host + self.port = port + self.path = path + self.reactor = reactor + + + def getChild(self, path, request): + """ + Create and return a proxy resource with the same proxy configuration + as this one, except that its path also contains the segment given by + C{path} at the end. + """ + return ReverseProxyResource( + self.host, self.port, self.path + '/' + urlquote(path, safe="")) + + + def render(self, request): + """ + Render a request by forwarding it to the proxied server. + """ + # RFC 2616 tells us that we can omit the port if it's the default port, + # but we have to provide it otherwise + if self.port == 80: + host = self.host + else: + host = "%s:%d" % (self.host, self.port) + request.received_headers['host'] = host + request.content.seek(0, 0) + qs = urlparse.urlparse(request.uri)[4] + if qs: + rest = self.path + '?' + qs + else: + rest = self.path + clientFactory = self.proxyClientFactoryClass( + request.method, rest, request.clientproto, + request.getAllHeaders(), request.content.read(), request) + self.reactor.connectTCP(self.host, self.port, clientFactory) + return NOT_DONE_YET diff --git a/vendor/Twisted-10.0.0/twisted/web/resource.py b/vendor/Twisted-10.0.0/twisted/web/resource.py new file mode 100644 index 000000000000..afd1ea5c7141 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/resource.py @@ -0,0 +1,300 @@ +# -*- test-case-name: twisted.web.test.test_web -*- +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Implementation of the lowest-level Resource class. +""" + +import warnings + +from zope.interface import Attribute, implements, Interface + +from twisted.web import http + + +class IResource(Interface): + """ + A web resource. + """ + + isLeaf = Attribute( + """ + Signal if this IResource implementor is a "leaf node" or not. If True, + getChildWithDefault will not be called on this Resource. + """) + + def getChildWithDefault(name, request): + """ + Return a child with the given name for the given request. + This is the external interface used by the Resource publishing + machinery. If implementing IResource without subclassing + Resource, it must be provided. However, if subclassing Resource, + getChild overridden instead. + """ + + def putChild(path, child): + """ + Put a child IResource implementor at the given path. + """ + + def render(request): + """ + Render a request. This is called on the leaf resource for + a request. Render must return either a string, which will + be sent to the browser as the HTML for the request, or + server.NOT_DONE_YET. If NOT_DONE_YET is returned, + at some point later (in a Deferred callback, usually) + call request.write("") to write data to the request, + and request.finish() to send the data to the browser. + """ + + + +def getChildForRequest(resource, request): + """ + Traverse resource tree to find who will handle the request. 
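# Sketch of the render()/NOT_DONE_YET contract described in IResource above
# (the resource class and the two-second delay are invented for illustration):
# return NOT_DONE_YET now, then write() and finish() the request later.
from twisted.web import resource, server
from twisted.internet import reactor

class Delayed(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        def done():
            request.write("finally ready\n")
            request.finish()
        reactor.callLater(2, done)          # stands in for waiting on a Deferred
        return server.NOT_DONE_YET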
+ """ + while request.postpath and not resource.isLeaf: + pathElement = request.postpath.pop(0) + request.prepath.append(pathElement) + resource = resource.getChildWithDefault(pathElement, request) + return resource + + + +class Resource: + """ + I define a web-accessible resource. + + I serve 2 main purposes; one is to provide a standard representation for + what HTTP specification calls an 'entity', and the other is to provide an + abstract directory structure for URL retrieval. + """ + + implements(IResource) + + entityType = IResource + + server = None + + def __init__(self): + """Initialize. + """ + self.children = {} + + isLeaf = 0 + + ### Abstract Collection Interface + + def listStaticNames(self): + return self.children.keys() + + def listStaticEntities(self): + return self.children.items() + + def listNames(self): + return self.listStaticNames() + self.listDynamicNames() + + def listEntities(self): + return self.listStaticEntities() + self.listDynamicEntities() + + def listDynamicNames(self): + return [] + + def listDynamicEntities(self, request=None): + return [] + + def getStaticEntity(self, name): + return self.children.get(name) + + def getDynamicEntity(self, name, request): + if not self.children.has_key(name): + return self.getChild(name, request) + else: + return None + + def delEntity(self, name): + del self.children[name] + + def reallyPutEntity(self, name, entity): + self.children[name] = entity + + # Concrete HTTP interface + + def getChild(self, path, request): + """ + Retrieve a 'child' resource from me. + + Implement this to create dynamic resource generation -- resources which + are always available may be registered with self.putChild(). + + This will not be called if the class-level variable 'isLeaf' is set in + your subclass; instead, the 'postpath' attribute of the request will be + left as a list of the remaining path elements. + + For example, the URL /foo/bar/baz will normally be:: + + | site.resource.getChild('foo').getChild('bar').getChild('baz'). + + However, if the resource returned by 'bar' has isLeaf set to true, then + the getChild call will never be made on it. + + @param path: a string, describing the child + + @param request: a twisted.web.server.Request specifying meta-information + about the request that is being made for this child. + """ + return NoResource("No such child resource.") + + + def getChildWithDefault(self, path, request): + """ + Retrieve a static or dynamically generated child resource from me. + + First checks if a resource was added manually by putChild, and then + call getChild to check for dynamic resources. Only override if you want + to affect behaviour of all child lookups, rather than just dynamic + ones. + + This will check to see if I have a pre-registered child resource of the + given name, and call getChild if I do not. + """ + if path in self.children: + return self.children[path] + return self.getChild(path, request) + + + def getChildForRequest(self, request): + warnings.warn("Please use module level getChildForRequest.", DeprecationWarning, 2) + return getChildForRequest(self, request) + + + def putChild(self, path, child): + """ + Register a static child. + + You almost certainly don't want '/' in your path. If you + intended to have the root of a folder, e.g. /foo/, you want + path to be ''. + """ + self.children[path] = child + child.server = self.server + + + def render(self, request): + """ + Render a given resource. See L{IResource}'s render method. 
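# Hypothetical sketch of the child-lookup rules described above (names and the
# port are assumptions): putChild() registers a static child, while getChild()
# is the dynamic fallback consulted by getChildWithDefault().
from twisted.web import resource, server
from twisted.internet import reactor

class UserPage(resource.Resource):
    isLeaf = True
    def __init__(self, name):
        resource.Resource.__init__(self)
        self.name = name
    def render_GET(self, request):
        return "hello, %s\n" % (self.name,)

class UserDirectory(resource.Resource):
    def getChild(self, path, request):
        return UserPage(path)               # /users/<anything> resolves dynamically

root = resource.Resource()
root.putChild("users", UserDirectory())     # static child at /users
reactor.listenTCP(8080, server.Site(root))
reactor.run()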
+ + I delegate to methods of self with the form 'render_METHOD' + where METHOD is the HTTP that was used to make the + request. Examples: render_GET, render_HEAD, render_POST, and + so on. Generally you should implement those methods instead of + overriding this one. + + render_METHOD methods are expected to return a string which + will be the rendered page, unless the return value is + twisted.web.server.NOT_DONE_YET, in which case it is this + class's responsibility to write the results to + request.write(data), then call request.finish(). + + Old code that overrides render() directly is likewise expected + to return a string or NOT_DONE_YET. + """ + m = getattr(self, 'render_' + request.method, None) + if not m: + # This needs to be here until the deprecated subclasses of the + # below three error resources in twisted.web.error are removed. + from twisted.web.error import UnsupportedMethod + raise UnsupportedMethod(getattr(self, 'allowedMethods', ())) + return m(request) + + + def render_HEAD(self, request): + """ + Default handling of HEAD method. + + I just return self.render_GET(request). When method is HEAD, + the framework will handle this correctly. + """ + return self.render_GET(request) + + + +class ErrorPage(Resource): + """ + L{ErrorPage} is a resource which responds with a particular + (parameterized) status and a body consisting of HTML containing some + descriptive text. This is useful for rendering simple error pages. + + @ivar template: A C{str} which will have a dictionary interpolated into + it to generate the response body. The dictionary has the following + keys: + + - C{"code"}: The status code passed to L{ErrorPage.__init__}. + - C{"brief"}: The brief description passed to L{ErrorPage.__init__}. + - C{"detail"}: The detailed description passed to + L{ErrorPage.__init__}. + + @ivar code: An integer status code which will be used for the response. + @ivar brief: A short string which will be included in the response body. + @ivar detail: A longer string which will be included in the response body. + """ + + template = """ + + %(code)s - %(brief)s + +

+    <h1>%(brief)s</h1>
+    <p>%(detail)s</p>

                              + + +""" + + def __init__(self, status, brief, detail): + Resource.__init__(self) + self.code = status + self.brief = brief + self.detail = detail + + + def render(self, request): + request.setResponseCode(self.code) + request.setHeader("content-type", "text/html") + return self.template % dict( + code=self.code, + brief=self.brief, + detail=self.detail) + + + def getChild(self, chnam, request): + return self + + + +class NoResource(ErrorPage): + """ + L{NoResource} is a specialization of L{ErrorPage} which returns the HTTP + response code I{NOT FOUND}. + """ + def __init__(self, message="Sorry. No luck finding that resource."): + ErrorPage.__init__(self, http.NOT_FOUND, + "No Such Resource", + message) + + + +class ForbiddenResource(ErrorPage): + """ + L{ForbiddenResource} is a specialization of L{ErrorPage} which returns the + I{FORBIDDEN} HTTP response code. + """ + def __init__(self, message="Sorry, resource is forbidden."): + ErrorPage.__init__(self, http.FORBIDDEN, + "Forbidden Resource", + message) + + +__all__ = [ + 'IResource', 'getChildForRequest', + 'Resource', 'ErrorPage', 'NoResource', 'ForbiddenResource'] diff --git a/vendor/Twisted-10.0.0/twisted/web/rewrite.py b/vendor/Twisted-10.0.0/twisted/web/rewrite.py new file mode 100644 index 000000000000..b41ca00347ca --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/rewrite.py @@ -0,0 +1,52 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# +from twisted.web import resource + +class RewriterResource(resource.Resource): + + def __init__(self, orig, *rewriteRules): + resource.Resource.__init__(self) + self.resource = orig + self.rewriteRules = list(rewriteRules) + + def _rewrite(self, request): + for rewriteRule in self.rewriteRules: + rewriteRule(request) + + def getChild(self, path, request): + request.postpath.insert(0, path) + request.prepath.pop() + self._rewrite(request) + path = request.postpath.pop(0) + request.prepath.append(path) + return self.resource.getChildWithDefault(path, request) + + def render(self, request): + self._rewrite(request) + return self.resource.render(request) + + +def tildeToUsers(request): + if request.postpath and request.postpath[0][:1]=='~': + request.postpath[:1] = ['users', request.postpath[0][1:]] + request.path = '/'+'/'.join(request.prepath+request.postpath) + +def alias(aliasPath, sourcePath): + """ + I am not a very good aliaser. But I'm the best I can be. If I'm + aliasing to a Resource that generates links, and it uses any parts + of request.prepath to do so, the links will not be relative to the + aliased path, but rather to the aliased-to path. That I can't + alias static.File directory listings that nicely. However, I can + still be useful, as many resources will play nice. + """ + sourcePath = sourcePath.split('/') + aliasPath = aliasPath.split('/') + def rewriter(request): + if request.postpath[:len(aliasPath)] == aliasPath: + after = request.postpath[len(aliasPath):] + request.postpath = sourcePath + after + request.path = '/'+'/'.join(request.prepath+request.postpath) + return rewriter diff --git a/vendor/Twisted-10.0.0/twisted/web/script.py b/vendor/Twisted-10.0.0/twisted/web/script.py new file mode 100644 index 000000000000..c1e41da0f6c4 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/script.py @@ -0,0 +1,169 @@ +# -*- test-case-name: twisted.web.test.test_script -*- +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
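# Hedged sketch of the rewrite rules defined above (docroot and paths are
# assumptions): alias() rewrites /old/... requests onto /new/... before the
# wrapped resource sees them, and tildeToUsers maps /~alice to /users/alice.
from twisted.web import server, static
from twisted.web.rewrite import RewriterResource, alias, tildeToUsers
from twisted.internet import reactor

docroot = static.File("/var/www/htdocs")
root = RewriterResource(docroot, alias("old", "new"), tildeToUsers)
reactor.listenTCP(8080, server.Site(root))
reactor.run()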
+ +""" +I contain PythonScript, which is a very simple python script resource. +""" + +import os, traceback + +try: + import cStringIO as StringIO +except ImportError: + import StringIO + +from twisted import copyright +from twisted.web import http, server, static, resource, html + + +rpyNoResource = """

+You forgot to assign to the variable "resource" in your script. For example:
+
+# MyCoolWebApp.rpy
+
+import mygreatresource
+
+resource = mygreatresource.MyGreatResource()
+
                              +""" + +class AlreadyCached(Exception): + """This exception is raised when a path has already been cached. + """ + +class CacheScanner: + def __init__(self, path, registry): + self.path = path + self.registry = registry + self.doCache = 0 + + def cache(self): + c = self.registry.getCachedPath(self.path) + if c is not None: + raise AlreadyCached(c) + self.recache() + + def recache(self): + self.doCache = 1 + +noRsrc = resource.ErrorPage(500, "Whoops! Internal Error", rpyNoResource) + +def ResourceScript(path, registry): + """ + I am a normal py file which must define a 'resource' global, which should + be an instance of (a subclass of) web.resource.Resource; it will be + renderred. + """ + cs = CacheScanner(path, registry) + glob = {'__file__': path, + 'resource': noRsrc, + 'registry': registry, + 'cache': cs.cache, + 'recache': cs.recache} + try: + execfile(path, glob, glob) + except AlreadyCached, ac: + return ac.args[0] + rsrc = glob['resource'] + if cs.doCache and rsrc is not noRsrc: + registry.cachePath(path, rsrc) + return rsrc + +def ResourceTemplate(path, registry): + from quixote import ptl_compile + + glob = {'__file__': path, + 'resource': resource.ErrorPage(500, "Whoops! Internal Error", + rpyNoResource), + 'registry': registry} + + e = ptl_compile.compile_template(open(path), path) + exec e in glob + return glob['resource'] + + +class ResourceScriptWrapper(resource.Resource): + + def __init__(self, path, registry=None): + resource.Resource.__init__(self) + self.path = path + self.registry = registry or static.Registry() + + def render(self, request): + res = ResourceScript(self.path, self.registry) + return res.render(request) + + def getChildWithDefault(self, path, request): + res = ResourceScript(self.path, self.registry) + return res.getChildWithDefault(path, request) + + + +class ResourceScriptDirectory(resource.Resource): + """ + L{ResourceScriptDirectory} is a resource which serves scripts from a + filesystem directory. File children of a L{ResourceScriptDirectory} will + be served using L{ResourceScript}. Directory children will be served using + another L{ResourceScriptDirectory}. + + @ivar path: A C{str} giving the filesystem path in which children will be + looked up. + + @ivar registry: A L{static.Registry} instance which will be used to decide + how to interpret scripts found as children of this resource. + """ + def __init__(self, pathname, registry=None): + resource.Resource.__init__(self) + self.path = pathname + self.registry = registry or static.Registry() + + def getChild(self, path, request): + fn = os.path.join(self.path, path) + + if os.path.isdir(fn): + return ResourceScriptDirectory(fn, self.registry) + if os.path.exists(fn): + return ResourceScript(fn, self.registry) + return resource.NoResource() + + def render(self, request): + return resource.NoResource().render(request) + + +class PythonScript(resource.Resource): + """I am an extremely simple dynamic resource; an embedded python script. + + This will execute a file (usually of the extension '.epy') as Python code, + internal to the webserver. + """ + isLeaf = 1 + def __init__(self, filename, registry): + """Initialize me with a script name. + """ + self.filename = filename + self.registry = registry + + def render(self, request): + """Render me to a web client. + + Load my file, execute it in a special namespace (with 'request' and + '__file__' global vars) and finish the request. 
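# A minimal wiring sketch for the script resources defined in this file (the
# directory path is an assumption): register ResourceScript for ".rpy" files
# and PythonScript for ".epy" files on a static.File root, which hands
# matching children to those processors.
from twisted.web import static, script, server
from twisted.internet import reactor

root = static.File("/var/www/htdocs")
root.processors = {'.rpy': script.ResourceScript,
                   '.epy': script.PythonScript}
reactor.listenTCP(8080, server.Site(root))
reactor.run()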
Output to the web-page + will NOT be handled with print - standard output goes to the log - but + with request.write. + """ + request.setHeader("x-powered-by","Twisted/%s" % copyright.version) + namespace = {'request': request, + '__file__': self.filename, + 'registry': self.registry} + try: + execfile(self.filename, namespace, namespace) + except IOError, e: + if e.errno == 2: #file not found + request.setResponseCode(http.NOT_FOUND) + request.write(resource.NoResource("File not found.").render(request)) + except: + io = StringIO.StringIO() + traceback.print_exc(file=io) + request.write(html.PRE(io.getvalue())) + request.finish() + return server.NOT_DONE_YET diff --git a/vendor/Twisted-10.0.0/twisted/web/server.py b/vendor/Twisted-10.0.0/twisted/web/server.py new file mode 100644 index 000000000000..2e9eabfb286e --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/server.py @@ -0,0 +1,527 @@ +# -*- test-case-name: twisted.web.test.test_web -*- +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +This is a web-server which integrates with the twisted.internet +infrastructure. +""" + +# System Imports + +import warnings +import string +import types +import copy +import os +from urllib import quote + +from zope.interface import implements + +try: + from twisted.protocols._c_urlarg import unquote +except ImportError: + from urllib import unquote + +#some useful constants +NOT_DONE_YET = 1 + +# Twisted Imports +from twisted.spread import pb +from twisted.internet import defer, address, task +from twisted.web import iweb, http +from twisted.python import log, reflect, failure, components +from twisted import copyright +from twisted.web import util as webutil, resource +from twisted.web.error import UnsupportedMethod + +# backwards compatability +date_time_string = http.datetimeToString +string_date_time = http.stringToDatetime + +# Support for other methods may be implemented on a per-resource basis. +supportedMethods = ('GET', 'HEAD', 'POST') + + +def _addressToTuple(addr): + if isinstance(addr, address.IPv4Address): + return ('INET', addr.host, addr.port) + elif isinstance(addr, address.UNIXAddress): + return ('UNIX', addr.name) + else: + return tuple(addr) + +class Request(pb.Copyable, http.Request, components.Componentized): + implements(iweb.IRequest) + + site = None + appRootURL = None + __pychecker__ = 'unusednames=issuer' + + def __init__(self, *args, **kw): + http.Request.__init__(self, *args, **kw) + components.Componentized.__init__(self) + + def getStateToCopyFor(self, issuer): + x = self.__dict__.copy() + del x['transport'] + # XXX refactor this attribute out; it's from protocol + # del x['server'] + del x['channel'] + del x['content'] + del x['site'] + self.content.seek(0, 0) + x['content_data'] = self.content.read() + x['remote'] = pb.ViewPoint(issuer, self) + + # Address objects aren't jellyable + x['host'] = _addressToTuple(x['host']) + x['client'] = _addressToTuple(x['client']) + + # Header objects also aren't jellyable. + x['requestHeaders'] = list(x['requestHeaders'].getAllRawHeaders()) + + return x + + # HTML generation helpers + + def sibLink(self, name): + "Return the text that links to a sibling of the requested resource." + if self.postpath: + return (len(self.postpath)*"../") + name + else: + return name + + def childLink(self, name): + "Return the text that links to a child of the requested resource." 
+ lpp = len(self.postpath) + if lpp > 1: + return ((lpp-1)*"../") + name + elif lpp == 1: + return name + else: # lpp == 0 + if len(self.prepath) and self.prepath[-1]: + return self.prepath[-1] + '/' + name + else: + return name + + def process(self): + "Process a request." + + # get site from channel + self.site = self.channel.site + + # set various default headers + self.setHeader('server', version) + self.setHeader('date', http.datetimeToString()) + self.setHeader('content-type', "text/html") + + # Resource Identification + self.prepath = [] + self.postpath = map(unquote, string.split(self.path[1:], '/')) + try: + resrc = self.site.getResourceFor(self) + self.render(resrc) + except: + self.processingFailed(failure.Failure()) + + + def render(self, resrc): + try: + body = resrc.render(self) + except UnsupportedMethod, e: + allowedMethods = e.allowedMethods + if (self.method == "HEAD") and ("GET" in allowedMethods): + # We must support HEAD (RFC 2616, 5.1.1). If the + # resource doesn't, fake it by giving the resource + # a 'GET' request and then return only the headers, + # not the body. + log.msg("Using GET to fake a HEAD request for %s" % + (resrc,)) + self.method = "GET" + body = resrc.render(self) + + if body is NOT_DONE_YET: + log.msg("Tried to fake a HEAD request for %s, but " + "it got away from me." % resrc) + # Oh well, I guess we won't include the content length. + else: + self.setHeader('content-length', str(len(body))) + + self.write('') + self.finish() + return + + if self.method in (supportedMethods): + # We MUST include an Allow header + # (RFC 2616, 10.4.6 and 14.7) + self.setHeader('Allow', allowedMethods) + s = ('''Your browser approached me (at %(URI)s) with''' + ''' the method "%(method)s". I only allow''' + ''' the method%(plural)s %(allowed)s here.''' % { + 'URI': self.uri, + 'method': self.method, + 'plural': ((len(allowedMethods) > 1) and 's') or '', + 'allowed': string.join(allowedMethods, ', ') + }) + epage = resource.ErrorPage(http.NOT_ALLOWED, + "Method Not Allowed", s) + body = epage.render(self) + else: + epage = resource.ErrorPage(http.NOT_IMPLEMENTED, "Huh?", + "I don't know how to treat a" + " %s request." % (self.method,)) + body = epage.render(self) + # end except UnsupportedMethod + + if body == NOT_DONE_YET: + return + if type(body) is not types.StringType: + body = resource.ErrorPage( + http.INTERNAL_SERVER_ERROR, + "Request did not return a string", + "Request: " + html.PRE(reflect.safe_repr(self)) + "
                              " + + "Resource: " + html.PRE(reflect.safe_repr(resrc)) + "
                              " + + "Value: " + html.PRE(reflect.safe_repr(body))).render(self) + + if self.method == "HEAD": + if len(body) > 0: + # This is a Bad Thing (RFC 2616, 9.4) + log.msg("Warning: HEAD request %s for resource %s is" + " returning a message body." + " I think I'll eat it." + % (self, resrc)) + self.setHeader('content-length', str(len(body))) + self.write('') + else: + self.setHeader('content-length', str(len(body))) + self.write(body) + self.finish() + + def processingFailed(self, reason): + log.err(reason) + if self.site.displayTracebacks: + body = ("web.Server Traceback (most recent call last)" + "web.Server Traceback (most recent call last):\n\n" + "%s\n\n\n" + % webutil.formatFailure(reason)) + else: + body = ("Processing Failed" + "Processing Failed") + + self.setResponseCode(http.INTERNAL_SERVER_ERROR) + self.setHeader('content-type',"text/html") + self.setHeader('content-length', str(len(body))) + self.write(body) + self.finish() + return reason + + def view_write(self, issuer, data): + """Remote version of write; same interface. + """ + self.write(data) + + def view_finish(self, issuer): + """Remote version of finish; same interface. + """ + self.finish() + + def view_addCookie(self, issuer, k, v, **kwargs): + """Remote version of addCookie; same interface. + """ + self.addCookie(k, v, **kwargs) + + def view_setHeader(self, issuer, k, v): + """Remote version of setHeader; same interface. + """ + self.setHeader(k, v) + + def view_setLastModified(self, issuer, when): + """Remote version of setLastModified; same interface. + """ + self.setLastModified(when) + + def view_setETag(self, issuer, tag): + """Remote version of setETag; same interface. + """ + self.setETag(tag) + + def view_setResponseCode(self, issuer, code): + """Remote version of setResponseCode; same interface. + """ + self.setResponseCode(code) + + def view_registerProducer(self, issuer, producer, streaming): + """Remote version of registerProducer; same interface. + (requires a remote producer.) + """ + self.registerProducer(_RemoteProducerWrapper(producer), streaming) + + def view_unregisterProducer(self, issuer): + self.unregisterProducer() + + ### these calls remain local + + session = None + + def getSession(self, sessionInterface = None): + # Session management + if not self.session: + cookiename = string.join(['TWISTED_SESSION'] + self.sitepath, "_") + sessionCookie = self.getCookie(cookiename) + if sessionCookie: + try: + self.session = self.site.getSession(sessionCookie) + except KeyError: + pass + # if it still hasn't been set, fix it up. + if not self.session: + self.session = self.site.makeSession() + self.addCookie(cookiename, self.session.uid, path='/') + self.session.touch() + if sessionInterface: + return self.session.getComponent(sessionInterface) + return self.session + + def _prePathURL(self, prepath): + port = self.getHost().port + if self.isSecure(): + default = 443 + else: + default = 80 + if port == default: + hostport = '' + else: + hostport = ':%d' % port + return 'http%s://%s%s/%s' % ( + self.isSecure() and 's' or '', + self.getRequestHostname(), + hostport, + '/'.join([quote(segment, safe='') for segment in prepath])) + + def prePathURL(self): + return self._prePathURL(self.prepath) + + def URLPath(self): + from twisted.python import urlpath + return urlpath.URLPath.fromRequest(self) + + def rememberRootURL(self): + """ + Remember the currently-processed part of the URL for later + recalling. 
+ """ + url = self._prePathURL(self.prepath[:-1]) + self.appRootURL = url + + def getRootURL(self): + """ + Get a previously-remembered URL. + """ + return self.appRootURL + + +class _RemoteProducerWrapper: + def __init__(self, remote): + self.resumeProducing = remote.remoteMethod("resumeProducing") + self.pauseProducing = remote.remoteMethod("pauseProducing") + self.stopProducing = remote.remoteMethod("stopProducing") + + +class Session(components.Componentized): + """ + A user's session with a system. + + This utility class contains no functionality, but is used to + represent a session. + + @ivar _reactor: An object providing L{IReactorTime} to use for scheduling + expiration. + @ivar sessionTimeout: timeout of a session, in seconds. + @ivar loopFactory: Deprecated in Twisted 9.0. Does nothing. Do not use. + """ + sessionTimeout = 900 + loopFactory = task.LoopingCall + + _expireCall = None + + def __init__(self, site, uid, reactor=None): + """ + Initialize a session with a unique ID for that session. + """ + components.Componentized.__init__(self) + + if reactor is None: + from twisted.internet import reactor + self._reactor = reactor + + self.site = site + self.uid = uid + self.expireCallbacks = [] + self.touch() + self.sessionNamespaces = {} + + + def startCheckingExpiration(self, lifetime=None): + """ + Start expiration tracking. + + @param lifetime: Ignored; deprecated. + + @return: C{None} + """ + if lifetime is not None: + warnings.warn( + "The lifetime parameter to startCheckingExpiration is " + "deprecated since Twisted 9.0. See Session.sessionTimeout " + "instead.", DeprecationWarning, stacklevel=2) + self._expireCall = self._reactor.callLater( + self.sessionTimeout, self.expire) + + + def notifyOnExpire(self, callback): + """ + Call this callback when the session expires or logs out. + """ + self.expireCallbacks.append(callback) + + + def expire(self): + """ + Expire/logout of the session. + """ + del self.site.sessions[self.uid] + for c in self.expireCallbacks: + c() + self.expireCallbacks = [] + if self._expireCall and self._expireCall.active(): + self._expireCall.cancel() + # Break reference cycle. + self._expireCall = None + + + def touch(self): + """ + Notify session modification. + """ + self.lastModified = self._reactor.seconds() + if self._expireCall is not None: + self._expireCall.reset(self.sessionTimeout) + + + def checkExpired(self): + """ + Deprecated; does nothing. + """ + warnings.warn( + "Session.checkExpired is deprecated since Twisted 9.0; sessions " + "check themselves now, you don't need to.", + stacklevel=2, category=DeprecationWarning) + + +version = "TwistedWeb/%s" % copyright.version + + +class Site(http.HTTPFactory): + """ + A web site: manage log, sessions, and resources. + + @ivar counter: increment value used for generating unique sessions ID. + @ivar requestFactory: factory creating requests objects. Default to + L{Request}. + @ivar displayTracebacks: if set, Twisted internal errors are displayed on + rendered pages. Default to C{True}. + @ivar sessionFactory: factory for sessions objects. Default to L{Session}. + @ivar sessionCheckTime: Deprecated. See L{Session.sessionTimeout} instead. + """ + counter = 0 + requestFactory = Request + displayTracebacks = True + sessionFactory = Session + sessionCheckTime = 1800 + + def __init__(self, resource, logPath=None, timeout=60*60*12): + """ + Initialize. 
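# Hedged sketch of the session machinery described above (the timeout, port,
# and "visits" attribute are invented for the example): request.getSession()
# lazily creates a Session and sets the TWISTED_SESSION cookie, while
# Site.sessionFactory controls which Session class makeSession() builds.
from twisted.web import server, resource
from twisted.internet import reactor

class ShortSession(server.Session):
    sessionTimeout = 60                     # expire after a minute of inactivity

class VisitCounter(resource.Resource):
    isLeaf = True
    def render_GET(self, request):
        session = request.getSession()
        session.visits = getattr(session, 'visits', 0) + 1
        return "visit number %d\n" % (session.visits,)

site = server.Site(VisitCounter())
site.sessionFactory = ShortSession
reactor.listenTCP(8080, site)
reactor.run()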
+ """ + http.HTTPFactory.__init__(self, logPath=logPath, timeout=timeout) + self.sessions = {} + self.resource = resource + + def _openLogFile(self, path): + from twisted.python import logfile + return logfile.LogFile(os.path.basename(path), os.path.dirname(path)) + + def __getstate__(self): + d = self.__dict__.copy() + d['sessions'] = {} + return d + + def _mkuid(self): + """ + (internal) Generate an opaque, unique ID for a user's session. + """ + from twisted.python.hashlib import md5 + import random + self.counter = self.counter + 1 + return md5("%s_%s" % (str(random.random()) , str(self.counter))).hexdigest() + + def makeSession(self): + """ + Generate a new Session instance, and store it for future reference. + """ + uid = self._mkuid() + session = self.sessions[uid] = self.sessionFactory(self, uid) + session.startCheckingExpiration() + return session + + def getSession(self, uid): + """ + Get a previously generated session, by its unique ID. + This raises a KeyError if the session is not found. + """ + return self.sessions[uid] + + def buildProtocol(self, addr): + """ + Generate a channel attached to this site. + """ + channel = http.HTTPFactory.buildProtocol(self, addr) + channel.requestFactory = self.requestFactory + channel.site = self + return channel + + isLeaf = 0 + + def render(self, request): + """ + Redirect because a Site is always a directory. + """ + request.redirect(request.prePathURL() + '/') + request.finish() + + def getChildWithDefault(self, pathEl, request): + """ + Emulate a resource's getChild method. + """ + request.site = self + return self.resource.getChildWithDefault(pathEl, request) + + def getResourceFor(self, request): + """ + Get a resource for a request. + + This iterates through the resource heirarchy, calling + getChildWithDefault on each resource it finds for a path element, + stopping when it hits an element where isLeaf is true. + """ + request.site = self + # Sitepath is used to determine cookie names between distributed + # servers and disconnected sites. + request.sitepath = copy.copy(request.prepath) + return resource.getChildForRequest(self.resource, request) + + +import html + diff --git a/vendor/Twisted-10.0.0/twisted/web/soap.py b/vendor/Twisted-10.0.0/twisted/web/soap.py new file mode 100644 index 000000000000..70a605581b05 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/soap.py @@ -0,0 +1,154 @@ +# -*- test-case-name: twisted.web.test.test_soap -*- +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +SOAP support for twisted.web. + +Requires SOAPpy 0.10.1 or later. + +Maintainer: Itamar Shtull-Trauring + +Future plans: +SOAPContext support of some kind. +Pluggable method lookup policies. +""" + +# SOAPpy +import SOAPpy + +# twisted imports +from twisted.web import server, resource, client +from twisted.internet import defer + + +class SOAPPublisher(resource.Resource): + """Publish SOAP methods. + + By default, publish methods beginning with 'soap_'. If the method + has an attribute 'useKeywords', it well get the arguments passed + as keyword args. + """ + + isLeaf = 1 + + # override to change the encoding used for responses + encoding = "UTF-8" + + def lookupFunction(self, functionName): + """Lookup published SOAP function. + + Override in subclasses. Default behaviour - publish methods + starting with soap_. + + @return: callable or None if not found. 
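# Hedged sketch of publishing SOAP methods with the class above (the method
# name and port are invented; SOAPpy must be installed, as the module
# docstring notes): any method named soap_<name> becomes remotely callable.
from twisted.web import soap, server
from twisted.internet import reactor

class Calculator(soap.SOAPPublisher):
    def soap_add(self, a, b):
        return a + b

reactor.listenTCP(8080, server.Site(Calculator()))
reactor.run()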
+ """ + return getattr(self, "soap_%s" % functionName, None) + + def render(self, request): + """Handle a SOAP command.""" + data = request.content.read() + + p, header, body, attrs = SOAPpy.parseSOAPRPC(data, 1, 1, 1) + + methodName, args, kwargs, ns = p._name, p._aslist, p._asdict, p._ns + + # deal with changes in SOAPpy 0.11 + if callable(args): + args = args() + if callable(kwargs): + kwargs = kwargs() + + function = self.lookupFunction(methodName) + + if not function: + self._methodNotFound(request, methodName) + return server.NOT_DONE_YET + else: + if hasattr(function, "useKeywords"): + keywords = {} + for k, v in kwargs.items(): + keywords[str(k)] = v + d = defer.maybeDeferred(function, **keywords) + else: + d = defer.maybeDeferred(function, *args) + + d.addCallback(self._gotResult, request, methodName) + d.addErrback(self._gotError, request, methodName) + return server.NOT_DONE_YET + + def _methodNotFound(self, request, methodName): + response = SOAPpy.buildSOAP(SOAPpy.faultType("%s:Client" % + SOAPpy.NS.ENV_T, "Method %s not found" % methodName), + encoding=self.encoding) + self._sendResponse(request, response, status=500) + + def _gotResult(self, result, request, methodName): + if not isinstance(result, SOAPpy.voidType): + result = {"Result": result} + response = SOAPpy.buildSOAP(kw={'%sResponse' % methodName: result}, + encoding=self.encoding) + self._sendResponse(request, response) + + def _gotError(self, failure, request, methodName): + e = failure.value + if isinstance(e, SOAPpy.faultType): + fault = e + else: + fault = SOAPpy.faultType("%s:Server" % SOAPpy.NS.ENV_T, + "Method %s failed." % methodName) + response = SOAPpy.buildSOAP(fault, encoding=self.encoding) + self._sendResponse(request, response, status=500) + + def _sendResponse(self, request, response, status=200): + request.setResponseCode(status) + + if self.encoding is not None: + mimeType = 'text/xml; charset="%s"' % self.encoding + else: + mimeType = "text/xml" + request.setHeader("Content-type", mimeType) + request.setHeader("Content-length", str(len(response))) + request.write(response) + request.finish() + + +class Proxy: + """A Proxy for making remote SOAP calls. + + Pass the URL of the remote SOAP server to the constructor. + + Use proxy.callRemote('foobar', 1, 2) to call remote method + 'foobar' with args 1 and 2, proxy.callRemote('foobar', x=1) + will call foobar with named argument 'x'. + """ + + # at some point this should have encoding etc. kwargs + def __init__(self, url, namespace=None, header=None): + self.url = url + self.namespace = namespace + self.header = header + + def _cbGotResult(self, result): + result = SOAPpy.parseSOAPRPC(result) + if hasattr(result, 'Result'): + return result.Result + elif len(result) == 1: + ## SOAPpy 0.11.6 wraps the return results in a containing structure. + ## This check added to make Proxy behaviour emulate SOAPProxy, which + ## flattens the structure by default. + ## This behaviour is OK because even singleton lists are wrapped in + ## another singleton structType, which is almost always useless. 
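# Client-side counterpart to the publisher above (the URL and arguments are
# placeholders): Proxy.callRemote returns a Deferred that fires with the
# unwrapped result, as _cbGotResult shows.
from twisted.web.soap import Proxy
from twisted.internet import reactor

def show(result):
    print "3 + 4 =", result
    reactor.stop()

proxy = Proxy("http://localhost:8080/")
proxy.callRemote("add", 3, 4).addCallback(show)
reactor.run()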
+ return result[0] + else: + return result + + def callRemote(self, method, *args, **kwargs): + payload = SOAPpy.buildSOAP(args=args, kw=kwargs, method=method, + header=self.header, namespace=self.namespace) + return client.getPage(self.url, postdata=payload, method="POST", + headers={'content-type': 'text/xml', + 'SOAPAction': method} + ).addCallback(self._cbGotResult) + diff --git a/vendor/Twisted-10.0.0/twisted/web/static.py b/vendor/Twisted-10.0.0/twisted/web/static.py new file mode 100644 index 000000000000..e31795cf2525 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/static.py @@ -0,0 +1,1104 @@ +# -*- test-case-name: twisted.web.test.test_static -*- +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Static resources for L{twisted.web}. +""" + +import os +import warnings +import urllib +import itertools +import cgi +import time + +from zope.interface import implements + +from twisted.web import server +from twisted.web import resource +from twisted.web import http +from twisted.web.util import redirectTo + +from twisted.python import components, filepath, log +from twisted.internet import abstract, interfaces +from twisted.spread import pb +from twisted.persisted import styles +from twisted.python.util import InsensitiveDict +from twisted.python.runtime import platformType + + +dangerousPathError = resource.NoResource("Invalid request URL.") + +def isDangerous(path): + return path == '..' or '/' in path or os.sep in path + + +class Data(resource.Resource): + """ + This is a static, in-memory resource. + """ + + def __init__(self, data, type): + resource.Resource.__init__(self) + self.data = data + self.type = type + + + def render_GET(self, request): + request.setHeader("content-type", self.type) + request.setHeader("content-length", str(len(self.data))) + if request.method == "HEAD": + return '' + return self.data + render_HEAD = render_GET + + +def addSlash(request): + qs = '' + qindex = request.uri.find('?') + if qindex != -1: + qs = request.uri[qindex:] + + return "http%s://%s%s/%s" % ( + request.isSecure() and 's' or '', + request.getHeader("host"), + (request.uri.split('?')[0]), + qs) + +class Redirect(resource.Resource): + def __init__(self, request): + resource.Resource.__init__(self) + self.url = addSlash(request) + + def render(self, request): + return redirectTo(self.url, request) + + +class Registry(components.Componentized, styles.Versioned): + """ + I am a Componentized object that will be made available to internal Twisted + file-based dynamic web content such as .rpy and .epy scripts. + """ + + def __init__(self): + components.Componentized.__init__(self) + self._pathCache = {} + + persistenceVersion = 1 + + def upgradeToVersion1(self): + self._pathCache = {} + + def cachePath(self, path, rsrc): + self._pathCache[path] = rsrc + + def getCachedPath(self, path): + return self._pathCache.get(path) + + +def loadMimeTypes(mimetype_locations=['/etc/mime.types']): + """ + Multiple file locations containing mime-types can be passed as a list. + The files will be sourced in that order, overriding mime-types from the + files sourced beforehand, but only if a new entry explicitly overrides + the current entry. + """ + import mimetypes + # Grab Python's built-in mimetypes dictionary. + contentTypes = mimetypes.types_map + # Update Python's semi-erroneous dictionary with a few of the + # usual suspects. 
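# A minimal serving sketch using File and the MIME map built here (the docroot,
# port, and extra extension are assumptions): File maps a directory onto the
# URL space and picks each response's Content-Type from contentTypes.
from twisted.web import static, server
from twisted.internet import reactor

root = static.File("/var/www/htdocs", defaultType="text/plain")
root.contentTypes.update({'.md': 'text/plain'})    # extend the shared MIME map
reactor.listenTCP(8080, server.Site(root))
reactor.run()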
+ contentTypes.update( + { + '.conf': 'text/plain', + '.diff': 'text/plain', + '.exe': 'application/x-executable', + '.flac': 'audio/x-flac', + '.java': 'text/plain', + '.ogg': 'application/ogg', + '.oz': 'text/x-oz', + '.swf': 'application/x-shockwave-flash', + '.tgz': 'application/x-gtar', + '.wml': 'text/vnd.wap.wml', + '.xul': 'application/vnd.mozilla.xul+xml', + '.py': 'text/plain', + '.patch': 'text/plain', + } + ) + # Users can override these mime-types by loading them out configuration + # files (this defaults to ['/etc/mime.types']). + for location in mimetype_locations: + if os.path.exists(location): + more = mimetypes.read_mime_types(location) + if more is not None: + contentTypes.update(more) + + return contentTypes + +def getTypeAndEncoding(filename, types, encodings, defaultType): + p, ext = os.path.splitext(filename) + ext = ext.lower() + if encodings.has_key(ext): + enc = encodings[ext] + ext = os.path.splitext(p)[1].lower() + else: + enc = None + type = types.get(ext, defaultType) + return type, enc + + + +class File(resource.Resource, styles.Versioned, filepath.FilePath): + """ + File is a resource that represents a plain non-interpreted file + (although it can look for an extension like .rpy or .cgi and hand the + file to a processor for interpretation if you wish). Its constructor + takes a file path. + + Alternatively, you can give a directory path to the constructor. In this + case the resource will represent that directory, and its children will + be files underneath that directory. This provides access to an entire + filesystem tree with a single Resource. + + If you map the URL 'http://server/FILE' to a resource created as + File('/tmp'), then http://server/FILE/ will return an HTML-formatted + listing of the /tmp/ directory, and http://server/FILE/foo/bar.html will + return the contents of /tmp/foo/bar.html . + + @cvar childNotFound: L{Resource} used to render 404 Not Found error pages. + """ + + contentTypes = loadMimeTypes() + + contentEncodings = { + ".gz" : "gzip", + ".bz2": "bzip2" + } + + processors = {} + + indexNames = ["index", "index.html", "index.htm", "index.trp", "index.rpy"] + + type = None + + ### Versioning + + persistenceVersion = 6 + + def upgradeToVersion6(self): + self.ignoredExts = [] + if self.allowExt: + self.ignoreExt("*") + del self.allowExt + + + def upgradeToVersion5(self): + if not isinstance(self.registry, Registry): + self.registry = Registry() + + + def upgradeToVersion4(self): + if not hasattr(self, 'registry'): + self.registry = {} + + + def upgradeToVersion3(self): + if not hasattr(self, 'allowExt'): + self.allowExt = 0 + + + def upgradeToVersion2(self): + self.defaultType = "text/html" + + + def upgradeToVersion1(self): + if hasattr(self, 'indexName'): + self.indexNames = [self.indexName] + del self.indexName + + + def __init__(self, path, defaultType="text/html", ignoredExts=(), registry=None, allowExt=0): + """ + Create a file with the given path. + + @param path: The filename of the file from which this L{File} will + serve data. + @type path: C{str} + + @param defaultType: A I{major/minor}-style MIME type specifier + indicating the I{Content-Type} with which this L{File}'s data + will be served if a MIME type cannot be determined based on + C{path}'s extension. + @type defaultType: C{str} + + @param ignoredExts: A sequence giving the extensions of paths in the + filesystem which will be ignored for the purposes of child + lookup. 
For example, if C{ignoredExts} is C{(".bar",)} and + C{path} is a directory containing a file named C{"foo.bar"}, a + request for the C{"foo"} child of this resource will succeed + with a L{File} pointing to C{"foo.bar"}. + + @param registry: The registry object being used to handle this + request. If C{None}, one will be created. + @type registry: L{Registry} + + @param allowExt: Ignored parameter, only present for backwards + compatibility. Do not pass a value for this parameter. + """ + resource.Resource.__init__(self) + filepath.FilePath.__init__(self, path) + self.defaultType = defaultType + if ignoredExts in (0, 1) or allowExt: + warnings.warn("ignoredExts should receive a list, not a boolean") + if ignoredExts or allowExt: + self.ignoredExts = ['*'] + else: + self.ignoredExts = [] + else: + self.ignoredExts = list(ignoredExts) + self.registry = registry or Registry() + + + def ignoreExt(self, ext): + """Ignore the given extension. + + Serve file.ext if file is requested + """ + self.ignoredExts.append(ext) + + childNotFound = resource.NoResource("File not found.") + + def directoryListing(self): + return DirectoryLister(self.path, + self.listNames(), + self.contentTypes, + self.contentEncodings, + self.defaultType) + + + def getChild(self, path, request): + """ + If this L{File}'s path refers to a directory, return a L{File} + referring to the file named C{path} in that directory. + + If C{path} is the empty string, return a L{DirectoryLister} instead. + """ + self.restat(reraise=False) + + if not self.isdir(): + return self.childNotFound + + if path: + try: + fpath = self.child(path) + except filepath.InsecurePath: + return self.childNotFound + else: + fpath = self.childSearchPreauth(*self.indexNames) + if fpath is None: + return self.directoryListing() + + if not fpath.exists(): + fpath = fpath.siblingExtensionSearch(*self.ignoredExts) + if fpath is None: + return self.childNotFound + + if platformType == "win32": + # don't want .RPY to be different than .rpy, since that would allow + # source disclosure. + processor = InsensitiveDict(self.processors).get(fpath.splitext()[1]) + else: + processor = self.processors.get(fpath.splitext()[1]) + if processor: + return resource.IResource(processor(fpath.path, self.registry)) + return self.createSimilarFile(fpath.path) + + + # methods to allow subclasses to e.g. decrypt files on the fly: + def openForReading(self): + """Open a file and return it.""" + return self.open() + + + def getFileSize(self): + """Return file size.""" + return self.getsize() + + + def _parseRangeHeader(self, range): + """ + Parse the value of a Range header into (start, stop) pairs. + + In a given pair, either of start or stop can be None, signifying that + no value was provided, but not both. + + @return: A list C{[(start, stop)]} of pairs of length at least one. + + @raise ValueError: if the header is syntactically invalid or if the + Bytes-Unit is anything other than 'bytes'. 
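# Worked illustration of the Range parsing rules described above (the file
# path and header values are invented): each Range header becomes a list of
# (start, end) pairs, with None standing for an omitted bound.
from twisted.web.static import File

f = File("/var/www/big.iso")
f._parseRangeHeader("bytes=0-499")        # -> [(0, 499)]
f._parseRangeHeader("bytes=500-")         # -> [(500, None)]
f._parseRangeHeader("bytes=-200,0-99")    # -> [(None, 200), (0, 99)]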
+ """ + try: + kind, value = range.split('=', 1) + except ValueError: + raise ValueError("Missing '=' separator") + kind = kind.strip() + if kind != 'bytes': + raise ValueError("Unsupported Bytes-Unit: %r" % (kind,)) + unparsedRanges = filter(None, map(str.strip, value.split(','))) + parsedRanges = [] + for byteRange in unparsedRanges: + try: + start, end = byteRange.split('-', 1) + except ValueError: + raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) + if start: + try: + start = int(start) + except ValueError: + raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) + else: + start = None + if end: + try: + end = int(end) + except ValueError: + raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) + else: + end = None + if start is not None: + if end is not None and start > end: + # Start must be less than or equal to end or it is invalid. + raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) + elif end is None: + # One or both of start and end must be specified. Omitting + # both is invalid. + raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) + parsedRanges.append((start, end)) + return parsedRanges + + + def _rangeToOffsetAndSize(self, start, end): + """ + Convert a start and end from a Range header to an offset and size. + + This method checks that the resulting range overlaps with the resource + being served (and so has the value of C{getFileSize()} as an indirect + input). + + Either but not both of start or end can be C{None}: + + - Omitted start means that the end value is actually a start value + relative to the end of the resource. + + - Omitted end means the end of the resource should be the end of + the range. + + End is interpreted as inclusive, as per RFC 2616. + + If this range doesn't overlap with any of this resource, C{(0, 0)} is + returned, which is not otherwise a value return value. + + @param start: The start value from the header, or C{None} if one was + not present. + @param end: The end value from the header, or C{None} if one was not + present. + @return: C{(offset, size)} where offset is how far into this resource + this resource the range begins and size is how long the range is, + or C{(0, 0)} if the range does not overlap this resource. + """ + size = self.getFileSize() + if start is None: + start = size - end + end = size + elif end is None: + end = size + elif end < size: + end += 1 + elif end > size: + end = size + if start >= size: + start = end = 0 + return start, (end - start) + + + def _contentRange(self, offset, size): + """ + Return a string suitable for the value of a Content-Range header for a + range with the given offset and size. + + The offset and size are not sanity checked in any way. + + @param offset: How far into this resource the range begins. + @param size: How long the range is. + @return: The value as appropriate for the value of a Content-Range + header. + """ + return 'bytes %d-%d/%d' % ( + offset, offset + size - 1, self.getFileSize()) + + + def _doSingleRangeRequest(self, request, (start, end)): + """ + Set up the response for Range headers that specify a single range. + + This method checks if the request is satisfiable and sets the response + code and Content-Range header appropriately. The return value + indicates which part of the resource to return. + + @param request: The Request object. + @param start: The start of the byte range as specified by the header. + @param end: The end of the byte range as specified by the header. At + most one of C{start} and C{end} may be C{None}. 
+ @return: A 2-tuple of the offset and size of the range to return. + offset == size == 0 indicates that the request is not satisfiable. + """ + offset, size = self._rangeToOffsetAndSize(start, end) + if offset == size == 0: + # This range doesn't overlap with any of this resource, so the + # request is unsatisfiable. + request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE) + request.setHeader( + 'content-range', 'bytes */%d' % (self.getFileSize(),)) + else: + request.setResponseCode(http.PARTIAL_CONTENT) + request.setHeader( + 'content-range', self._contentRange(offset, size)) + return offset, size + + + def _doMultipleRangeRequest(self, request, byteRanges): + """ + Set up the response for Range headers that specify a single range. + + This method checks if the request is satisfiable and sets the response + code and Content-Type and Content-Length headers appropriately. The + return value, which is a little complicated, indicates which parts of + the resource to return and the boundaries that should separate the + parts. + + In detail, the return value is a tuple rangeInfo C{rangeInfo} is a + list of 3-tuples C{(partSeparator, partOffset, partSize)}. The + response to this request should be, for each element of C{rangeInfo}, + C{partSeparator} followed by C{partSize} bytes of the resource + starting at C{partOffset}. Each C{partSeparator} includes the + MIME-style boundary and the part-specific Content-type and + Content-range headers. It is convenient to return the separator as a + concrete string from this method, becasue this method needs to compute + the number of bytes that will make up the response to be able to set + the Content-Length header of the response accurately. + + @param request: The Request object. + @param byteRanges: A list of C{(start, end)} values as specified by + the header. For each range, at most one of C{start} and C{end} + may be C{None}. + @return: See above. + """ + matchingRangeFound = False + rangeInfo = [] + contentLength = 0 + boundary = "%x%x" % (int(time.time()*1000000), os.getpid()) + if self.type: + contentType = self.type + else: + contentType = 'bytes' # It's what Apache does... + for start, end in byteRanges: + partOffset, partSize = self._rangeToOffsetAndSize(start, end) + if partOffset == partSize == 0: + continue + contentLength += partSize + matchingRangeFound = True + partContentRange = self._contentRange(partOffset, partSize) + partSeparator = ( + "\r\n" + "--%s\r\n" + "Content-type: %s\r\n" + "Content-range: %s\r\n" + "\r\n") % (boundary, contentType, partContentRange) + contentLength += len(partSeparator) + rangeInfo.append((partSeparator, partOffset, partSize)) + if not matchingRangeFound: + request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE) + request.setHeader( + 'content-length', '0') + request.setHeader( + 'content-range', 'bytes */%d' % (self.getFileSize(),)) + return [], '' + finalBoundary = "\r\n--" + boundary + "--\r\n" + rangeInfo.append((finalBoundary, 0, 0)) + request.setResponseCode(http.PARTIAL_CONTENT) + request.setHeader( + 'content-type', 'multipart/byteranges; boundary="%s"' % (boundary,)) + request.setHeader( + 'content-length', contentLength + len(finalBoundary)) + return rangeInfo + + + def _setContentHeaders(self, request, size=None): + """ + Set the Content-length and Content-type headers for this request. + + This method is not appropriate for requests for multiple byte ranges; + L{_doMultipleRangeRequest} will set these headers in that case. + + @param request: The L{Request} object. 
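As an editorial aside (not part of the original patch), the following standalone sketch restates the byte-range arithmetic implemented above for a hypothetical 1000-byte resource; the function name and the size are illustrative assumptions, and the behaviour mirrors _parseRangeHeader/_rangeToOffsetAndSize as shown.

def range_to_offset_and_size(start, end, size=1000):
    # Standalone restatement (for illustration only) of File._rangeToOffsetAndSize:
    # the end position in a Range header is inclusive, "-N" means "the last N
    # bytes", and a range that misses the resource collapses to (0, 0).
    if start is None:
        start = size - end   # suffix range: the last `end` bytes
        end = size
    elif end is None:
        end = size           # open-ended range: read to the end of the resource
    elif end < size:
        end += 1             # header value is inclusive; make it exclusive
    else:
        end = size           # clamp an oversized end to the resource size
    if start >= size:
        start = end = 0      # no overlap with the resource: unsatisfiable
    return start, end - start

assert range_to_offset_and_size(0, 499) == (0, 500)       # "bytes=0-499"
assert range_to_offset_and_size(500, None) == (500, 500)  # "bytes=500-"
assert range_to_offset_and_size(None, 500) == (500, 500)  # "bytes=-500"
assert range_to_offset_and_size(2000, 3000) == (0, 0)     # 416 Requested Range Not Satisfiable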
+ @param size: The size of the response. If not specified, default to + C{self.getFileSize()}. + """ + if size is None: + size = self.getFileSize() + request.setHeader('content-length', str(size)) + if self.type: + request.setHeader('content-type', self.type) + if self.encoding: + request.setHeader('content-encoding', self.encoding) + + + def makeProducer(self, request, fileForReading): + """ + Make a L{StaticProducer} that will produce the body of this response. + + This method will also set the response code and Content-* headers. + + @param request: The L{Request} object. + @param fileForReading: The file object containing the resource. + @return: A L{StaticProducer}. Calling C{.start()} on this will begin + producing the response. + """ + byteRange = request.getHeader('range') + if byteRange is None: + self._setContentHeaders(request) + request.setResponseCode(http.OK) + return NoRangeStaticProducer(request, fileForReading) + try: + parsedRanges = self._parseRangeHeader(byteRange) + except ValueError: + log.msg("Ignoring malformed Range header %r" % (byteRange,)) + self._setContentHeaders(request) + request.setResponseCode(http.OK) + return NoRangeStaticProducer(request, fileForReading) + + if len(parsedRanges) == 1: + offset, size = self._doSingleRangeRequest( + request, parsedRanges[0]) + self._setContentHeaders(request, size) + return SingleRangeStaticProducer( + request, fileForReading, offset, size) + else: + rangeInfo = self._doMultipleRangeRequest(request, parsedRanges) + return MultipleRangeStaticProducer( + request, fileForReading, rangeInfo) + + + def render_GET(self, request): + """ + Begin sending the contents of this L{File} (or a subset of the + contents, based on the 'range' header) to the given request. + """ + self.restat(False) + + if self.type is None: + self.type, self.encoding = getTypeAndEncoding(self.basename(), + self.contentTypes, + self.contentEncodings, + self.defaultType) + + if not self.exists(): + return self.childNotFound.render(request) + + if self.isdir(): + return self.redirect(request) + + request.setHeader('accept-ranges', 'bytes') + + try: + fileForReading = self.openForReading() + except IOError, e: + import errno + if e[0] == errno.EACCES: + return resource.ForbiddenResource().render(request) + else: + raise + + if request.setLastModified(self.getmtime()) is http.CACHED: + return '' + + + producer = self.makeProducer(request, fileForReading) + + if request.method == 'HEAD': + return '' + + producer.start() + # and make sure the connection doesn't get closed + return server.NOT_DONE_YET + render_HEAD = render_GET + + + def redirect(self, request): + return redirectTo(addSlash(request), request) + + + def listNames(self): + if not self.isdir(): + return [] + directory = self.listdir() + directory.sort() + return directory + + def listEntities(self): + return map(lambda fileName, self=self: self.createSimilarFile(os.path.join(self.path, fileName)), self.listNames()) + + + def createPickleChild(self, name, child): + warnings.warn( + "File.createPickleChild is deprecated since Twisted 9.0. " + "Resource persistence is beyond the scope of Twisted Web.", + DeprecationWarning, stacklevel=2) + + if not os.path.isdir(self.path): + resource.Resource.putChild(self, name, child) + # xxx use a file-extension-to-save-function dictionary instead + if type(child) == type(""): + fl = open(os.path.join(self.path, name), 'wb') + fl.write(child) + else: + if '.' 
not in name: + name = name + '.trp' + fl = open(os.path.join(self.path, name), 'wb') + from pickle import Pickler + pk = Pickler(fl) + pk.dump(child) + fl.close() + + + def createSimilarFile(self, path): + f = self.__class__(path, self.defaultType, self.ignoredExts, self.registry) + # refactoring by steps, here - constructor should almost certainly take these + f.processors = self.processors + f.indexNames = self.indexNames[:] + f.childNotFound = self.childNotFound + return f + + + +class StaticProducer(object): + """ + Superclass for classes that implement the business of producing. + + @ivar request: The L{IRequest} to write the contents of the file to. + @ivar fileObject: The file the contents of which to write to the request. + """ + + implements(interfaces.IPullProducer) + + bufferSize = abstract.FileDescriptor.bufferSize + + + def __init__(self, request, fileObject): + """ + Initialize the instance. + """ + self.request = request + self.fileObject = fileObject + + + def start(self): + raise NotImplementedError(self.start) + + + def resumeProducing(self): + raise NotImplementedError(self.resumeProducing) + + + def stopProducing(self): + """ + Stop producing data. + + L{IPullProducer.stopProducing} is called when our consumer has died, + and subclasses also call this method when they are done producing + data. + """ + self.fileObject.close() + self.request = None + + + +class NoRangeStaticProducer(StaticProducer): + """ + A L{StaticProducer} that writes the entire file to the request. + """ + + def start(self): + self.request.registerProducer(self, False) + + + def resumeProducing(self): + if not self.request: + return + data = self.fileObject.read(self.bufferSize) + if data: + # this .write will spin the reactor, calling .doWrite and then + # .resumeProducing again, so be prepared for a re-entrant call + self.request.write(data) + else: + self.request.unregisterProducer() + self.request.finish() + self.stopProducing() + + + +class SingleRangeStaticProducer(StaticProducer): + """ + A L{StaticProducer} that writes a single chunk of a file to the request. + """ + + def __init__(self, request, fileObject, offset, size): + """ + Initialize the instance. + + @param request: See L{StaticProducer}. + @param fileObject: See L{StaticProducer}. + @param offset: The offset into the file of the chunk to be written. + @param size: The size of the chunk to write. + """ + StaticProducer.__init__(self, request, fileObject) + self.offset = offset + self.size = size + + + def start(self): + self.fileObject.seek(self.offset) + self.bytesWritten = 0 + self.request.registerProducer(self, 0) + + + def resumeProducing(self): + if not self.request: + return + data = self.fileObject.read( + min(self.bufferSize, self.size - self.bytesWritten)) + if data: + self.bytesWritten += len(data) + # this .write will spin the reactor, calling .doWrite and then + # .resumeProducing again, so be prepared for a re-entrant call + self.request.write(data) + if self.request and self.bytesWritten == self.size: + self.request.unregisterProducer() + self.request.finish() + self.stopProducing() + + + +class MultipleRangeStaticProducer(StaticProducer): + """ + A L{StaticProducer} that writes several chunks of a file to the request. + """ + + def __init__(self, request, fileObject, rangeInfo): + """ + Initialize the instance. + + @param request: See L{StaticProducer}. + @param fileObject: See L{StaticProducer}. 
+ @param rangeInfo: A list of tuples C{[(boundary, offset, size)]} + where: + - C{boundary} will be written to the request first. + - C{offset} the offset into the file of chunk to write. + - C{size} the size of the chunk to write. + """ + StaticProducer.__init__(self, request, fileObject) + self.rangeInfo = rangeInfo + + + def start(self): + self.rangeIter = iter(self.rangeInfo) + self._nextRange() + self.request.registerProducer(self, 0) + + + def _nextRange(self): + self.partBoundary, partOffset, self._partSize = self.rangeIter.next() + self._partBytesWritten = 0 + self.fileObject.seek(partOffset) + + + def resumeProducing(self): + if not self.request: + return + data = [] + dataLength = 0 + done = False + while dataLength < self.bufferSize: + if self.partBoundary: + dataLength += len(self.partBoundary) + data.append(self.partBoundary) + self.partBoundary = None + p = self.fileObject.read( + min(self.bufferSize - dataLength, + self._partSize - self._partBytesWritten)) + self._partBytesWritten += len(p) + dataLength += len(p) + data.append(p) + if self.request and self._partBytesWritten == self._partSize: + try: + self._nextRange() + except StopIteration: + done = True + break + self.request.write(''.join(data)) + if done: + self.request.unregisterProducer() + self.request.finish() + self.request = None + + +class FileTransfer(pb.Viewable): + """ + A class to represent the transfer of a file over the network. + """ + request = None + + def __init__(self, file, size, request): + warnings.warn( + "FileTransfer is deprecated since Twisted 9.0. " + "Use a subclass of StaticProducer instead.", + DeprecationWarning, stacklevel=2) + self.file = file + self.size = size + self.request = request + self.written = self.file.tell() + request.registerProducer(self, 0) + + def resumeProducing(self): + if not self.request: + return + data = self.file.read(min(abstract.FileDescriptor.bufferSize, self.size - self.written)) + if data: + self.written += len(data) + # this .write will spin the reactor, calling .doWrite and then + # .resumeProducing again, so be prepared for a re-entrant call + self.request.write(data) + if self.request and self.file.tell() == self.size: + self.request.unregisterProducer() + self.request.finish() + self.request = None + + def pauseProducing(self): + pass + + def stopProducing(self): + self.file.close() + self.request = None + + # Remotely relay producer interface. + + def view_resumeProducing(self, issuer): + self.resumeProducing() + + def view_pauseProducing(self, issuer): + self.pauseProducing() + + def view_stopProducing(self, issuer): + self.stopProducing() + + + +class ASISProcessor(resource.Resource): + """ + Serve files exactly as responses without generating a status-line or any + headers. Inspired by Apache's mod_asis. + """ + + def __init__(self, path, registry=None): + resource.Resource.__init__(self) + self.path = path + self.registry = registry or Registry() + + + def render(self, request): + request.startedWriting = 1 + res = File(self.path, registry=self.registry) + return res.render(request) + + + +def formatFileSize(size): + """ + Format the given file size in bytes to human readable format. + """ + if size < 1024: + return '%iB' % size + elif size < (1024 ** 2): + return '%iK' % (size / 1024) + elif size < (1024 ** 3): + return '%iM' % (size / (1024 ** 2)) + else: + return '%iG' % (size / (1024 ** 3)) + + + +class DirectoryLister(resource.Resource): + """ + Print the content of a directory. 
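As an editorial aside (not part of the original patch), a minimal sketch of how the File resource defined above is typically served; a directory with no index file falls back to the DirectoryLister described next. The directory path and port number are illustrative assumptions.

from twisted.web import static, server
from twisted.internet import reactor

root = static.File("/srv/www")        # assumed local directory to publish
root.indexNames = ["index.html"]      # served instead of a listing when present
root.ignoreExt(".html")               # a request for /foo may serve /foo.html
reactor.listenTCP(8080, server.Site(root))
reactor.run()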
+ + @ivar template: page template used to render the content of the directory. + It must contain the format keys B{header} and B{tableContent}. + @type template: C{str} + + @ivar linePattern: template used to render one line in the listing table. + It must contain the format keys B{class}, B{href}, B{text}, B{size}, + B{type} and B{encoding}. + @type linePattern: C{str} + + @ivar contentEncodings: a mapping of extensions to encoding types. + @type contentEncodings: C{dict} + + @ivar defaultType: default type used when no mimetype is detected. + @type defaultType: C{str} + + @ivar dirs: filtered content of C{path}, if the whole content should not be + displayed (default to C{None}, which means the actual content of + C{path} is printed). + @type dirs: C{NoneType} or C{list} + + @ivar path: directory which content should be listed. + @type path: C{str} + """ + + template = """ + +%(header)s + + + + +

+<body>
+<h1>%(header)s</h1>
+
+<table>
+    <thead>
+        <tr>
+            <th>Filename</th>
+            <th>Size</th>
+            <th>Content type</th>
+            <th>Content encoding</th>
+        </tr>
+    </thead>
+    <tbody>
+%(tableContent)s
+    </tbody>
+</table>
+</body>
+</html>
                              + + + +""" + + linePattern = """ + %(text)s + %(size)s + %(type)s + %(encoding)s + +""" + + def __init__(self, pathname, dirs=None, + contentTypes=File.contentTypes, + contentEncodings=File.contentEncodings, + defaultType='text/html'): + resource.Resource.__init__(self) + self.contentTypes = contentTypes + self.contentEncodings = contentEncodings + self.defaultType = defaultType + # dirs allows usage of the File to specify what gets listed + self.dirs = dirs + self.path = pathname + + + def _getFilesAndDirectories(self, directory): + """ + Helper returning files and directories in given directory listing, with + attributes to be used to build a table content with + C{self.linePattern}. + + @return: tuple of (directories, files) + @rtype: C{tuple} of C{list} + """ + files = [] + dirs = [] + for path in directory: + url = urllib.quote(path, "/") + escapedPath = cgi.escape(path) + if os.path.isdir(os.path.join(self.path, path)): + url = url + '/' + dirs.append({'text': escapedPath + "/", 'href': url, + 'size': '', 'type': '[Directory]', + 'encoding': ''}) + else: + mimetype, encoding = getTypeAndEncoding(path, self.contentTypes, + self.contentEncodings, + self.defaultType) + try: + size = os.stat(os.path.join(self.path, path)).st_size + except OSError: + continue + files.append({ + 'text': escapedPath, "href": url, + 'type': '[%s]' % mimetype, + 'encoding': (encoding and '[%s]' % encoding or ''), + 'size': formatFileSize(size)}) + return dirs, files + + + def _buildTableContent(self, elements): + """ + Build a table content using C{self.linePattern} and giving elements odd + and even classes. + """ + tableContent = [] + rowClasses = itertools.cycle(['odd', 'even']) + for element, rowClass in zip(elements, rowClasses): + element["class"] = rowClass + tableContent.append(self.linePattern % element) + return tableContent + + + def render(self, request): + """ + Render a listing of the content of C{self.path}. + """ + if self.dirs is None: + directory = os.listdir(self.path) + directory.sort() + else: + directory = self.dirs + + dirs, files = self._getFilesAndDirectories(directory) + + tableContent = "".join(self._buildTableContent(dirs + files)) + + header = "Directory listing for %s" % ( + cgi.escape(urllib.unquote(request.uri)),) + + return self.template % {"header": header, "tableContent": tableContent} + + + def __repr__(self): + return '' % self.path + + __str__ = __repr__ diff --git a/vendor/Twisted-10.0.0/twisted/web/sux.py b/vendor/Twisted-10.0.0/twisted/web/sux.py new file mode 100644 index 000000000000..6f8fea1dc3b1 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/sux.py @@ -0,0 +1,657 @@ +# -*- test-case-name: twisted.web.test.test_xml -*- +# +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +*S*mall, *U*ncomplicated *X*ML. + +This is a very simple implementation of XML/HTML as a network +protocol. It is not at all clever. Its main features are that it +does not: + + - support namespaces + - mung mnemonic entity references + - validate + - perform *any* external actions (such as fetching URLs or writing files) + under *any* circumstances + - has lots and lots of horrible hacks for supporting broken HTML (as an + option, they're not on by default). +""" + +from twisted.internet.protocol import Protocol, FileWrapper +from twisted.python.reflect import prefixedMethodNames + + + +# Elements of the three-tuples in the state table. 
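As an editorial aside (not part of the original patch), the parser below is driven like any other Protocol: subclass it, override the got* callbacks, and feed it bytes. This sketch mirrors the module's own __main__ block; the TitleGrabber class is a made-up example.

from cStringIO import StringIO
from twisted.internet.protocol import FileWrapper
from twisted.web.sux import XMLParser

class TitleGrabber(XMLParser):
    # Collect the character data that appears inside <title>...</title>.
    inTitle = False
    title = ''

    def gotTagStart(self, name, attributes):
        if name == 'title':
            self.inTitle = True

    def gotTagEnd(self, name):
        if name == 'title':
            self.inTitle = False

    def gotText(self, data):
        if self.inTitle:
            self.title += data

p = TitleGrabber()
p.makeConnection(FileWrapper(StringIO()))   # same setup as the __main__ block below
p.dataReceived('<html><head><title>hello</title></head><body/></html>')
assert p.title == 'hello'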
+BEGIN_HANDLER = 0 +DO_HANDLER = 1 +END_HANDLER = 2 + +identChars = '.-_:' +lenientIdentChars = identChars + ';+#/%~' + +def nop(*args, **kw): + "Do nothing." + + +def unionlist(*args): + l = [] + for x in args: + l.extend(x) + d = dict([(x, 1) for x in l]) + return d.keys() + + +def zipfndict(*args, **kw): + default = kw.get('default', nop) + d = {} + for key in unionlist(*[fndict.keys() for fndict in args]): + d[key] = tuple([x.get(key, default) for x in args]) + return d + + +def prefixedMethodClassDict(clazz, prefix): + return dict([(name, getattr(clazz, prefix + name)) for name in prefixedMethodNames(clazz, prefix)]) + + +def prefixedMethodObjDict(obj, prefix): + return dict([(name, getattr(obj, prefix + name)) for name in prefixedMethodNames(obj.__class__, prefix)]) + + +class ParseError(Exception): + + def __init__(self, filename, line, col, message): + self.filename = filename + self.line = line + self.col = col + self.message = message + + def __str__(self): + return "%s:%s:%s: %s" % (self.filename, self.line, self.col, + self.message) + +class XMLParser(Protocol): + + state = None + encodings = None + filename = "" + beExtremelyLenient = 0 + _prepend = None + + # _leadingBodyData will sometimes be set before switching to the + # 'bodydata' state, when we "accidentally" read a byte of bodydata + # in a different state. + _leadingBodyData = None + + def connectionMade(self): + self.lineno = 1 + self.colno = 0 + self.encodings = [] + + def saveMark(self): + '''Get the line number and column of the last character parsed''' + # This gets replaced during dataReceived, restored afterwards + return (self.lineno, self.colno) + + def _parseError(self, message): + raise ParseError(*((self.filename,)+self.saveMark()+(message,))) + + def _buildStateTable(self): + '''Return a dictionary of begin, do, end state function tuples''' + # _buildStateTable leaves something to be desired but it does what it + # does.. probably slowly, so I'm doing some evil caching so it doesn't + # get called more than once per class. + stateTable = getattr(self.__class__, '__stateTable', None) + if stateTable is None: + stateTable = self.__class__.__stateTable = zipfndict( + *[prefixedMethodObjDict(self, prefix) + for prefix in ('begin_', 'do_', 'end_')]) + return stateTable + + def _decode(self, data): + if 'UTF-16' in self.encodings or 'UCS-2' in self.encodings: + assert not len(data) & 1, 'UTF-16 must come in pairs for now' + if self._prepend: + data = self._prepend + data + for encoding in self.encodings: + data = unicode(data, encoding) + return data + + def maybeBodyData(self): + if self.endtag: + return 'bodydata' + + # Get ready for fun! We're going to allow + # to work! + # We do this by making everything between a Text + # BUT + # -radix + + if (self.tagName == 'script' + and not self.tagAttributes.has_key('src')): + # we do this ourselves rather than having begin_waitforendscript + # becuase that can get called multiple times and we don't want + # bodydata to get reset other than the first time. 
+ self.begin_bodydata(None) + return 'waitforendscript' + return 'bodydata' + + + + def dataReceived(self, data): + stateTable = self._buildStateTable() + if not self.state: + # all UTF-16 starts with this string + if data.startswith('\xff\xfe'): + self._prepend = '\xff\xfe' + self.encodings.append('UTF-16') + data = data[2:] + elif data.startswith('\xfe\xff'): + self._prepend = '\xfe\xff' + self.encodings.append('UTF-16') + data = data[2:] + self.state = 'begin' + if self.encodings: + data = self._decode(data) + # bring state, lineno, colno into local scope + lineno, colno = self.lineno, self.colno + curState = self.state + # replace saveMark with a nested scope function + _saveMark = self.saveMark + def saveMark(): + return (lineno, colno) + self.saveMark = saveMark + # fetch functions from the stateTable + beginFn, doFn, endFn = stateTable[curState] + try: + for byte in data: + # do newline stuff + if byte == '\n': + lineno += 1 + colno = 0 + else: + colno += 1 + newState = doFn(byte) + if newState is not None and newState != curState: + # this is the endFn from the previous state + endFn() + curState = newState + beginFn, doFn, endFn = stateTable[curState] + beginFn(byte) + finally: + self.saveMark = _saveMark + self.lineno, self.colno = lineno, colno + # state doesn't make sense if there's an exception.. + self.state = curState + + + def connectionLost(self, reason): + """ + End the last state we were in. + """ + stateTable = self._buildStateTable() + stateTable[self.state][END_HANDLER]() + + + # state methods + + def do_begin(self, byte): + if byte.isspace(): + return + if byte != '<': + if self.beExtremelyLenient: + self._leadingBodyData = byte + return 'bodydata' + self._parseError("First char of document [%r] wasn't <" % (byte,)) + return 'tagstart' + + def begin_comment(self, byte): + self.commentbuf = '' + + def do_comment(self, byte): + self.commentbuf += byte + if self.commentbuf.endswith('-->'): + self.gotComment(self.commentbuf[:-3]) + return 'bodydata' + + def begin_tagstart(self, byte): + self.tagName = '' # name of the tag + self.tagAttributes = {} # attributes of the tag + self.termtag = 0 # is the tag self-terminating + self.endtag = 0 + + def do_tagstart(self, byte): + if byte.isalnum() or byte in identChars: + self.tagName += byte + if self.tagName == '!--': + return 'comment' + elif byte.isspace(): + if self.tagName: + if self.endtag: + # properly strict thing to do here is probably to only + # accept whitespace + return 'waitforgt' + return 'attrs' + else: + self._parseError("Whitespace before tag-name") + elif byte == '>': + if self.endtag: + self.gotTagEnd(self.tagName) + return 'bodydata' + else: + self.gotTagStart(self.tagName, {}) + return (not self.beExtremelyLenient) and 'bodydata' or self.maybeBodyData() + elif byte == '/': + if self.tagName: + return 'afterslash' + else: + self.endtag = 1 + elif byte in '!?': + if self.tagName: + if not self.beExtremelyLenient: + self._parseError("Invalid character in tag-name") + else: + self.tagName += byte + self.termtag = 1 + elif byte == '[': + if self.tagName == '!': + return 'expectcdata' + else: + self._parseError("Invalid '[' in tag-name") + else: + if self.beExtremelyLenient: + self.bodydata = '<' + return 'unentity' + self._parseError('Invalid tag character: %r'% byte) + + def begin_unentity(self, byte): + self.bodydata += byte + + def do_unentity(self, byte): + self.bodydata += byte + return 'bodydata' + + def end_unentity(self): + self.gotText(self.bodydata) + + def begin_expectcdata(self, byte): + 
self.cdatabuf = byte + + def do_expectcdata(self, byte): + self.cdatabuf += byte + cdb = self.cdatabuf + cd = '[CDATA[' + if len(cd) > len(cdb): + if cd.startswith(cdb): + return + elif self.beExtremelyLenient: + ## WHAT THE CRAP!? MSWord9 generates HTML that includes these + ## bizarre chunks, so I've gotta ignore + ## 'em as best I can. this should really be a separate parse + ## state but I don't even have any idea what these _are_. + return 'waitforgt' + else: + self._parseError("Mal-formed CDATA header") + if cd == cdb: + self.cdatabuf = '' + return 'cdata' + self._parseError("Mal-formed CDATA header") + + def do_cdata(self, byte): + self.cdatabuf += byte + if self.cdatabuf.endswith("]]>"): + self.cdatabuf = self.cdatabuf[:-3] + return 'bodydata' + + def end_cdata(self): + self.gotCData(self.cdatabuf) + self.cdatabuf = '' + + def do_attrs(self, byte): + if byte.isalnum() or byte in identChars: + # XXX FIXME really handle !DOCTYPE at some point + if self.tagName == '!DOCTYPE': + return 'doctype' + if self.tagName[0] in '!?': + return 'waitforgt' + return 'attrname' + elif byte.isspace(): + return + elif byte == '>': + self.gotTagStart(self.tagName, self.tagAttributes) + return (not self.beExtremelyLenient) and 'bodydata' or self.maybeBodyData() + elif byte == '/': + return 'afterslash' + elif self.beExtremelyLenient: + # discard and move on? Only case I've seen of this so far was: + # + return + self._parseError("Unexpected character: %r" % byte) + + def begin_doctype(self, byte): + self.doctype = byte + + def do_doctype(self, byte): + if byte == '>': + return 'bodydata' + self.doctype += byte + + def end_doctype(self): + self.gotDoctype(self.doctype) + self.doctype = None + + def do_waitforgt(self, byte): + if byte == '>': + if self.endtag or not self.beExtremelyLenient: + return 'bodydata' + return self.maybeBodyData() + + def begin_attrname(self, byte): + self.attrname = byte + self._attrname_termtag = 0 + + def do_attrname(self, byte): + if byte.isalnum() or byte in identChars: + self.attrname += byte + return + elif byte == '=': + return 'beforeattrval' + elif byte.isspace(): + return 'beforeeq' + elif self.beExtremelyLenient: + if byte in '"\'': + return 'attrval' + if byte in lenientIdentChars or byte.isalnum(): + self.attrname += byte + return + if byte == '/': + self._attrname_termtag = 1 + return + if byte == '>': + self.attrval = 'True' + self.tagAttributes[self.attrname] = self.attrval + self.gotTagStart(self.tagName, self.tagAttributes) + if self._attrname_termtag: + self.gotTagEnd(self.tagName) + return 'bodydata' + return self.maybeBodyData() + # something is really broken. let's leave this attribute where it + # is and move on to the next thing + return + self._parseError("Invalid attribute name: %r %r" % (self.attrname, byte)) + + def do_beforeattrval(self, byte): + if byte in '"\'': + return 'attrval' + elif byte.isspace(): + return + elif self.beExtremelyLenient: + if byte in lenientIdentChars or byte.isalnum(): + return 'messyattr' + if byte == '>': + self.attrval = 'True' + self.tagAttributes[self.attrname] = self.attrval + self.gotTagStart(self.tagName, self.tagAttributes) + return self.maybeBodyData() + if byte == '\\': + # I saw this in actual HTML once: + # SM + return + self._parseError("Invalid initial attribute value: %r; Attribute values must be quoted." 
% byte) + + attrname = '' + attrval = '' + + def begin_beforeeq(self,byte): + self._beforeeq_termtag = 0 + + def do_beforeeq(self, byte): + if byte == '=': + return 'beforeattrval' + elif byte.isspace(): + return + elif self.beExtremelyLenient: + if byte.isalnum() or byte in identChars: + self.attrval = 'True' + self.tagAttributes[self.attrname] = self.attrval + return 'attrname' + elif byte == '>': + self.attrval = 'True' + self.tagAttributes[self.attrname] = self.attrval + self.gotTagStart(self.tagName, self.tagAttributes) + if self._beforeeq_termtag: + self.gotTagEnd(self.tagName) + return 'bodydata' + return self.maybeBodyData() + elif byte == '/': + self._beforeeq_termtag = 1 + return + self._parseError("Invalid attribute") + + def begin_attrval(self, byte): + self.quotetype = byte + self.attrval = '' + + def do_attrval(self, byte): + if byte == self.quotetype: + return 'attrs' + self.attrval += byte + + def end_attrval(self): + self.tagAttributes[self.attrname] = self.attrval + self.attrname = self.attrval = '' + + def begin_messyattr(self, byte): + self.attrval = byte + + def do_messyattr(self, byte): + if byte.isspace(): + return 'attrs' + elif byte == '>': + endTag = 0 + if self.attrval.endswith('/'): + endTag = 1 + self.attrval = self.attrval[:-1] + self.tagAttributes[self.attrname] = self.attrval + self.gotTagStart(self.tagName, self.tagAttributes) + if endTag: + self.gotTagEnd(self.tagName) + return 'bodydata' + return self.maybeBodyData() + else: + self.attrval += byte + + def end_messyattr(self): + if self.attrval: + self.tagAttributes[self.attrname] = self.attrval + + def begin_afterslash(self, byte): + self._after_slash_closed = 0 + + def do_afterslash(self, byte): + # this state is only after a self-terminating slash, e.g. + if self._after_slash_closed: + self._parseError("Mal-formed")#XXX When does this happen?? 
+ if byte != '>': + if self.beExtremelyLenient: + return + else: + self._parseError("No data allowed after '/'") + self._after_slash_closed = 1 + self.gotTagStart(self.tagName, self.tagAttributes) + self.gotTagEnd(self.tagName) + # don't need maybeBodyData here because there better not be + # any javascript code after a , we need to + # remember all the data we've been through so we can append it + # to bodydata + self.temptagdata += byte + + # 1 + if byte == '/': + self.endtag = True + elif not self.endtag: + self.bodydata += "<" + self.temptagdata + return 'waitforendscript' + # 2 + elif byte.isalnum() or byte in identChars: + self.tagName += byte + if not 'script'.startswith(self.tagName): + self.bodydata += "<" + self.temptagdata + return 'waitforendscript' + elif self.tagName == 'script': + self.gotText(self.bodydata) + self.gotTagEnd(self.tagName) + return 'waitforgt' + # 3 + elif byte.isspace(): + return 'waitscriptendtag' + # 4 + else: + self.bodydata += "<" + self.temptagdata + return 'waitforendscript' + + + def begin_entityref(self, byte): + self.erefbuf = '' + self.erefextra = '' # extra bit for lenient mode + + def do_entityref(self, byte): + if byte.isspace() or byte == "<": + if self.beExtremelyLenient: + # '&foo' probably was '&foo' + if self.erefbuf and self.erefbuf != "amp": + self.erefextra = self.erefbuf + self.erefbuf = "amp" + if byte == "<": + return "tagstart" + else: + self.erefextra += byte + return 'spacebodydata' + self._parseError("Bad entity reference") + elif byte != ';': + self.erefbuf += byte + else: + return 'bodydata' + + def end_entityref(self): + self.gotEntityReference(self.erefbuf) + + # hacky support for space after & in entityref in beExtremelyLenient + # state should only happen in that case + def begin_spacebodydata(self, byte): + self.bodydata = self.erefextra + self.erefextra = None + do_spacebodydata = do_bodydata + end_spacebodydata = end_bodydata + + # Sorta SAX-ish API + + def gotTagStart(self, name, attributes): + '''Encountered an opening tag. + + Default behaviour is to print.''' + print 'begin', name, attributes + + def gotText(self, data): + '''Encountered text + + Default behaviour is to print.''' + print 'text:', repr(data) + + def gotEntityReference(self, entityRef): + '''Encountered mnemonic entity reference + + Default behaviour is to print.''' + print 'entityRef: &%s;' % entityRef + + def gotComment(self, comment): + '''Encountered comment. + + Default behaviour is to ignore.''' + pass + + def gotCData(self, cdata): + '''Encountered CDATA + + Default behaviour is to call the gotText method''' + self.gotText(cdata) + + def gotDoctype(self, doctype): + """Encountered DOCTYPE + + This is really grotty: it basically just gives you everything between + '' as an argument. 
+ """ + print '!DOCTYPE', repr(doctype) + + def gotTagEnd(self, name): + '''Encountered closing tag + + Default behaviour is to print.''' + print 'end', name + +if __name__ == '__main__': + from cStringIO import StringIO + testDocument = ''' + + + + + A + + boz &zop; + + + ''' + x = XMLParser() + x.makeConnection(FileWrapper(StringIO())) + # fn = "/home/glyph/Projects/Twisted/doc/howto/ipc10paper.html" + fn = "/home/glyph/gruesome.xml" + # testDocument = open(fn).read() + x.dataReceived(testDocument) diff --git a/vendor/Twisted-10.0.0/twisted/web/tap.py b/vendor/Twisted-10.0.0/twisted/web/tap.py new file mode 100644 index 000000000000..bd3b4079b23a --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/tap.py @@ -0,0 +1,234 @@ +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Support for creating a service which runs a web server. +""" + +import os + +# Twisted Imports +from twisted.web import server, static, twcgi, script, demo, distrib, wsgi +from twisted.internet import interfaces, reactor +from twisted.python import usage, reflect, threadpool +from twisted.spread import pb +from twisted.application import internet, service, strports + + +class Options(usage.Options): + """ + Define the options accepted by the I{twistd web} plugin. + """ + synopsis = "[web options]" + + optParameters = [["port", "p", None, "strports description of the port to " + "start the server on."], + ["logfile", "l", None, "Path to web CLF (Combined Log Format) log file."], + ["https", None, None, "Port to listen on for Secure HTTP."], + ["certificate", "c", "server.pem", "SSL certificate to use for HTTPS. "], + ["privkey", "k", "server.pem", "SSL certificate to use for HTTPS."], + ] + + optFlags = [["personal", "", + "Instead of generating a webserver, generate a " + "ResourcePublisher which listens on the port given by " + "--port, or ~/%s " % (distrib.UserDirectory.userSocketName,) + + "if --port is not specified."], + ["notracebacks", "n", "Do not display tracebacks in broken web pages. " + + "Displaying tracebacks to users may be security risk!"], + ] + + zsh_actions = {"logfile" : "_files -g '*.log'", "certificate" : "_files -g '*.pem'", + "privkey" : "_files -g '*.pem'"} + + + longdesc = """\ +This starts a webserver. If you specify no arguments, it will be a +demo webserver that has the Test class from twisted.web.demo in it.""" + + def __init__(self): + usage.Options.__init__(self) + self['indexes'] = [] + self['root'] = None + + def opt_index(self, indexName): + """ + Add the name of a file used to check for directory indexes. + [default: index, index.html] + """ + self['indexes'].append(indexName) + + opt_i = opt_index + + def opt_user(self): + """ + Makes a server with ~/public_html and ~/.twistd-web-pb support for + users. + """ + self['root'] = distrib.UserDirectory() + + opt_u = opt_user + + def opt_path(self, path): + """ + is either a specific file or a directory to be set as the root + of the web server. Use this if you have a directory full of HTML, cgi, + php3, epy, or rpy files or any other files that you want to be served + up raw. + """ + def trp(*args, **kwargs): + # Help avoid actually importing twisted.web.trp until it is really + # needed. This avoids getting a deprecation warning if you're not + # using deprecated functionality. 
+ from twisted.web import trp + return trp.ResourceUnpickler(*args, **kwargs) + + self['root'] = static.File(os.path.abspath(path)) + self['root'].processors = { + '.cgi': twcgi.CGIScript, + '.php3': twcgi.PHP3Script, + '.php': twcgi.PHPScript, + '.epy': script.PythonScript, + '.rpy': script.ResourceScript, + '.trp': trp, + } + + def opt_processor(self, proc): + """ + `ext=class' where `class' is added as a Processor for files ending + with `ext'. + """ + if not isinstance(self['root'], static.File): + raise usage.UsageError("You can only use --processor after --path.") + ext, klass = proc.split('=', 1) + self['root'].processors[ext] = reflect.namedClass(klass) + + def opt_class(self, className): + """ + Create a Resource subclass with a zero-argument constructor. + """ + classObj = reflect.namedClass(className) + self['root'] = classObj() + + + def opt_resource_script(self, name): + """ + An .rpy file to be used as the root resource of the webserver. + """ + self['root'] = script.ResourceScriptWrapper(name) + + + def opt_wsgi(self, name): + """ + The FQPN of a WSGI application object to serve as the root resource of + the webserver. + """ + pool = threadpool.ThreadPool() + reactor.callWhenRunning(pool.start) + reactor.addSystemEventTrigger('after', 'shutdown', pool.stop) + try: + application = reflect.namedAny(name) + except (AttributeError, ValueError): + raise usage.UsageError("No such WSGI application: %r" % (name,)) + self['root'] = wsgi.WSGIResource(reactor, pool, application) + + + def opt_mime_type(self, defaultType): + """ + Specify the default mime-type for static files. + """ + if not isinstance(self['root'], static.File): + raise usage.UsageError("You can only use --mime_type after --path.") + self['root'].defaultType = defaultType + opt_m = opt_mime_type + + + def opt_allow_ignore_ext(self): + """ + Specify whether or not a request for 'foo' should return 'foo.ext' + """ + if not isinstance(self['root'], static.File): + raise usage.UsageError("You can only use --allow_ignore_ext " + "after --path.") + self['root'].ignoreExt('*') + + def opt_ignore_ext(self, ext): + """ + Specify an extension to ignore. These will be processed in order. + """ + if not isinstance(self['root'], static.File): + raise usage.UsageError("You can only use --ignore_ext " + "after --path.") + self['root'].ignoreExt(ext) + + def postOptions(self): + """ + Set up conditional defaults and check for dependencies. + + If SSL is not available but an HTTPS server was configured, raise a + L{UsageError} indicating that this is not possible. + + If no server port was supplied, select a default appropriate for the + other options supplied. + """ + if self['https']: + try: + from twisted.internet.ssl import DefaultOpenSSLContextFactory + except ImportError: + raise usage.UsageError("SSL support not installed") + if self['port'] is None: + if self['personal']: + path = os.path.expanduser( + os.path.join('~', distrib.UserDirectory.userSocketName)) + self['port'] = 'unix:' + path + else: + self['port'] = 'tcp:8080' + + + +def makePersonalServerFactory(site): + """ + Create and return a factory which will respond to I{distrib} requests + against the given site. 
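As an editorial aside (not part of the original patch), the options defined above can also be exercised programmatically as sketched below; this builds the same configuration that "twistd web --port tcp:8080 --path /srv/www" would from the command line. The path and port are illustrative assumptions.

from twisted.web import tap

config = tap.Options()
config.parseOptions(["--port", "tcp:8080", "--path", "/srv/www"])
webService = tap.makeService(config)   # a MultiService wrapping the strports web server
# Under twistd, the application framework would call webService.startService().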
+ + @type site: L{twisted.web.server.Site} + @rtype: L{twisted.internet.protocol.Factory} + """ + return pb.PBServerFactory(distrib.ResourcePublisher(site)) + + + +def makeService(config): + s = service.MultiService() + if config['root']: + root = config['root'] + if config['indexes']: + config['root'].indexNames = config['indexes'] + else: + # This really ought to be web.Admin or something + root = demo.Test() + + if isinstance(root, static.File): + root.registry.setComponent(interfaces.IServiceCollection, s) + + if config['logfile']: + site = server.Site(root, logPath=config['logfile']) + else: + site = server.Site(root) + + site.displayTracebacks = not config["notracebacks"] + + if config['personal']: + personal = strports.service( + config['port'], makePersonalServerFactory(site)) + personal.setServiceParent(s) + else: + if config['https']: + from twisted.internet.ssl import DefaultOpenSSLContextFactory + i = internet.SSLServer(int(config['https']), site, + DefaultOpenSSLContextFactory(config['privkey'], + config['certificate'])) + i.setServiceParent(s) + strports.service(config['port'], site).setServiceParent(s) + + return s diff --git a/vendor/Twisted-10.0.0/twisted/web/test/__init__.py b/vendor/Twisted-10.0.0/twisted/web/test/__init__.py new file mode 100644 index 000000000000..23b9105f69f4 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web}. +""" + diff --git a/vendor/Twisted-10.0.0/twisted/web/test/_util.py b/vendor/Twisted-10.0.0/twisted/web/test/_util.py new file mode 100644 index 000000000000..c0daa641f79f --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/_util.py @@ -0,0 +1,24 @@ +# Copyright (c) 2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +General helpers for L{twisted.web} unit tests. +""" + +from twisted.internet.defer import succeed +from twisted.web import server + + +def _render(resource, request): + result = resource.render(request) + if isinstance(result, str): + request.write(result) + request.finish() + return succeed(None) + elif result is server.NOT_DONE_YET: + if request.finished: + return succeed(None) + else: + return request.notifyFinish() + else: + raise ValueError("Unexpected return value: %r" % (result,)) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_cgi.py b/vendor/Twisted-10.0.0/twisted/web/test/test_cgi.py new file mode 100755 index 000000000000..2da9c18e6597 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_cgi.py @@ -0,0 +1,190 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.twcgi}. +""" + +import sys, os + +from twisted.trial import unittest +from twisted.internet import reactor, interfaces, error +from twisted.python import util, failure +from twisted.web.http import NOT_FOUND, INTERNAL_SERVER_ERROR +from twisted.web import client, twcgi, server, resource +from twisted.web.test._util import _render +from twisted.web.test.test_web import DummyRequest + +DUMMY_CGI = '''\ +print "Header: OK" +print +print "cgi output" +''' + +READINPUT_CGI = '''\ +# this is an example of a correctly-written CGI script which reads a body +# from stdin, which only reads env['CONTENT_LENGTH'] bytes. 
+ +import os, sys + +body_length = int(os.environ.get('CONTENT_LENGTH',0)) +indata = sys.stdin.read(body_length) +print "Header: OK" +print +print "readinput ok" +''' + +READALLINPUT_CGI = '''\ +# this is an example of the typical (incorrect) CGI script which expects +# the server to close stdin when the body of the request is complete. +# A correct CGI should only read env['CONTENT_LENGTH'] bytes. + +import sys + +indata = sys.stdin.read() +print "Header: OK" +print +print "readallinput ok" +''' + +class PythonScript(twcgi.FilteredScript): + filter = sys.executable + filters = sys.executable, # web2's version + +class CGI(unittest.TestCase): + """ + Tests for L{twcgi.FilteredScript}. + """ + + if not interfaces.IReactorProcess.providedBy(reactor): + skip = "CGI tests require a functional reactor.spawnProcess()" + + def startServer(self, cgi): + root = resource.Resource() + cgipath = util.sibpath(__file__, cgi) + root.putChild("cgi", PythonScript(cgipath)) + site = server.Site(root) + self.p = reactor.listenTCP(0, site) + return self.p.getHost().port + + def tearDown(self): + if self.p: + return self.p.stopListening() + + + def testCGI(self): + cgiFilename = os.path.abspath(self.mktemp()) + cgiFile = file(cgiFilename, 'wt') + cgiFile.write(DUMMY_CGI) + cgiFile.close() + + portnum = self.startServer(cgiFilename) + d = client.getPage("http://localhost:%d/cgi" % portnum) + d.addCallback(self._testCGI_1) + return d + def _testCGI_1(self, res): + self.failUnlessEqual(res, "cgi output" + os.linesep) + + + def testReadEmptyInput(self): + cgiFilename = os.path.abspath(self.mktemp()) + cgiFile = file(cgiFilename, 'wt') + cgiFile.write(READINPUT_CGI) + cgiFile.close() + + portnum = self.startServer(cgiFilename) + d = client.getPage("http://localhost:%d/cgi" % portnum) + d.addCallback(self._testReadEmptyInput_1) + return d + testReadEmptyInput.timeout = 5 + def _testReadEmptyInput_1(self, res): + self.failUnlessEqual(res, "readinput ok%s" % os.linesep) + + def testReadInput(self): + cgiFilename = os.path.abspath(self.mktemp()) + cgiFile = file(cgiFilename, 'wt') + cgiFile.write(READINPUT_CGI) + cgiFile.close() + + portnum = self.startServer(cgiFilename) + d = client.getPage("http://localhost:%d/cgi" % portnum, + method="POST", + postdata="Here is your stdin") + d.addCallback(self._testReadInput_1) + return d + testReadInput.timeout = 5 + def _testReadInput_1(self, res): + self.failUnlessEqual(res, "readinput ok%s" % os.linesep) + + + def testReadAllInput(self): + cgiFilename = os.path.abspath(self.mktemp()) + cgiFile = file(cgiFilename, 'wt') + cgiFile.write(READALLINPUT_CGI) + cgiFile.close() + + portnum = self.startServer(cgiFilename) + d = client.getPage("http://localhost:%d/cgi" % portnum, + method="POST", + postdata="Here is your stdin") + d.addCallback(self._testReadAllInput_1) + return d + testReadAllInput.timeout = 5 + def _testReadAllInput_1(self, res): + self.failUnlessEqual(res, "readallinput ok%s" % os.linesep) + + + +class CGIDirectoryTests(unittest.TestCase): + """ + Tests for L{twcgi.CGIDirectory}. + """ + def test_render(self): + """ + L{twcgi.CGIDirectory.render} sets the HTTP response code to I{NOT + FOUND}. 
+ """ + resource = twcgi.CGIDirectory(self.mktemp()) + request = DummyRequest(['']) + d = _render(resource, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, NOT_FOUND) + d.addCallback(cbRendered) + return d + + + def test_notFoundChild(self): + """ + L{twcgi.CGIDirectory.getChild} returns a resource which renders an + response with the HTTP I{NOT FOUND} status code if the indicated child + does not exist as an entry in the directory used to initialized the + L{twcgi.CGIDirectory}. + """ + path = self.mktemp() + os.makedirs(path) + resource = twcgi.CGIDirectory(path) + request = DummyRequest(['foo']) + child = resource.getChild("foo", request) + d = _render(child, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, NOT_FOUND) + d.addCallback(cbRendered) + return d + + + +class CGIProcessProtocolTests(unittest.TestCase): + """ + Tests for L{twcgi.CGIProcessProtocol}. + """ + def test_prematureEndOfHeaders(self): + """ + If the process communicating with L{CGIProcessProtocol} ends before + finishing writing out headers, the response has I{INTERNAL SERVER + ERROR} as its status code. + """ + request = DummyRequest(['']) + protocol = twcgi.CGIProcessProtocol(request) + protocol.processEnded(failure.Failure(error.ProcessTerminated())) + self.assertEqual(request.responseCode, INTERNAL_SERVER_ERROR) + diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_distrib.py b/vendor/Twisted-10.0.0/twisted/web/test/test_distrib.py new file mode 100755 index 000000000000..d7960d291fd9 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_distrib.py @@ -0,0 +1,361 @@ +# Copyright (c) 2008-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.distrib}. +""" + +from os.path import abspath +from xml.dom.minidom import parseString +try: + import pwd +except ImportError: + pwd = None + +from zope.interface.verify import verifyObject + +from twisted.python import log, filepath +from twisted.internet import reactor, defer +from twisted.trial import unittest +from twisted.spread import pb +from twisted.spread.banana import SIZE_LIMIT +from twisted.web import http, distrib, client, resource, static, server +from twisted.web.test.test_web import DummyRequest +from twisted.web.test._util import _render + + +class MySite(server.Site): + def stopFactory(self): + if hasattr(self, "logFile"): + if self.logFile != log.logfile: + self.logFile.close() + del self.logFile + + + +class PBServerFactory(pb.PBServerFactory): + """ + A PB server factory which keeps track of the most recent protocol it + created. + + @ivar proto: L{None} or the L{Broker} instance most recently returned + from C{buildProtocol}. + """ + proto = None + + def buildProtocol(self, addr): + self.proto = pb.PBServerFactory.buildProtocol(self, addr) + return self.proto + + + +class DistribTest(unittest.TestCase): + port1 = None + port2 = None + sub = None + f1 = None + + def tearDown(self): + """ + Clean up all the event sources left behind by either directly by + test methods or indirectly via some distrib API. 
+ """ + dl = [defer.Deferred(), defer.Deferred()] + if self.f1 is not None and self.f1.proto is not None: + self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None)) + else: + dl[0].callback(None) + if self.sub is not None and self.sub.publisher is not None: + self.sub.publisher.broker.notifyOnDisconnect( + lambda: dl[1].callback(None)) + self.sub.publisher.broker.transport.loseConnection() + else: + dl[1].callback(None) + http._logDateTimeStop() + if self.port1 is not None: + dl.append(self.port1.stopListening()) + if self.port2 is not None: + dl.append(self.port2.stopListening()) + return defer.gatherResults(dl) + + + def testDistrib(self): + # site1 is the publisher + r1 = resource.Resource() + r1.putChild("there", static.Data("root", "text/plain")) + site1 = server.Site(r1) + self.f1 = PBServerFactory(distrib.ResourcePublisher(site1)) + self.port1 = reactor.listenTCP(0, self.f1) + self.sub = distrib.ResourceSubscription("127.0.0.1", + self.port1.getHost().port) + r2 = resource.Resource() + r2.putChild("here", self.sub) + f2 = MySite(r2) + self.port2 = reactor.listenTCP(0, f2) + d = client.getPage("http://127.0.0.1:%d/here/there" % \ + self.port2.getHost().port) + d.addCallback(self.failUnlessEqual, 'root') + return d + + + def _requestTest(self, child, **kwargs): + """ + Set up a resource on a distrib site using L{ResourcePublisher} and + then retrieve it from a L{ResourceSubscription} via an HTTP client. + + @param child: The resource to publish using distrib. + @param **kwargs: Extra keyword arguments to pass to L{getPage} when + requesting the resource. + + @return: A L{Deferred} which fires with the result of the request. + """ + distribRoot = resource.Resource() + distribRoot.putChild("child", child) + distribSite = server.Site(distribRoot) + self.f1 = distribFactory = PBServerFactory( + distrib.ResourcePublisher(distribSite)) + distribPort = reactor.listenTCP( + 0, distribFactory, interface="127.0.0.1") + self.addCleanup(distribPort.stopListening) + addr = distribPort.getHost() + + self.sub = mainRoot = distrib.ResourceSubscription( + addr.host, addr.port) + mainSite = server.Site(mainRoot) + mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1") + self.addCleanup(mainPort.stopListening) + mainAddr = mainPort.getHost() + + return client.getPage("http://%s:%s/child" % ( + mainAddr.host, mainAddr.port), **kwargs) + + + + def test_requestHeaders(self): + """ + The request headers are available on the request object passed to a + distributed resource's C{render} method. + """ + requestHeaders = {} + + class ReportRequestHeaders(resource.Resource): + def render(self, request): + requestHeaders.update(dict( + request.requestHeaders.getAllRawHeaders())) + return "" + + request = self._requestTest( + ReportRequestHeaders(), headers={'foo': 'bar'}) + def cbRequested(result): + self.assertEquals(requestHeaders['Foo'], ['bar']) + request.addCallback(cbRequested) + return request + + + def test_largeWrite(self): + """ + If a string longer than the Banana size limit is passed to the + L{distrib.Request} passed to the remote resource, it is broken into + smaller strings to be transported over the PB connection. 
+ """ + class LargeWrite(resource.Resource): + def render(self, request): + request.write('x' * SIZE_LIMIT + 'y') + request.finish() + return server.NOT_DONE_YET + + request = self._requestTest(LargeWrite()) + request.addCallback(self.assertEquals, 'x' * SIZE_LIMIT + 'y') + return request + + + def test_largeReturn(self): + """ + Like L{test_largeWrite}, but for the case where C{render} returns a + long string rather than explicitly passing it to L{Request.write}. + """ + class LargeReturn(resource.Resource): + def render(self, request): + return 'x' * SIZE_LIMIT + 'y' + + request = self._requestTest(LargeReturn()) + request.addCallback(self.assertEquals, 'x' * SIZE_LIMIT + 'y') + return request + + + def test_connectionLost(self): + """ + If there is an error issuing the request to the remote publisher, an + error response is returned. + """ + # Using pb.Root as a publisher will cause request calls to fail with an + # error every time. Just what we want to test. + self.f1 = serverFactory = PBServerFactory(pb.Root()) + self.port1 = serverPort = reactor.listenTCP(0, serverFactory) + + self.sub = subscription = distrib.ResourceSubscription( + "127.0.0.1", serverPort.getHost().port) + request = DummyRequest(['']) + d = _render(subscription, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, 500) + # This is the error we caused the request to fail with. It should + # have been logged. + self.assertEqual(len(self.flushLoggedErrors(pb.NoSuchMethod)), 1) + d.addCallback(cbRendered) + return d + + + +class _PasswordDatabase: + def __init__(self, users): + self._users = users + + + def getpwall(self): + return iter(self._users) + + + def getpwnam(self, username): + for user in self._users: + if user[0] == username: + return user + raise KeyError() + + + +class UserDirectoryTests(unittest.TestCase): + """ + Tests for L{UserDirectory}, a resource for listing all user resources + available on a system. + """ + def setUp(self): + self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh') + self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh') + self.database = _PasswordDatabase([self.alice, self.bob]) + self.directory = distrib.UserDirectory(self.database) + + + def test_interface(self): + """ + L{UserDirectory} instances provide L{resource.IResource}. + """ + self.assertTrue(verifyObject(resource.IResource, self.directory)) + + + def _404Test(self, name): + """ + Verify that requesting the C{name} child of C{self.directory} results + in a 404 response. + """ + request = DummyRequest([name]) + result = self.directory.getChild(name, request) + d = _render(result, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, 404) + d.addCallback(cbRendered) + return d + + + def test_getInvalidUser(self): + """ + L{UserDirectory.getChild} returns a resource which renders a 404 + response when passed a string which does not correspond to any known + user. + """ + return self._404Test('carol') + + + def test_getUserWithoutResource(self): + """ + L{UserDirectory.getChild} returns a resource which renders a 404 + response when passed a string which corresponds to a known user who has + neither a user directory nor a user distrib socket. + """ + return self._404Test('alice') + + + def test_getPublicHTMLChild(self): + """ + L{UserDirectory.getChild} returns a L{static.File} instance when passed + the name of a user with a home directory containing a I{public_html} + directory. 
+ """ + home = filepath.FilePath(self.bob[-2]) + public_html = home.child('public_html') + public_html.makedirs() + request = DummyRequest(['bob']) + result = self.directory.getChild('bob', request) + self.assertIsInstance(result, static.File) + self.assertEqual(result.path, public_html.path) + + + def test_getDistribChild(self): + """ + L{UserDirectory.getChild} returns a L{ResourceSubscription} instance + when passed the name of a user suffixed with C{".twistd"} who has a + home directory containing a I{.twistd-web-pb} socket. + """ + home = filepath.FilePath(self.bob[-2]) + home.makedirs() + web = home.child('.twistd-web-pb') + request = DummyRequest(['bob']) + result = self.directory.getChild('bob.twistd', request) + self.assertIsInstance(result, distrib.ResourceSubscription) + self.assertEqual(result.host, 'unix') + self.assertEqual(abspath(result.port), web.path) + + + def test_invalidMethod(self): + """ + L{UserDirectory.render} raises L{UnsupportedMethod} in response to a + non-I{GET} request. + """ + request = DummyRequest(['']) + request.method = 'POST' + self.assertRaises( + server.UnsupportedMethod, self.directory.render, request) + + + def test_render(self): + """ + L{UserDirectory} renders a list of links to available user content + in response to a I{GET} request. + """ + public_html = filepath.FilePath(self.alice[-2]).child('public_html') + public_html.makedirs() + web = filepath.FilePath(self.bob[-2]) + web.makedirs() + # This really only works if it's a unix socket, but the implementation + # doesn't currently check for that. It probably should someday, and + # then skip users with non-sockets. + web.child('.twistd-web-pb').setContent("") + + request = DummyRequest(['']) + result = _render(self.directory, request) + def cbRendered(ignored): + document = parseString(''.join(request.written)) + + # Each user should have an li with a link to their page. + [alice, bob] = document.getElementsByTagName('li') + self.assertEqual(alice.firstChild.tagName, 'a') + self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/') + self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)') + self.assertEqual(bob.firstChild.tagName, 'a') + self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/') + self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)') + + result.addCallback(cbRendered) + return result + + + def test_passwordDatabase(self): + """ + If L{UserDirectory} is instantiated with no arguments, it uses the + L{pwd} module as its password database. + """ + directory = distrib.UserDirectory() + self.assertIdentical(directory._pwd, pwd) + if pwd is None: + test_passwordDatabase.skip = "pwd module required" + diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_domhelpers.py b/vendor/Twisted-10.0.0/twisted/web/test/test_domhelpers.py new file mode 100644 index 000000000000..17113f6752b9 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_domhelpers.py @@ -0,0 +1,306 @@ +# -*- test-case-name: twisted.web.test.test_domhelpers -*- +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Specific tests for (some of) the methods in L{twisted.web.domhelpers}. +""" + +from xml.dom import minidom + +from twisted.trial.unittest import TestCase + +from twisted.web import microdom + +from twisted.web import domhelpers + + +class DOMHelpersTestsMixin: + """ + A mixin for L{TestCase} subclasses which defines test methods for + domhelpers functionality based on a DOM creation function provided by a + subclass. 
+ """ + dom = None + + def test_getElementsByTagName(self): + doc1 = self.dom.parseString('') + actual=domhelpers.getElementsByTagName(doc1, 'foo')[0].nodeName + expected='foo' + self.assertEquals(actual, expected) + el1=doc1.documentElement + actual=domhelpers.getElementsByTagName(el1, 'foo')[0].nodeName + self.assertEqual(actual, expected) + + doc2_xml='' + doc2 = self.dom.parseString(doc2_xml) + tag_list=domhelpers.getElementsByTagName(doc2, 'foo') + actual=''.join([node.getAttribute('in') for node in tag_list]) + expected='abcdefgh' + self.assertEquals(actual, expected) + el2=doc2.documentElement + tag_list=domhelpers.getElementsByTagName(el2, 'foo') + actual=''.join([node.getAttribute('in') for node in tag_list]) + self.assertEqual(actual, expected) + + doc3_xml=''' + + + + + + + + + + + + + + + +''' + doc3 = self.dom.parseString(doc3_xml) + tag_list=domhelpers.getElementsByTagName(doc3, 'foo') + actual=''.join([node.getAttribute('in') for node in tag_list]) + expected='abdgheicfj' + self.assertEquals(actual, expected) + el3=doc3.documentElement + tag_list=domhelpers.getElementsByTagName(el3, 'foo') + actual=''.join([node.getAttribute('in') for node in tag_list]) + self.assertEqual(actual, expected) + + doc4_xml='' + doc4 = self.dom.parseString(doc4_xml) + actual=domhelpers.getElementsByTagName(doc4, 'foo') + root=doc4.documentElement + expected=[root, root.childNodes[-1].childNodes[0]] + self.assertEquals(actual, expected) + actual=domhelpers.getElementsByTagName(root, 'foo') + self.assertEqual(actual, expected) + + + def test_gatherTextNodes(self): + doc1 = self.dom.parseString('foo') + actual=domhelpers.gatherTextNodes(doc1) + expected='foo' + self.assertEqual(actual, expected) + actual=domhelpers.gatherTextNodes(doc1.documentElement) + self.assertEqual(actual, expected) + + doc2_xml='abcdefgh' + doc2 = self.dom.parseString(doc2_xml) + actual=domhelpers.gatherTextNodes(doc2) + expected='abcdefgh' + self.assertEqual(actual, expected) + actual=domhelpers.gatherTextNodes(doc2.documentElement) + self.assertEqual(actual, expected) + + doc3_xml=('abdghei' + + 'cfj') + doc3 = self.dom.parseString(doc3_xml) + actual=domhelpers.gatherTextNodes(doc3) + expected='abdgheicfj' + self.assertEqual(actual, expected) + actual=domhelpers.gatherTextNodes(doc3.documentElement) + self.assertEqual(actual, expected) + + def test_clearNode(self): + doc1 = self.dom.parseString('') + a_node=doc1.documentElement + domhelpers.clearNode(a_node) + self.assertEqual( + a_node.toxml(), + self.dom.Element('a').toxml()) + + doc2 = self.dom.parseString('') + b_node=doc2.documentElement.childNodes[0] + domhelpers.clearNode(b_node) + actual=doc2.documentElement.toxml() + expected = self.dom.Element('a') + expected.appendChild(self.dom.Element('b')) + self.assertEqual(actual, expected.toxml()) + + + def test_get(self): + doc1 = self.dom.parseString('') + node=domhelpers.get(doc1, "foo") + actual=node.toxml() + expected = self.dom.Element('c') + expected.setAttribute('class', 'foo') + self.assertEqual(actual, expected.toxml()) + + node=domhelpers.get(doc1, "bar") + actual=node.toxml() + expected = self.dom.Element('b') + expected.setAttribute('id', 'bar') + self.assertEqual(actual, expected.toxml()) + + self.assertRaises(domhelpers.NodeLookupError, + domhelpers.get, + doc1, + "pzork") + + def test_getIfExists(self): + doc1 = self.dom.parseString('') + node=domhelpers.getIfExists(doc1, "foo") + actual=node.toxml() + expected = self.dom.Element('c') + expected.setAttribute('class', 'foo') + self.assertEqual(actual, 
expected.toxml()) + + node=domhelpers.getIfExists(doc1, "pzork") + self.assertIdentical(node, None) + + + def test_getAndClear(self): + doc1 = self.dom.parseString('') + node=domhelpers.getAndClear(doc1, "foo") + actual=node.toxml() + expected = self.dom.Element('b') + expected.setAttribute('id', 'foo') + self.assertEqual(actual, expected.toxml()) + + + def test_locateNodes(self): + doc1 = self.dom.parseString('') + node_list=domhelpers.locateNodes( + doc1.childNodes, 'foo', 'olive', noNesting=1) + actual=''.join([node.toxml() for node in node_list]) + expected = self.dom.Element('b') + expected.setAttribute('foo', 'olive') + c = self.dom.Element('c') + c.setAttribute('foo', 'olive') + expected.appendChild(c) + + self.assertEqual(actual, expected.toxml()) + + node_list=domhelpers.locateNodes( + doc1.childNodes, 'foo', 'olive', noNesting=0) + actual=''.join([node.toxml() for node in node_list]) + self.assertEqual(actual, expected.toxml() + c.toxml()) + + + def test_getParents(self): + doc1 = self.dom.parseString('') + node_list = domhelpers.getParents( + doc1.childNodes[0].childNodes[0].childNodes[0]) + actual = ''.join([node.tagName for node in node_list + if hasattr(node, 'tagName')]) + self.assertEqual(actual, 'cba') + + + def test_findElementsWithAttribute(self): + doc1 = self.dom.parseString('') + node_list = domhelpers.findElementsWithAttribute(doc1, 'foo') + actual = ''.join([node.tagName for node in node_list]) + self.assertEqual(actual, 'abc') + + node_list = domhelpers.findElementsWithAttribute(doc1, 'foo', '1') + actual = ''.join([node.tagName for node in node_list]) + self.assertEqual(actual, 'ac') + + + def test_findNodesNamed(self): + doc1 = self.dom.parseString('a') + node_list = domhelpers.findNodesNamed(doc1, 'foo') + actual = len(node_list) + self.assertEqual(actual, 2) + + # NOT SURE WHAT THESE ARE SUPPOSED TO DO.. + # def test_RawText FIXME + # def test_superSetAttribute FIXME + # def test_superPrependAttribute FIXME + # def test_superAppendAttribute FIXME + # def test_substitute FIXME + + def test_escape(self): + j='this string " contains many & characters> xml< won\'t like' + expected='this string " contains many & characters> xml< won\'t like' + self.assertEqual(domhelpers.escape(j), expected) + + def test_unescape(self): + j='this string " has && entities > < and some characters xml won\'t like<' + expected='this string " has && entities > < and some characters xml won\'t like<' + self.assertEqual(domhelpers.unescape(j), expected) + + + def test_getNodeText(self): + """ + L{getNodeText} returns the concatenation of all the text data at or + beneath the node passed to it. + """ + node = self.dom.parseString('bazquux') + self.assertEqual(domhelpers.getNodeText(node), "bazquux") + + + +class MicroDOMHelpersTests(DOMHelpersTestsMixin, TestCase): + dom = microdom + + def test_gatherTextNodesDropsWhitespace(self): + """ + Microdom discards whitespace-only text nodes, so L{gatherTextNodes} + returns only the text from nodes which had non-whitespace characters. + """ + doc4_xml=''' + + + + stuff + + +''' + doc4 = self.dom.parseString(doc4_xml) + actual = domhelpers.gatherTextNodes(doc4) + expected = '\n stuff\n ' + self.assertEqual(actual, expected) + actual = domhelpers.gatherTextNodes(doc4.documentElement) + self.assertEqual(actual, expected) + + + def test_textEntitiesNotDecoded(self): + """ + Microdom does not decode entities in text nodes. 
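A quick illustration of the difference the two DOM-specific test classes pin down; the XML literal here is an assumption, not taken from the tests.

    from xml.dom import minidom
    from twisted.web import domhelpers, microdom

    xml = '<x>fish &amp; chips</x>'
    domhelpers.gatherTextNodes(microdom.parseString(xml))  # entity kept: 'fish &amp; chips'
    domhelpers.gatherTextNodes(minidom.parseString(xml))   # entity decoded: u'fish & chips'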
+ """ + doc5_xml='Souffl&' + doc5 = self.dom.parseString(doc5_xml) + actual=domhelpers.gatherTextNodes(doc5) + expected='Souffl&' + self.assertEqual(actual, expected) + actual=domhelpers.gatherTextNodes(doc5.documentElement) + self.assertEqual(actual, expected) + + + +class MiniDOMHelpersTests(DOMHelpersTestsMixin, TestCase): + dom = minidom + + def test_textEntitiesDecoded(self): + """ + Minidom does decode entities in text nodes. + """ + doc5_xml='Souffl&' + doc5 = self.dom.parseString(doc5_xml) + actual=domhelpers.gatherTextNodes(doc5) + expected='Souffl&' + self.assertEqual(actual, expected) + actual=domhelpers.gatherTextNodes(doc5.documentElement) + self.assertEqual(actual, expected) + + + def test_getNodeUnicodeText(self): + """ + L{domhelpers.getNodeText} returns a C{unicode} string when text + nodes are represented in the DOM with unicode, whether or not there + are non-ASCII characters present. + """ + node = self.dom.parseString("bar") + text = domhelpers.getNodeText(node) + self.assertEqual(text, u"bar") + self.assertIsInstance(text, unicode) + + node = self.dom.parseString(u"\N{SNOWMAN}".encode('utf-8')) + text = domhelpers.getNodeText(node) + self.assertEqual(text, u"\N{SNOWMAN}") + self.assertIsInstance(text, unicode) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_error.py b/vendor/Twisted-10.0.0/twisted/web/test/test_error.py new file mode 100644 index 000000000000..4de7738dd5cd --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_error.py @@ -0,0 +1,151 @@ +# Copyright (c) 2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +HTTP errors. +""" + +from twisted.trial import unittest +from twisted.web import error + +class ErrorTestCase(unittest.TestCase): + """ + Tests for how L{Error} attributes are initialized. + """ + def test_noMessageValidStatus(self): + """ + If no C{message} argument is passed to the L{Error} constructor and the + C{code} argument is a valid HTTP status code, C{code} is mapped to a + descriptive string to which C{message} is assigned. + """ + e = error.Error("200") + self.assertEquals(e.message, "OK") + + + def test_noMessageInvalidStatus(self): + """ + If no C{message} argument is passed to the L{Error} constructor and + C{code} isn't a valid HTTP status code, C{message} stays C{None}. + """ + e = error.Error("InvalidCode") + self.assertEquals(e.message, None) + + + def test_messageExists(self): + """ + If a C{message} argument is passed to the L{Error} constructor, the + C{message} isn't affected by the value of C{status}. + """ + e = error.Error("200", "My own message") + self.assertEquals(e.message, "My own message") + + + +class PageRedirectTestCase(unittest.TestCase): + """ + Tests for how L{PageRedirect} attributes are initialized. + """ + def test_noMessageValidStatus(self): + """ + If no C{message} argument is passed to the L{PageRedirect} constructor + and the C{code} argument is a valid HTTP status code, C{code} is mapped + to a descriptive string to which C{message} is assigned. + """ + e = error.PageRedirect("200", location="/foo") + self.assertEquals(e.message, "OK to /foo") + + + def test_noMessageValidStatusNoLocation(self): + """ + If no C{message} argument is passed to the L{PageRedirect} constructor + and C{location} is also empty and the C{code} argument is a valid HTTP + status code, C{code} is mapped to a descriptive string to which + C{message} is assigned without trying to include an empty location. 
+ """ + e = error.PageRedirect("200") + self.assertEquals(e.message, "OK") + + + def test_noMessageInvalidStatusLocationExists(self): + """ + If no C{message} argument is passed to the L{PageRedirect} constructor + and C{code} isn't a valid HTTP status code, C{message} stays C{None}. + """ + e = error.PageRedirect("InvalidCode", location="/foo") + self.assertEquals(e.message, None) + + + def test_messageExistsLocationExists(self): + """ + If a C{message} argument is passed to the L{PageRedirect} constructor, + the C{message} isn't affected by the value of C{status}. + """ + e = error.PageRedirect("200", "My own message", location="/foo") + self.assertEquals(e.message, "My own message to /foo") + + + def test_messageExistsNoLocation(self): + """ + If a C{message} argument is passed to the L{PageRedirect} constructor + and no location is provided, C{message} doesn't try to include the empty + location. + """ + e = error.PageRedirect("200", "My own message") + self.assertEquals(e.message, "My own message") + + + +class InfiniteRedirectionTestCase(unittest.TestCase): + """ + Tests for how L{InfiniteRedirection} attributes are initialized. + """ + def test_noMessageValidStatus(self): + """ + If no C{message} argument is passed to the L{InfiniteRedirection} + constructor and the C{code} argument is a valid HTTP status code, + C{code} is mapped to a descriptive string to which C{message} is + assigned. + """ + e = error.InfiniteRedirection("200", location="/foo") + self.assertEquals(e.message, "OK to /foo") + + + def test_noMessageValidStatusNoLocation(self): + """ + If no C{message} argument is passed to the L{InfiniteRedirection} + constructor and C{location} is also empty and the C{code} argument is a + valid HTTP status code, C{code} is mapped to a descriptive string to + which C{message} is assigned without trying to include an empty + location. + """ + e = error.InfiniteRedirection("200") + self.assertEquals(e.message, "OK") + + + def test_noMessageInvalidStatusLocationExists(self): + """ + If no C{message} argument is passed to the L{InfiniteRedirection} + constructor and C{code} isn't a valid HTTP status code, C{message} stays + C{None}. + """ + e = error.InfiniteRedirection("InvalidCode", location="/foo") + self.assertEquals(e.message, None) + + + def test_messageExistsLocationExists(self): + """ + If a C{message} argument is passed to the L{InfiniteRedirection} + constructor, the C{message} isn't affected by the value of C{status}. + """ + e = error.InfiniteRedirection("200", "My own message", location="/foo") + self.assertEquals(e.message, "My own message to /foo") + + + def test_messageExistsNoLocation(self): + """ + If a C{message} argument is passed to the L{InfiniteRedirection} + constructor and no location is provided, C{message} doesn't try to + include the empty location. + """ + e = error.InfiniteRedirection("200", "My own message") + self.assertEquals(e.message, "My own message") diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_http.py b/vendor/Twisted-10.0.0/twisted/web/test/test_http.py new file mode 100644 index 000000000000..d2b2f6c65d34 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_http.py @@ -0,0 +1,1531 @@ +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Test HTTP support. 
+""" + +from urlparse import urlparse, urlunsplit, clear_cache +import random, urllib, cgi + +from twisted.python.compat import set +from twisted.python.failure import Failure +from twisted.trial import unittest +from twisted.trial.unittest import TestCase +from twisted.web import http, http_headers +from twisted.web.http import PotentialDataLoss, _DataLoss +from twisted.web.http import _IdentityTransferDecoder +from twisted.protocols import loopback +from twisted.internet.task import Clock +from twisted.internet.error import ConnectionLost +from twisted.test.proto_helpers import StringTransport +from twisted.test.test_internet import DummyProducer +from twisted.web.test.test_web import DummyChannel + + +class DateTimeTest(unittest.TestCase): + """Test date parsing functions.""" + + def testRoundtrip(self): + for i in range(10000): + time = random.randint(0, 2000000000) + timestr = http.datetimeToString(time) + time2 = http.stringToDatetime(timestr) + self.assertEquals(time, time2) + + +class DummyHTTPHandler(http.Request): + + def process(self): + self.content.seek(0, 0) + data = self.content.read() + length = self.getHeader('content-length') + request = "'''\n"+str(length)+"\n"+data+"'''\n" + self.setResponseCode(200) + self.setHeader("Request", self.uri) + self.setHeader("Command", self.method) + self.setHeader("Version", self.clientproto) + self.setHeader("Content-Length", len(request)) + self.write(request) + self.finish() + + +class LoopbackHTTPClient(http.HTTPClient): + + def connectionMade(self): + self.sendCommand("GET", "/foo/bar") + self.sendHeader("Content-Length", 10) + self.endHeaders() + self.transport.write("0123456789") + + +class ResponseTestMixin(object): + """ + A mixin that provides a simple means of comparing an actual response string + to an expected response string by performing the minimal parsing. + """ + + def assertResponseEquals(self, responses, expected): + """ + Assert that the C{responses} matches the C{expected} responses. + + @type responses: C{str} + @param responses: The bytes sent in response to one or more requests. + + @type expected: C{list} of C{tuple} of C{str} + @param expected: The expected values for the responses. Each tuple + element of the list represents one response. Each string element + of the tuple is a full header line without delimiter, except for + the last element which gives the full response body. + """ + for response in expected: + expectedHeaders, expectedContent = response[:-1], response[-1] + headers, rest = responses.split('\r\n\r\n', 1) + headers = headers.splitlines() + self.assertEqual(set(headers), set(expectedHeaders)) + content = rest[:len(expectedContent)] + responses = rest[len(expectedContent):] + self.assertEqual(content, expectedContent) + + + +class HTTP1_0TestCase(unittest.TestCase, ResponseTestMixin): + requests = ( + "GET / HTTP/1.0\r\n" + "\r\n" + "GET / HTTP/1.1\r\n" + "Accept: text/html\r\n" + "\r\n") + + expected_response = [ + ("HTTP/1.0 200 OK", + "Request: /", + "Command: GET", + "Version: HTTP/1.0", + "Content-Length: 13", + "'''\nNone\n'''\n")] + + def test_buffer(self): + """ + Send requests over a channel and check responses match what is expected. + """ + b = StringTransport() + a = http.HTTPChannel() + a.requestFactory = DummyHTTPHandler + a.makeConnection(b) + # one byte at a time, to stress it. 
+ for byte in self.requests: + a.dataReceived(byte) + a.connectionLost(IOError("all one")) + value = b.value() + self.assertResponseEquals(value, self.expected_response) + + + def test_requestBodyTimeout(self): + """ + L{HTTPChannel} resets its timeout whenever data from a request body is + delivered to it. + """ + clock = Clock() + transport = StringTransport() + protocol = http.HTTPChannel() + protocol.timeOut = 100 + protocol.callLater = clock.callLater + protocol.makeConnection(transport) + protocol.dataReceived('POST / HTTP/1.0\r\nContent-Length: 2\r\n\r\n') + clock.advance(99) + self.assertFalse(transport.disconnecting) + protocol.dataReceived('x') + clock.advance(99) + self.assertFalse(transport.disconnecting) + protocol.dataReceived('x') + self.assertEqual(len(protocol.requests), 1) + + + +class HTTP1_1TestCase(HTTP1_0TestCase): + + requests = ( + "GET / HTTP/1.1\r\n" + "Accept: text/html\r\n" + "\r\n" + "POST / HTTP/1.1\r\n" + "Content-Length: 10\r\n" + "\r\n" + "0123456789POST / HTTP/1.1\r\n" + "Content-Length: 10\r\n" + "\r\n" + "0123456789HEAD / HTTP/1.1\r\n" + "\r\n") + + expected_response = [ + ("HTTP/1.1 200 OK", + "Request: /", + "Command: GET", + "Version: HTTP/1.1", + "Content-Length: 13", + "'''\nNone\n'''\n"), + ("HTTP/1.1 200 OK", + "Request: /", + "Command: POST", + "Version: HTTP/1.1", + "Content-Length: 21", + "'''\n10\n0123456789'''\n"), + ("HTTP/1.1 200 OK", + "Request: /", + "Command: POST", + "Version: HTTP/1.1", + "Content-Length: 21", + "'''\n10\n0123456789'''\n"), + ("HTTP/1.1 200 OK", + "Request: /", + "Command: HEAD", + "Version: HTTP/1.1", + "Content-Length: 13", + "")] + + + +class HTTP1_1_close_TestCase(HTTP1_0TestCase): + + requests = ( + "GET / HTTP/1.1\r\n" + "Accept: text/html\r\n" + "Connection: close\r\n" + "\r\n" + "GET / HTTP/1.0\r\n" + "\r\n") + + expected_response = [ + ("HTTP/1.1 200 OK", + "Connection: close", + "Request: /", + "Command: GET", + "Version: HTTP/1.1", + "Content-Length: 13", + "'''\nNone\n'''\n")] + + + +class HTTP0_9TestCase(HTTP1_0TestCase): + + requests = ( + "GET /\r\n") + + expected_response = "HTTP/1.1 400 Bad Request\r\n\r\n" + + + def assertResponseEquals(self, response, expectedResponse): + self.assertEquals(response, expectedResponse) + + +class HTTPLoopbackTestCase(unittest.TestCase): + + expectedHeaders = {'request' : '/foo/bar', + 'command' : 'GET', + 'version' : 'HTTP/1.0', + 'content-length' : '21'} + numHeaders = 0 + gotStatus = 0 + gotResponse = 0 + gotEndHeaders = 0 + + def _handleStatus(self, version, status, message): + self.gotStatus = 1 + self.assertEquals(version, "HTTP/1.0") + self.assertEquals(status, "200") + + def _handleResponse(self, data): + self.gotResponse = 1 + self.assertEquals(data, "'''\n10\n0123456789'''\n") + + def _handleHeader(self, key, value): + self.numHeaders = self.numHeaders + 1 + self.assertEquals(self.expectedHeaders[key.lower()], value) + + def _handleEndHeaders(self): + self.gotEndHeaders = 1 + self.assertEquals(self.numHeaders, 4) + + def testLoopback(self): + server = http.HTTPChannel() + server.requestFactory = DummyHTTPHandler + client = LoopbackHTTPClient() + client.handleResponse = self._handleResponse + client.handleHeader = self._handleHeader + client.handleEndHeaders = self._handleEndHeaders + client.handleStatus = self._handleStatus + d = loopback.loopbackAsync(server, client) + d.addCallback(self._cbTestLoopback) + return d + + def _cbTestLoopback(self, ignored): + if not (self.gotStatus and self.gotResponse and self.gotEndHeaders): + raise RuntimeError( + "didn't got 
all callbacks %s" + % [self.gotStatus, self.gotResponse, self.gotEndHeaders]) + del self.gotEndHeaders + del self.gotResponse + del self.gotStatus + del self.numHeaders + + + +def _prequest(**headers): + """ + Make a request with the given request headers for the persistence tests. + """ + request = http.Request(DummyChannel(), None) + for k, v in headers.iteritems(): + request.requestHeaders.setRawHeaders(k, v) + return request + + +class PersistenceTestCase(unittest.TestCase): + """ + Tests for persistent HTTP connections. + """ + + ptests = [#(PRequest(connection="Keep-Alive"), "HTTP/1.0", 1, {'connection' : 'Keep-Alive'}), + (_prequest(), "HTTP/1.0", 0, {'connection': None}), + (_prequest(connection=["close"]), "HTTP/1.1", 0, {'connection' : ['close']}), + (_prequest(), "HTTP/1.1", 1, {'connection': None}), + (_prequest(), "HTTP/0.9", 0, {'connection': None}), + ] + + + def testAlgorithm(self): + c = http.HTTPChannel() + for req, version, correctResult, resultHeaders in self.ptests: + result = c.checkPersistence(req, version) + self.assertEquals(result, correctResult) + for header in resultHeaders.keys(): + self.assertEquals(req.responseHeaders.getRawHeaders(header, None), resultHeaders[header]) + + + +class IdentityTransferEncodingTests(TestCase): + """ + Tests for L{_IdentityTransferDecoder}. + """ + def setUp(self): + """ + Create an L{_IdentityTransferDecoder} with callbacks hooked up so that + calls to them can be inspected. + """ + self.data = [] + self.finish = [] + self.contentLength = 10 + self.decoder = _IdentityTransferDecoder( + self.contentLength, self.data.append, self.finish.append) + + + def test_exactAmountReceived(self): + """ + If L{_IdentityTransferDecoder.dataReceived} is called with a string + with length equal to the content length passed to + L{_IdentityTransferDecoder}'s initializer, the data callback is invoked + with that string and the finish callback is invoked with a zero-length + string. + """ + self.decoder.dataReceived('x' * self.contentLength) + self.assertEqual(self.data, ['x' * self.contentLength]) + self.assertEqual(self.finish, ['']) + + + def test_shortStrings(self): + """ + If L{_IdentityTransferDecoder.dataReceived} is called multiple times + with strings which, when concatenated, are as long as the content + length provided, the data callback is invoked with each string and the + finish callback is invoked only after the second call. + """ + self.decoder.dataReceived('x') + self.assertEqual(self.data, ['x']) + self.assertEqual(self.finish, []) + self.decoder.dataReceived('y' * (self.contentLength - 1)) + self.assertEqual(self.data, ['x', 'y' * (self.contentLength - 1)]) + self.assertEqual(self.finish, ['']) + + + def test_longString(self): + """ + If L{_IdentityTransferDecoder.dataReceived} is called with a string + with length greater than the provided content length, only the prefix + of that string up to the content length is passed to the data callback + and the remainder is passed to the finish callback. + """ + self.decoder.dataReceived('x' * self.contentLength + 'y') + self.assertEqual(self.data, ['x' * self.contentLength]) + self.assertEqual(self.finish, ['y']) + + + def test_rejectDataAfterFinished(self): + """ + If data is passed to L{_IdentityTransferDecoder.dataReceived} after the + finish callback has been invoked, L{RuntimeError} is raised. 
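Sketching the decoder's contract with illustrative bytes; this is a private helper, so the signature and behaviour here are taken from the tests in this class rather than any public API.

    from twisted.web.http import _IdentityTransferDecoder

    body, extra = [], []
    decoder = _IdentityTransferDecoder(5, body.append, extra.append)
    decoder.dataReceived('hello, world')
    body    # ['hello']   - only the declared content length counts as body
    extra   # [', world'] - the remainder is handed to the finish callback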
+ """ + failures = [] + def finish(bytes): + try: + decoder.dataReceived('foo') + except: + failures.append(Failure()) + decoder = _IdentityTransferDecoder(5, self.data.append, finish) + decoder.dataReceived('x' * 4) + self.assertEqual(failures, []) + decoder.dataReceived('y') + failures[0].trap(RuntimeError) + self.assertEqual( + str(failures[0].value), + "_IdentityTransferDecoder cannot decode data after finishing") + + + def test_unknownContentLength(self): + """ + If L{_IdentityTransferDecoder} is constructed with C{None} for the + content length, it passes all data delivered to it through to the data + callback. + """ + data = [] + finish = [] + decoder = _IdentityTransferDecoder(None, data.append, finish.append) + decoder.dataReceived('x') + self.assertEqual(data, ['x']) + decoder.dataReceived('y') + self.assertEqual(data, ['x', 'y']) + self.assertEqual(finish, []) + + + def _verifyCallbacksUnreferenced(self, decoder): + """ + Check the decoder's data and finish callbacks and make sure they are + None in order to help avoid references cycles. + """ + self.assertIdentical(decoder.dataCallback, None) + self.assertIdentical(decoder.finishCallback, None) + + + def test_earlyConnectionLose(self): + """ + L{_IdentityTransferDecoder.noMoreData} raises L{_DataLoss} if it is + called and the content length is known but not enough bytes have been + delivered. + """ + self.decoder.dataReceived('x' * (self.contentLength - 1)) + self.assertRaises(_DataLoss, self.decoder.noMoreData) + self._verifyCallbacksUnreferenced(self.decoder) + + + def test_unknownContentLengthConnectionLose(self): + """ + L{_IdentityTransferDecoder.noMoreData} calls the finish callback and + raises L{PotentialDataLoss} if it is called and the content length is + unknown. + """ + body = [] + finished = [] + decoder = _IdentityTransferDecoder(None, body.append, finished.append) + self.assertRaises(PotentialDataLoss, decoder.noMoreData) + self.assertEqual(body, []) + self.assertEqual(finished, ['']) + self._verifyCallbacksUnreferenced(decoder) + + + def test_finishedConnectionLose(self): + """ + L{_IdentityTransferDecoder.noMoreData} does not raise any exception if + it is called when the content length is known and that many bytes have + been delivered. + """ + self.decoder.dataReceived('x' * self.contentLength) + self.decoder.noMoreData() + self._verifyCallbacksUnreferenced(self.decoder) + + + +class ChunkedTransferEncodingTests(unittest.TestCase): + """ + Tests for L{_ChunkedTransferDecoder}, which turns a byte stream encoded + using HTTP I{chunked} C{Transfer-Encoding} back into the original byte + stream. + """ + def test_decoding(self): + """ + L{_ChunkedTransferDecoder.dataReceived} decodes chunked-encoded data + and passes the result to the specified callback. + """ + L = [] + p = http._ChunkedTransferDecoder(L.append, None) + p.dataReceived('3\r\nabc\r\n5\r\n12345\r\n') + p.dataReceived('a\r\n0123456789\r\n') + self.assertEqual(L, ['abc', '12345', '0123456789']) + + + def test_short(self): + """ + L{_ChunkedTransferDecoder.dataReceived} decodes chunks broken up and + delivered in multiple calls. + """ + L = [] + finished = [] + p = http._ChunkedTransferDecoder(L.append, finished.append) + for s in '3\r\nabc\r\n5\r\n12345\r\n0\r\n\r\n': + p.dataReceived(s) + self.assertEqual(L, ['a', 'b', 'c', '1', '2', '3', '4', '5']) + self.assertEqual(finished, ['']) + + + def test_newlines(self): + """ + L{_ChunkedTransferDecoder.dataReceived} doesn't treat CR LF pairs + embedded in chunk bodies specially. 
+ """ + L = [] + p = http._ChunkedTransferDecoder(L.append, None) + p.dataReceived('2\r\n\r\n\r\n') + self.assertEqual(L, ['\r\n']) + + + def test_extensions(self): + """ + L{_ChunkedTransferDecoder.dataReceived} disregards chunk-extension + fields. + """ + L = [] + p = http._ChunkedTransferDecoder(L.append, None) + p.dataReceived('3; x-foo=bar\r\nabc\r\n') + self.assertEqual(L, ['abc']) + + + def test_finish(self): + """ + L{_ChunkedTransferDecoder.dataReceived} interprets a zero-length + chunk as the end of the chunked data stream and calls the completion + callback. + """ + finished = [] + p = http._ChunkedTransferDecoder(None, finished.append) + p.dataReceived('0\r\n\r\n') + self.assertEqual(finished, ['']) + + + def test_extra(self): + """ + L{_ChunkedTransferDecoder.dataReceived} passes any bytes which come + after the terminating zero-length chunk to the completion callback. + """ + finished = [] + p = http._ChunkedTransferDecoder(None, finished.append) + p.dataReceived('0\r\n\r\nhello') + self.assertEqual(finished, ['hello']) + + + def test_afterFinished(self): + """ + L{_ChunkedTransferDecoder.dataReceived} raises L{RuntimeError} if it + is called after it has seen the last chunk. + """ + p = http._ChunkedTransferDecoder(None, lambda bytes: None) + p.dataReceived('0\r\n\r\n') + self.assertRaises(RuntimeError, p.dataReceived, 'hello') + + + def test_earlyConnectionLose(self): + """ + L{_ChunkedTransferDecoder.noMoreData} raises L{_DataLoss} if it is + called and the end of the last trailer has not yet been received. + """ + parser = http._ChunkedTransferDecoder(None, lambda bytes: None) + parser.dataReceived('0\r\n\r') + exc = self.assertRaises(_DataLoss, parser.noMoreData) + self.assertEqual( + str(exc), + "Chunked decoder in 'trailer' state, still expecting more data " + "to get to finished state.") + + + def test_finishedConnectionLose(self): + """ + L{_ChunkedTransferDecoder.noMoreData} does not raise any exception if + it is called after the terminal zero length chunk is received. + """ + parser = http._ChunkedTransferDecoder(None, lambda bytes: None) + parser.dataReceived('0\r\n\r\n') + parser.noMoreData() + + + def test_reentrantFinishedNoMoreData(self): + """ + L{_ChunkedTransferDecoder.noMoreData} can be called from the finished + callback without raising an exception. + """ + errors = [] + successes = [] + def finished(extra): + try: + parser.noMoreData() + except: + errors.append(Failure()) + else: + successes.append(True) + parser = http._ChunkedTransferDecoder(None, finished) + parser.dataReceived('0\r\n\r\n') + self.assertEqual(errors, []) + self.assertEqual(successes, [True]) + + + +class ChunkingTestCase(unittest.TestCase): + + strings = ["abcv", "", "fdfsd423", "Ffasfas\r\n", + "523523\n\rfsdf", "4234"] + + def testChunks(self): + for s in self.strings: + self.assertEquals((s, ''), http.fromChunk(''.join(http.toChunk(s)))) + self.assertRaises(ValueError, http.fromChunk, '-5\r\nmalformed!\r\n') + + def testConcatenatedChunks(self): + chunked = ''.join([''.join(http.toChunk(t)) for t in self.strings]) + result = [] + buffer = "" + for c in chunked: + buffer = buffer + c + try: + data, buffer = http.fromChunk(buffer) + result.append(data) + except ValueError: + pass + self.assertEquals(result, self.strings) + + + +class ParsingTestCase(unittest.TestCase): + """ + Tests for protocol parsing in L{HTTPChannel}. 
+ """ + def runRequest(self, httpRequest, requestClass, success=1): + httpRequest = httpRequest.replace("\n", "\r\n") + b = StringTransport() + a = http.HTTPChannel() + a.requestFactory = requestClass + a.makeConnection(b) + # one byte at a time, to stress it. + for byte in httpRequest: + if a.transport.disconnecting: + break + a.dataReceived(byte) + a.connectionLost(IOError("all done")) + if success: + self.assertEquals(self.didRequest, 1) + del self.didRequest + else: + self.assert_(not hasattr(self, "didRequest")) + return a + + + def test_basicAuth(self): + """ + L{HTTPChannel} provides username and password information supplied in + an I{Authorization} header to the L{Request} which makes it available + via its C{getUser} and C{getPassword} methods. + """ + testcase = self + class Request(http.Request): + l = [] + def process(self): + testcase.assertEquals(self.getUser(), self.l[0]) + testcase.assertEquals(self.getPassword(), self.l[1]) + for u, p in [("foo", "bar"), ("hello", "there:z")]: + Request.l[:] = [u, p] + s = "%s:%s" % (u, p) + f = "GET / HTTP/1.0\nAuthorization: Basic %s\n\n" % (s.encode("base64").strip(), ) + self.runRequest(f, Request, 0) + + + def test_headers(self): + """ + Headers received by L{HTTPChannel} in a request are made available to + the L{Request}. + """ + processed = [] + class MyRequest(http.Request): + def process(self): + processed.append(self) + self.finish() + + requestLines = [ + "GET / HTTP/1.0", + "Foo: bar", + "baz: Quux", + "baz: quux", + "", + ""] + + self.runRequest('\n'.join(requestLines), MyRequest, 0) + [request] = processed + self.assertEquals( + request.requestHeaders.getRawHeaders('foo'), ['bar']) + self.assertEquals( + request.requestHeaders.getRawHeaders('bAz'), ['Quux', 'quux']) + + + def test_tooManyHeaders(self): + """ + L{HTTPChannel} enforces a limit of C{HTTPChannel.maxHeaders} on the + number of headers received per request. + """ + processed = [] + class MyRequest(http.Request): + def process(self): + processed.append(self) + + requestLines = ["GET / HTTP/1.0"] + for i in range(http.HTTPChannel.maxHeaders + 2): + requestLines.append("%s: foo" % (i,)) + requestLines.extend(["", ""]) + + channel = self.runRequest("\n".join(requestLines), MyRequest, 0) + self.assertEqual(processed, []) + self.assertEqual( + channel.transport.value(), + "HTTP/1.1 400 Bad Request\r\n\r\n") + + + def test_headerLimitPerRequest(self): + """ + L{HTTPChannel} enforces the limit of C{HTTPChannel.maxHeaders} per + request so that headers received in an earlier request do not count + towards the limit when processing a later request. + """ + processed = [] + class MyRequest(http.Request): + def process(self): + processed.append(self) + self.finish() + + self.patch(http.HTTPChannel, 'maxHeaders', 1) + requestLines = [ + "GET / HTTP/1.1", + "Foo: bar", + "", + "", + "GET / HTTP/1.1", + "Bar: baz", + "", + ""] + + channel = self.runRequest("\n".join(requestLines), MyRequest, 0) + [first, second] = processed + self.assertEqual(first.getHeader('foo'), 'bar') + self.assertEqual(second.getHeader('bar'), 'baz') + self.assertEqual( + channel.transport.value(), + 'HTTP/1.1 200 OK\r\n' + 'Transfer-Encoding: chunked\r\n' + '\r\n' + '0\r\n' + '\r\n' + 'HTTP/1.1 200 OK\r\n' + 'Transfer-Encoding: chunked\r\n' + '\r\n' + '0\r\n' + '\r\n') + + + def testCookies(self): + """ + Test cookies parsing and reading. + """ + httpRequest = '''\ +GET / HTTP/1.0 +Cookie: rabbit="eat carrot"; ninja=secret; spam="hey 1=1!" 
+ +''' + testcase = self + + class MyRequest(http.Request): + def process(self): + testcase.assertEquals(self.getCookie('rabbit'), '"eat carrot"') + testcase.assertEquals(self.getCookie('ninja'), 'secret') + testcase.assertEquals(self.getCookie('spam'), '"hey 1=1!"') + testcase.didRequest = 1 + self.finish() + + self.runRequest(httpRequest, MyRequest) + + def testGET(self): + httpRequest = '''\ +GET /?key=value&multiple=two+words&multiple=more%20words&empty= HTTP/1.0 + +''' + testcase = self + class MyRequest(http.Request): + def process(self): + testcase.assertEquals(self.method, "GET") + testcase.assertEquals(self.args["key"], ["value"]) + testcase.assertEquals(self.args["empty"], [""]) + testcase.assertEquals(self.args["multiple"], ["two words", "more words"]) + testcase.didRequest = 1 + self.finish() + + self.runRequest(httpRequest, MyRequest) + + + def test_extraQuestionMark(self): + """ + While only a single '?' is allowed in an URL, several other servers + allow several and pass all after the first through as part of the + query arguments. Test that we emulate this behavior. + """ + httpRequest = 'GET /foo?bar=?&baz=quux HTTP/1.0\n\n' + + testcase = self + class MyRequest(http.Request): + def process(self): + testcase.assertEqual(self.method, 'GET') + testcase.assertEqual(self.path, '/foo') + testcase.assertEqual(self.args['bar'], ['?']) + testcase.assertEqual(self.args['baz'], ['quux']) + testcase.didRequest = 1 + self.finish() + + self.runRequest(httpRequest, MyRequest) + + + def test_formPOSTRequest(self): + """ + The request body of a I{POST} request with a I{Content-Type} header + of I{application/x-www-form-urlencoded} is parsed according to that + content type and made available in the C{args} attribute of the + request object. The original bytes of the request may still be read + from the C{content} attribute. + """ + query = 'key=value&multiple=two+words&multiple=more%20words&empty=' + httpRequest = '''\ +POST / HTTP/1.0 +Content-Length: %d +Content-Type: application/x-www-form-urlencoded + +%s''' % (len(query), query) + + testcase = self + class MyRequest(http.Request): + def process(self): + testcase.assertEquals(self.method, "POST") + testcase.assertEquals(self.args["key"], ["value"]) + testcase.assertEquals(self.args["empty"], [""]) + testcase.assertEquals(self.args["multiple"], ["two words", "more words"]) + + # Reading from the content file-like must produce the entire + # request body. + testcase.assertEquals(self.content.read(), query) + testcase.didRequest = 1 + self.finish() + + self.runRequest(httpRequest, MyRequest) + + def testMissingContentDisposition(self): + req = '''\ +POST / HTTP/1.0 +Content-Type: multipart/form-data; boundary=AaB03x +Content-Length: 103 + +--AaB03x +Content-Type: text/plain +Content-Transfer-Encoding: quoted-printable + +abasdfg +--AaB03x-- +''' + self.runRequest(req, http.Request, success=False) + + def test_chunkedEncoding(self): + """ + If a request uses the I{chunked} transfer encoding, the request body is + decoded accordingly before it is made available on the request. + """ + httpRequest = '''\ +GET / HTTP/1.0 +Content-Type: text/plain +Transfer-Encoding: chunked + +6 +Hello, +14 + spam,eggs spam spam +0 + +''' + testcase = self + class MyRequest(http.Request): + def process(self): + # The tempfile API used to create content returns an + # instance of a different type depending on what platform + # we're running on. The point here is to verify that the + # request body is in a file that's on the filesystem. 
+ # Having a fileno method that returns an int is a somewhat + # close approximation of this. -exarkun + testcase.assertIsInstance(self.content.fileno(), int) + testcase.assertEqual(self.method, 'GET') + testcase.assertEqual(self.path, '/') + content = self.content.read() + testcase.assertEqual(content, 'Hello, spam,eggs spam spam') + testcase.assertIdentical(self.channel._transferDecoder, None) + testcase.didRequest = 1 + self.finish() + + self.runRequest(httpRequest, MyRequest) + + + +class QueryArgumentsTestCase(unittest.TestCase): + def testUnquote(self): + try: + from twisted.protocols import _c_urlarg + except ImportError: + raise unittest.SkipTest("_c_urlarg module is not available") + # work exactly like urllib.unquote, including stupid things + # % followed by a non-hexdigit in the middle and in the end + self.failUnlessEqual(urllib.unquote("%notreally%n"), + _c_urlarg.unquote("%notreally%n")) + # % followed by hexdigit, followed by non-hexdigit + self.failUnlessEqual(urllib.unquote("%1quite%1"), + _c_urlarg.unquote("%1quite%1")) + # unquoted text, followed by some quoted chars, ends in a trailing % + self.failUnlessEqual(urllib.unquote("blah%21%40%23blah%"), + _c_urlarg.unquote("blah%21%40%23blah%")) + # Empty string + self.failUnlessEqual(urllib.unquote(""), _c_urlarg.unquote("")) + + def testParseqs(self): + self.failUnlessEqual(cgi.parse_qs("a=b&d=c;+=f"), + http.parse_qs("a=b&d=c;+=f")) + self.failUnlessRaises(ValueError, http.parse_qs, "blah", + strict_parsing = 1) + self.failUnlessEqual(cgi.parse_qs("a=&b=c", keep_blank_values = 1), + http.parse_qs("a=&b=c", keep_blank_values = 1)) + self.failUnlessEqual(cgi.parse_qs("a=&b=c"), + http.parse_qs("a=&b=c")) + + + def test_urlparse(self): + """ + For a given URL, L{http.urlparse} should behave the same as + L{urlparse}, except it should always return C{str}, never C{unicode}. + """ + def urls(): + for scheme in ('http', 'https'): + for host in ('example.com',): + for port in (None, 100): + for path in ('', 'path'): + if port is not None: + host = host + ':' + str(port) + yield urlunsplit((scheme, host, path, '', '')) + + + def assertSameParsing(url, decode): + """ + Verify that C{url} is parsed into the same objects by both + L{http.urlparse} and L{urlparse}. + """ + urlToStandardImplementation = url + if decode: + urlToStandardImplementation = url.decode('ascii') + standardResult = urlparse(urlToStandardImplementation) + scheme, netloc, path, params, query, fragment = http.urlparse(url) + self.assertEqual( + (scheme, netloc, path, params, query, fragment), + standardResult) + self.assertTrue(isinstance(scheme, str)) + self.assertTrue(isinstance(netloc, str)) + self.assertTrue(isinstance(path, str)) + self.assertTrue(isinstance(params, str)) + self.assertTrue(isinstance(query, str)) + self.assertTrue(isinstance(fragment, str)) + + # With caching, unicode then str + clear_cache() + for url in urls(): + assertSameParsing(url, True) + assertSameParsing(url, False) + + # With caching, str then unicode + clear_cache() + for url in urls(): + assertSameParsing(url, False) + assertSameParsing(url, True) + + # Without caching + for url in urls(): + clear_cache() + assertSameParsing(url, True) + clear_cache() + assertSameParsing(url, False) + + + def test_urlparseRejectsUnicode(self): + """ + L{http.urlparse} should reject unicode input early. 
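In short, under the behaviour these tests describe, http.urlparse normalizes its result to byte strings; a sketch with an illustrative URL.

    from twisted.web import http

    parts = http.urlparse('http://example.com/path')
    # The same six-tuple the stdlib urlparse produces, but every element is a
    # str, regardless of what the stdlib's parse cache happens to hold.
    all(isinstance(p, str) for p in parts)   # True
    # Passing a unicode URL raises TypeError instead of silently decoding it.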
+ """ + self.assertRaises(TypeError, http.urlparse, u'http://example.org/path') + + + def testEscchar(self): + try: + from twisted.protocols import _c_urlarg + except ImportError: + raise unittest.SkipTest("_c_urlarg module is not available") + self.failUnlessEqual("!@#+b", + _c_urlarg.unquote("+21+40+23+b", "+")) + +class ClientDriver(http.HTTPClient): + def handleStatus(self, version, status, message): + self.version = version + self.status = status + self.message = message + +class ClientStatusParsing(unittest.TestCase): + def testBaseline(self): + c = ClientDriver() + c.lineReceived('HTTP/1.0 201 foo') + self.failUnlessEqual(c.version, 'HTTP/1.0') + self.failUnlessEqual(c.status, '201') + self.failUnlessEqual(c.message, 'foo') + + def testNoMessage(self): + c = ClientDriver() + c.lineReceived('HTTP/1.0 201') + self.failUnlessEqual(c.version, 'HTTP/1.0') + self.failUnlessEqual(c.status, '201') + self.failUnlessEqual(c.message, '') + + def testNoMessage_trailingSpace(self): + c = ClientDriver() + c.lineReceived('HTTP/1.0 201 ') + self.failUnlessEqual(c.version, 'HTTP/1.0') + self.failUnlessEqual(c.status, '201') + self.failUnlessEqual(c.message, '') + + + +class RequestTests(unittest.TestCase, ResponseTestMixin): + """ + Tests for L{http.Request} + """ + def _compatHeadersTest(self, oldName, newName): + """ + Verify that each of two different attributes which are associated with + the same state properly reflect changes made through the other. + + This is used to test that the C{headers}/C{responseHeaders} and + C{received_headers}/C{requestHeaders} pairs interact properly. + """ + req = http.Request(DummyChannel(), None) + getattr(req, newName).setRawHeaders("test", ["lemur"]) + self.assertEqual(getattr(req, oldName)["test"], "lemur") + setattr(req, oldName, {"foo": "bar"}) + self.assertEqual( + list(getattr(req, newName).getAllRawHeaders()), + [("Foo", ["bar"])]) + setattr(req, newName, http_headers.Headers()) + self.assertEqual(getattr(req, oldName), {}) + + + def test_received_headers(self): + """ + L{Request.received_headers} is a backwards compatible API which + accesses and allows mutation of the state at L{Request.requestHeaders}. + """ + self._compatHeadersTest('received_headers', 'requestHeaders') + + + def test_headers(self): + """ + L{Request.headers} is a backwards compatible API which accesses and + allows mutation of the state at L{Request.responseHeaders}. + """ + self._compatHeadersTest('headers', 'responseHeaders') + + + def test_getHeader(self): + """ + L{http.Request.getHeader} returns the value of the named request + header. + """ + req = http.Request(DummyChannel(), None) + req.requestHeaders.setRawHeaders("test", ["lemur"]) + self.assertEquals(req.getHeader("test"), "lemur") + + + def test_getHeaderReceivedMultiples(self): + """ + When there are multiple values for a single request header, + L{http.Request.getHeader} returns the last value. + """ + req = http.Request(DummyChannel(), None) + req.requestHeaders.setRawHeaders("test", ["lemur", "panda"]) + self.assertEquals(req.getHeader("test"), "panda") + + + def test_getHeaderNotFound(self): + """ + L{http.Request.getHeader} returns C{None} when asked for the value of a + request header which is not present. + """ + req = http.Request(DummyChannel(), None) + self.assertEquals(req.getHeader("test"), None) + + + def test_getAllHeaders(self): + """ + L{http.Request.getAllheaders} returns a C{dict} mapping all request + header names to their corresponding values. 
+ """ + req = http.Request(DummyChannel(), None) + req.requestHeaders.setRawHeaders("test", ["lemur"]) + self.assertEquals(req.getAllHeaders(), {"test": "lemur"}) + + + def test_getAllHeadersNoHeaders(self): + """ + L{http.Request.getAllHeaders} returns an empty C{dict} if there are no + request headers. + """ + req = http.Request(DummyChannel(), None) + self.assertEquals(req.getAllHeaders(), {}) + + + def test_getAllHeadersMultipleHeaders(self): + """ + When there are multiple values for a single request header, + L{http.Request.getAllHeaders} returns only the last value. + """ + req = http.Request(DummyChannel(), None) + req.requestHeaders.setRawHeaders("test", ["lemur", "panda"]) + self.assertEquals(req.getAllHeaders(), {"test": "panda"}) + + + def test_setResponseCode(self): + """ + L{http.Request.setResponseCode} takes a status code and causes it to be + used as the response status. + """ + channel = DummyChannel() + req = http.Request(channel, None) + req.setResponseCode(201) + req.write('') + self.assertEqual( + channel.transport.written.getvalue().splitlines()[0], + '%s 201 Created' % (req.clientproto,)) + + + def test_setResponseCodeAndMessage(self): + """ + L{http.Request.setResponseCode} takes a status code and a message and + causes them to be used as the response status. + """ + channel = DummyChannel() + req = http.Request(channel, None) + req.setResponseCode(202, "happily accepted") + req.write('') + self.assertEqual( + channel.transport.written.getvalue().splitlines()[0], + '%s 202 happily accepted' % (req.clientproto,)) + + + def test_setResponseCodeAcceptsIntegers(self): + """ + L{http.Request.setResponseCode} accepts C{int} or C{long} for the code + parameter and raises L{TypeError} if passed anything else. + """ + req = http.Request(DummyChannel(), None) + req.setResponseCode(1) + req.setResponseCode(1L) + self.assertRaises(TypeError, req.setResponseCode, "1") + + + def test_setHost(self): + """ + L{http.Request.setHost} sets the value of the host request header. + """ + req = http.Request(DummyChannel(), None) + req.setHost("example.com", 443) + self.assertEqual( + req.requestHeaders.getRawHeaders("host"), ["example.com"]) + + + def test_setHeader(self): + """ + L{http.Request.setHeader} sets the value of the given response header. + """ + req = http.Request(DummyChannel(), None) + req.setHeader("test", "lemur") + self.assertEquals(req.responseHeaders.getRawHeaders("test"), ["lemur"]) + + + def test_firstWrite(self): + """ + For an HTTP 1.0 request, L{http.Request.write} sends an HTTP 1.0 + Response-Line and whatever response headers are set. + """ + req = http.Request(DummyChannel(), None) + trans = StringTransport() + + req.transport = trans + + req.setResponseCode(200) + req.clientproto = "HTTP/1.0" + req.responseHeaders.setRawHeaders("test", ["lemur"]) + req.write('Hello') + + self.assertResponseEquals( + trans.value(), + [("HTTP/1.0 200 OK", + "Test: lemur", + "Hello")]) + + + def test_firstWriteHTTP11Chunked(self): + """ + For an HTTP 1.1 request, L{http.Request.write} sends an HTTP 1.1 + Response-Line, whatever response headers are set, and uses chunked + encoding for the response body. 
+ """ + req = http.Request(DummyChannel(), None) + trans = StringTransport() + + req.transport = trans + + req.setResponseCode(200) + req.clientproto = "HTTP/1.1" + req.responseHeaders.setRawHeaders("test", ["lemur"]) + req.write('Hello') + req.write('World!') + + self.assertResponseEquals( + trans.value(), + [("HTTP/1.1 200 OK", + "Test: lemur", + "Transfer-Encoding: chunked", + "5\r\nHello\r\n6\r\nWorld!\r\n")]) + + + def test_firstWriteLastModified(self): + """ + For an HTTP 1.0 request for a resource with a known last modified time, + L{http.Request.write} sends an HTTP Response-Line, whatever response + headers are set, and a last-modified header with that time. + """ + req = http.Request(DummyChannel(), None) + trans = StringTransport() + + req.transport = trans + + req.setResponseCode(200) + req.clientproto = "HTTP/1.0" + req.lastModified = 0 + req.responseHeaders.setRawHeaders("test", ["lemur"]) + req.write('Hello') + + self.assertResponseEquals( + trans.value(), + [("HTTP/1.0 200 OK", + "Test: lemur", + "Last-Modified: Thu, 01 Jan 1970 00:00:00 GMT", + "Hello")]) + + + def test_parseCookies(self): + """ + L{http.Request.parseCookies} extracts cookies from C{requestHeaders} + and adds them to C{received_cookies}. + """ + req = http.Request(DummyChannel(), None) + req.requestHeaders.setRawHeaders( + "cookie", ['test="lemur"; test2="panda"']) + req.parseCookies() + self.assertEquals(req.received_cookies, {"test": '"lemur"', + "test2": '"panda"'}) + + + def test_parseCookiesMultipleHeaders(self): + """ + L{http.Request.parseCookies} can extract cookies from multiple Cookie + headers. + """ + req = http.Request(DummyChannel(), None) + req.requestHeaders.setRawHeaders( + "cookie", ['test="lemur"', 'test2="panda"']) + req.parseCookies() + self.assertEquals(req.received_cookies, {"test": '"lemur"', + "test2": '"panda"'}) + + + def test_connectionLost(self): + """ + L{http.Request.connectionLost} closes L{Request.content} and drops the + reference to the L{HTTPChannel} to assist with garbage collection. + """ + req = http.Request(DummyChannel(), None) + + # Cause Request.content to be created at all. + req.gotLength(10) + + # Grab a reference to content in case the Request drops it later on. + content = req.content + + # Put some bytes into it + req.handleContentChunk("hello") + + # Then something goes wrong and content should get closed. + req.connectionLost(Failure(ConnectionLost("Finished"))) + self.assertTrue(content.closed) + self.assertIdentical(req.channel, None) + + + def test_registerProducerTwiceFails(self): + """ + Calling L{Request.registerProducer} when a producer is already + registered raises ValueError. + """ + req = http.Request(DummyChannel(), None) + req.registerProducer(DummyProducer(), True) + self.assertRaises( + ValueError, req.registerProducer, DummyProducer(), True) + + + def test_registerProducerWhenQueuedPausesPushProducer(self): + """ + Calling L{Request.registerProducer} with an IPushProducer when the + request is queued pauses the producer. + """ + req = http.Request(DummyChannel(), True) + producer = DummyProducer() + req.registerProducer(producer, True) + self.assertEquals(['pause'], producer.events) + + + def test_registerProducerWhenQueuedDoesntPausePullProducer(self): + """ + Calling L{Request.registerProducer} with an IPullProducer when the + request is queued does not pause the producer, because it doesn't make + sense to pause a pull producer. 
+ """ + req = http.Request(DummyChannel(), True) + producer = DummyProducer() + req.registerProducer(producer, False) + self.assertEquals([], producer.events) + + + def test_registerProducerWhenQueuedDoesntRegisterPushProducer(self): + """ + Calling L{Request.registerProducer} with an IPushProducer when the + request is queued does not register the producer on the request's + transport. + """ + self.assertIdentical( + None, getattr(http.StringTransport, 'registerProducer', None), + "StringTransport cannot implement registerProducer for this test " + "to be valid.") + req = http.Request(DummyChannel(), True) + producer = DummyProducer() + req.registerProducer(producer, True) + # This is a roundabout assertion: http.StringTransport doesn't + # implement registerProducer, so Request.registerProducer can't have + # tried to call registerProducer on the transport. + self.assertIsInstance(req.transport, http.StringTransport) + + + def test_registerProducerWhenQueuedDoesntRegisterPullProducer(self): + """ + Calling L{Request.registerProducer} with an IPullProducer when the + request is queued does not register the producer on the request's + transport. + """ + self.assertIdentical( + None, getattr(http.StringTransport, 'registerProducer', None), + "StringTransport cannot implement registerProducer for this test " + "to be valid.") + req = http.Request(DummyChannel(), True) + producer = DummyProducer() + req.registerProducer(producer, False) + # This is a roundabout assertion: http.StringTransport doesn't + # implement registerProducer, so Request.registerProducer can't have + # tried to call registerProducer on the transport. + self.assertIsInstance(req.transport, http.StringTransport) + + + def test_registerProducerWhenNotQueuedRegistersPushProducer(self): + """ + Calling L{Request.registerProducer} with an IPushProducer when the + request is not queued registers the producer as a push producer on the + request's transport. + """ + req = http.Request(DummyChannel(), False) + producer = DummyProducer() + req.registerProducer(producer, True) + self.assertEquals([(producer, True)], req.transport.producers) + + + def test_registerProducerWhenNotQueuedRegistersPullProducer(self): + """ + Calling L{Request.registerProducer} with an IPullProducer when the + request is not queued registers the producer as a pull producer on the + request's transport. + """ + req = http.Request(DummyChannel(), False) + producer = DummyProducer() + req.registerProducer(producer, False) + self.assertEquals([(producer, False)], req.transport.producers) + + + def test_connectionLostNotification(self): + """ + L{Request.connectionLost} triggers all finish notification Deferreds + and cleans up per-request state. + """ + d = DummyChannel() + request = http.Request(d, True) + finished = request.notifyFinish() + request.connectionLost(Failure(ConnectionLost("Connection done"))) + self.assertIdentical(request.channel, None) + return self.assertFailure(finished, ConnectionLost) + + + def test_finishNotification(self): + """ + L{Request.finish} triggers all finish notification Deferreds. + """ + request = http.Request(DummyChannel(), False) + finished = request.notifyFinish() + # Force the request to have a non-None content attribute. This is + # probably a bug in Request. + request.gotLength(1) + request.finish() + return finished + + + def test_finishAfterConnectionLost(self): + """ + Calling L{Request.finish} after L{Request.connectionLost} has been + called results in a L{RuntimeError} being raised. 
+ """ + channel = DummyChannel() + transport = channel.transport + req = http.Request(channel, False) + req.connectionLost(Failure(ConnectionLost("The end."))) + self.assertRaises(RuntimeError, req.finish) + + + +class MultilineHeadersTestCase(unittest.TestCase): + """ + Tests to exercise handling of multiline headers by L{HTTPClient}. RFCs 1945 + (HTTP 1.0) and 2616 (HTTP 1.1) state that HTTP message header fields can + span multiple lines if each extra line is preceded by at least one space or + horizontal tab. + """ + def setUp(self): + """ + Initialize variables used to verify that the header-processing functions + are getting called. + """ + self.handleHeaderCalled = False + self.handleEndHeadersCalled = False + + # Dictionary of sample complete HTTP header key/value pairs, including + # multiline headers. + expectedHeaders = {'Content-Length': '10', + 'X-Multiline' : 'line-0\tline-1', + 'X-Multiline2' : 'line-2 line-3'} + + def ourHandleHeader(self, key, val): + """ + Dummy implementation of L{HTTPClient.handleHeader}. + """ + self.handleHeaderCalled = True + self.assertEquals(val, self.expectedHeaders[key]) + + + def ourHandleEndHeaders(self): + """ + Dummy implementation of L{HTTPClient.handleEndHeaders}. + """ + self.handleEndHeadersCalled = True + + + def test_extractHeader(self): + """ + A header isn't processed by L{HTTPClient.extractHeader} until it is + confirmed in L{HTTPClient.lineReceived} that the header has been + received completely. + """ + c = ClientDriver() + c.handleHeader = self.ourHandleHeader + c.handleEndHeaders = self.ourHandleEndHeaders + + c.lineReceived('HTTP/1.0 201') + c.lineReceived('Content-Length: 10') + self.assertIdentical(c.length, None) + self.assertFalse(self.handleHeaderCalled) + self.assertFalse(self.handleEndHeadersCalled) + + # Signal end of headers. + c.lineReceived('') + self.assertTrue(self.handleHeaderCalled) + self.assertTrue(self.handleEndHeadersCalled) + + self.assertEquals(c.length, 10) + + + def test_noHeaders(self): + """ + An HTTP request with no headers will not cause any calls to + L{handleHeader} but will cause L{handleEndHeaders} to be called on + L{HTTPClient} subclasses. + """ + c = ClientDriver() + c.handleHeader = self.ourHandleHeader + c.handleEndHeaders = self.ourHandleEndHeaders + c.lineReceived('HTTP/1.0 201') + + # Signal end of headers. + c.lineReceived('') + self.assertFalse(self.handleHeaderCalled) + self.assertTrue(self.handleEndHeadersCalled) + + self.assertEquals(c.version, 'HTTP/1.0') + self.assertEquals(c.status, '201') + + + def test_multilineHeaders(self): + """ + L{HTTPClient} parses multiline headers by buffering header lines until + an empty line or a line that does not start with whitespace hits + lineReceived, confirming that the header has been received completely. + """ + c = ClientDriver() + c.handleHeader = self.ourHandleHeader + c.handleEndHeaders = self.ourHandleEndHeaders + + c.lineReceived('HTTP/1.0 201') + c.lineReceived('X-Multiline: line-0') + self.assertFalse(self.handleHeaderCalled) + # Start continuing line with a tab. + c.lineReceived('\tline-1') + c.lineReceived('X-Multiline2: line-2') + # The previous header must be complete, so now it can be processed. + self.assertTrue(self.handleHeaderCalled) + # Start continuing line with a space. + c.lineReceived(' line-3') + c.lineReceived('Content-Length: 10') + + # Signal end of headers. 
+ c.lineReceived('') + self.assertTrue(self.handleEndHeadersCalled) + + self.assertEquals(c.version, 'HTTP/1.0') + self.assertEquals(c.status, '201') + self.assertEquals(c.length, 10) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_http_headers.py b/vendor/Twisted-10.0.0/twisted/web/test/test_http_headers.py new file mode 100644 index 000000000000..3bdb26f19a38 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_http_headers.py @@ -0,0 +1,585 @@ +# Copyright (c) 2008-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.http_headers}. +""" + +import sys + +from twisted.python.compat import set +from twisted.trial.unittest import TestCase +from twisted.web.http_headers import _DictHeaders, Headers + + +class HeadersTests(TestCase): + """ + Tests for L{Headers}. + """ + def test_initializer(self): + """ + The header values passed to L{Headers.__init__} can be retrieved via + L{Headers.getRawHeaders}. + """ + h = Headers({'Foo': ['bar']}) + self.assertEqual(h.getRawHeaders('foo'), ['bar']) + + + def test_setRawHeaders(self): + """ + L{Headers.setRawHeaders} sets the header values for the given + header name to the sequence of string values. + """ + rawValue = ["value1", "value2"] + h = Headers() + h.setRawHeaders("test", rawValue) + self.assertTrue(h.hasHeader("test")) + self.assertTrue(h.hasHeader("Test")) + self.assertEqual(h.getRawHeaders("test"), rawValue) + + + def test_addRawHeader(self): + """ + L{Headers.addRawHeader} adds a new value for a given header. + """ + h = Headers() + h.addRawHeader("test", "lemur") + self.assertEqual(h.getRawHeaders("test"), ["lemur"]) + h.addRawHeader("test", "panda") + self.assertEqual(h.getRawHeaders("test"), ["lemur", "panda"]) + + + def test_getRawHeadersNoDefault(self): + """ + L{Headers.getRawHeaders} returns C{None} if the header is not found and + no default is specified. + """ + self.assertIdentical(Headers().getRawHeaders("test"), None) + + + def test_getRawHeadersDefaultValue(self): + """ + L{Headers.getRawHeaders} returns the specified default value when no + header is found. + """ + h = Headers() + default = object() + self.assertIdentical(h.getRawHeaders("test", default), default) + + + def test_getRawHeaders(self): + """ + L{Headers.getRawHeaders} returns the values which have been set for a + given header. + """ + h = Headers() + h.setRawHeaders("test", ["lemur"]) + self.assertEqual(h.getRawHeaders("test"), ["lemur"]) + self.assertEqual(h.getRawHeaders("Test"), ["lemur"]) + + + def test_hasHeaderTrue(self): + """ + Check that L{Headers.hasHeader} returns C{True} when the given header + is found. + """ + h = Headers() + h.setRawHeaders("test", ["lemur"]) + self.assertTrue(h.hasHeader("test")) + self.assertTrue(h.hasHeader("Test")) + + + def test_hasHeaderFalse(self): + """ + L{Headers.hasHeader} returns C{False} when the given header is not + found. + """ + self.assertFalse(Headers().hasHeader("test")) + + + def test_removeHeader(self): + """ + Check that L{Headers.removeHeader} removes the given header. + """ + h = Headers() + + h.setRawHeaders("foo", ["lemur"]) + self.assertTrue(h.hasHeader("foo")) + h.removeHeader("foo") + self.assertFalse(h.hasHeader("foo")) + + h.setRawHeaders("bar", ["panda"]) + self.assertTrue(h.hasHeader("bar")) + h.removeHeader("Bar") + self.assertFalse(h.hasHeader("bar")) + + + def test_removeHeaderDoesntExist(self): + """ + L{Headers.removeHeader} is a no-operation when the specified header is + not found. 
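The L{Headers} tests in this stretch pin down the raw-header API: values are stored as lists, lookups ignore case, and a missing header comes back as None or a caller-supplied default. As a rough, self-contained sketch of a store with the same observable behaviour (SimpleHeaders is an invented name for illustration, not Twisted's class or its implementation):

class SimpleHeaders(object):
    """Case-insensitive, multi-valued header store (illustrative sketch only)."""

    def __init__(self, raw=None):
        self._store = {}          # lowercased name -> list of values
        for name, values in (raw or {}).items():
            self.setRawHeaders(name, values)

    def setRawHeaders(self, name, values):
        self._store[name.lower()] = list(values)

    def addRawHeader(self, name, value):
        self._store.setdefault(name.lower(), []).append(value)

    def getRawHeaders(self, name, default=None):
        return self._store.get(name.lower(), default)

    def hasHeader(self, name):
        return name.lower() in self._store

    def removeHeader(self, name):
        self._store.pop(name.lower(), None)

h = SimpleHeaders({'Foo': ['bar']})
assert h.getRawHeaders('foo') == ['bar']
h.addRawHeader('foo', 'baz')
assert h.getRawHeaders('FOO') == ['bar', 'baz']
assert h.getRawHeaders('missing') is None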
+ """ + h = Headers() + h.removeHeader("test") + self.assertEqual(list(h.getAllRawHeaders()), []) + + + def test_canonicalNameCaps(self): + """ + L{Headers._canonicalNameCaps} returns the canonical capitalization for + the given header. + """ + h = Headers() + self.assertEqual(h._canonicalNameCaps("test"), "Test") + self.assertEqual(h._canonicalNameCaps("test-stuff"), "Test-Stuff") + self.assertEqual(h._canonicalNameCaps("www-authenticate"), + "WWW-Authenticate") + + + def test_getAllRawHeaders(self): + """ + L{Headers.getAllRawHeaders} returns an iterable of (k, v) pairs, where + C{k} is the canonicalized representation of the header name, and C{v} + is a sequence of values. + """ + h = Headers() + h.setRawHeaders("test", ["lemurs"]) + h.setRawHeaders("www-authenticate", ["basic aksljdlk="]) + + allHeaders = set([(k, tuple(v)) for k, v in h.getAllRawHeaders()]) + + self.assertEqual(allHeaders, + set([("WWW-Authenticate", ("basic aksljdlk=",)), + ("Test", ("lemurs",))])) + + + def test_headersComparison(self): + """ + A L{Headers} instance compares equal to itself and to another + L{Headers} instance with the same values. + """ + first = Headers() + first.setRawHeaders("foo", ["panda"]) + second = Headers() + second.setRawHeaders("foo", ["panda"]) + third = Headers() + third.setRawHeaders("foo", ["lemur", "panda"]) + self.assertEqual(first, first) + self.assertEqual(first, second) + self.assertNotEqual(first, third) + + + def test_otherComparison(self): + """ + An instance of L{Headers} does not compare equal to other unrelated + objects. + """ + h = Headers() + self.assertNotEqual(h, ()) + self.assertNotEqual(h, object()) + self.assertNotEqual(h, "foo") + + + def test_repr(self): + """ + The L{repr} of a L{Headers} instance shows the names and values of all + the headers it contains. + """ + self.assertEqual( + repr(Headers({"foo": ["bar", "baz"]})), + "Headers({'foo': ['bar', 'baz']})") + + + def test_subclassRepr(self): + """ + The L{repr} of an instance of a subclass of L{Headers} uses the name + of the subclass instead of the string C{"Headers"}. + """ + class FunnyHeaders(Headers): + pass + self.assertEqual( + repr(FunnyHeaders({"foo": ["bar", "baz"]})), + "FunnyHeaders({'foo': ['bar', 'baz']})") + + + +class HeaderDictTests(TestCase): + """ + Tests for the backwards compatible C{dict} interface for L{Headers} + provided by L{_DictHeaders}. + """ + def headers(self, **kw): + """ + Create a L{Headers} instance populated with the header name/values + specified by C{kw} and a L{_DictHeaders} wrapped around it and return + them both. + """ + h = Headers() + for k, v in kw.iteritems(): + h.setRawHeaders(k, v) + return h, _DictHeaders(h) + + + def test_getItem(self): + """ + L{_DictHeaders.__getitem__} returns a single header for the given name. + """ + headers, wrapper = self.headers(test=["lemur"]) + self.assertEqual(wrapper["test"], "lemur") + + + def test_getItemMultiple(self): + """ + L{_DictHeaders.__getitem__} returns only the last header value for a + given name. + """ + headers, wrapper = self.headers(test=["lemur", "panda"]) + self.assertEqual(wrapper["test"], "panda") + + + def test_getItemMissing(self): + """ + L{_DictHeaders.__getitem__} raises L{KeyError} if called with a header + which is not present. 
+ """ + headers, wrapper = self.headers() + exc = self.assertRaises(KeyError, wrapper.__getitem__, "test") + self.assertEqual(exc.args, ("test",)) + + + def test_iteration(self): + """ + L{_DictHeaders.__iter__} returns an iterator the elements of which + are the lowercase name of each header present. + """ + headers, wrapper = self.headers(foo=["lemur", "panda"], bar=["baz"]) + self.assertEqual(set(list(wrapper)), set(["foo", "bar"])) + + + def test_length(self): + """ + L{_DictHeaders.__len__} returns the number of headers present. + """ + headers, wrapper = self.headers() + self.assertEqual(len(wrapper), 0) + headers.setRawHeaders("foo", ["bar"]) + self.assertEqual(len(wrapper), 1) + headers.setRawHeaders("test", ["lemur", "panda"]) + self.assertEqual(len(wrapper), 2) + + + def test_setItem(self): + """ + L{_DictHeaders.__setitem__} sets a single header value for the given + name. + """ + headers, wrapper = self.headers() + wrapper["test"] = "lemur" + self.assertEqual(headers.getRawHeaders("test"), ["lemur"]) + + + def test_setItemOverwrites(self): + """ + L{_DictHeaders.__setitem__} will replace any previous header values for + the given name. + """ + headers, wrapper = self.headers(test=["lemur", "panda"]) + wrapper["test"] = "lemur" + self.assertEqual(headers.getRawHeaders("test"), ["lemur"]) + + + def test_delItem(self): + """ + L{_DictHeaders.__delitem__} will remove the header values for the given + name. + """ + headers, wrapper = self.headers(test=["lemur"]) + del wrapper["test"] + self.assertFalse(headers.hasHeader("test")) + + + def test_delItemMissing(self): + """ + L{_DictHeaders.__delitem__} will raise L{KeyError} if the given name is + not present. + """ + headers, wrapper = self.headers() + exc = self.assertRaises(KeyError, wrapper.__delitem__, "test") + self.assertEqual(exc.args, ("test",)) + + + def test_keys(self, _method='keys', _requireList=True): + """ + L{_DictHeaders.keys} will return a list of all present header names. + """ + headers, wrapper = self.headers(test=["lemur"], foo=["bar"]) + keys = getattr(wrapper, _method)() + if _requireList: + self.assertIsInstance(keys, list) + self.assertEqual(set(keys), set(["foo", "test"])) + + + def test_iterkeys(self): + """ + L{_DictHeaders.iterkeys} will return all present header names. + """ + self.test_keys('iterkeys', False) + + + def test_values(self, _method='values', _requireList=True): + """ + L{_DictHeaders.values} will return a list of all present header values, + returning only the last value for headers with more than one. + """ + headers, wrapper = self.headers(foo=["lemur"], bar=["marmot", "panda"]) + values = getattr(wrapper, _method)() + if _requireList: + self.assertIsInstance(values, list) + self.assertEqual(set(values), set(["lemur", "panda"])) + + + def test_itervalues(self): + """ + L{_DictHeaders.itervalues} will return all present header values, + returning only the last value for headers with more than one. + """ + self.test_values('itervalues', False) + + + def test_items(self, _method='items', _requireList=True): + """ + L{_DictHeaders.items} will return a list of all present header names + and values as tuples, returning only the last value for headers with + more than one. 
+ """ + headers, wrapper = self.headers(foo=["lemur"], bar=["marmot", "panda"]) + items = getattr(wrapper, _method)() + if _requireList: + self.assertIsInstance(items, list) + self.assertEqual(set(items), set([("foo", "lemur"), ("bar", "panda")])) + + + def test_iteritems(self): + """ + L{_DictHeaders.iteritems} will return all present header names and + values as tuples, returning only the last value for headers with more + than one. + """ + self.test_items('iteritems', False) + + + def test_clear(self): + """ + L{_DictHeaders.clear} will remove all headers. + """ + headers, wrapper = self.headers(foo=["lemur"], bar=["panda"]) + wrapper.clear() + self.assertEqual(list(headers.getAllRawHeaders()), []) + + + def test_copy(self): + """ + L{_DictHeaders.copy} will return a C{dict} with all the same headers + and the last value for each. + """ + headers, wrapper = self.headers(foo=["lemur", "panda"], bar=["marmot"]) + duplicate = wrapper.copy() + self.assertEqual(duplicate, {"foo": "panda", "bar": "marmot"}) + + + def test_get(self): + """ + L{_DictHeaders.get} returns the last value for the given header name. + """ + headers, wrapper = self.headers(foo=["lemur", "panda"]) + self.assertEqual(wrapper.get("foo"), "panda") + + + def test_getMissing(self): + """ + L{_DictHeaders.get} returns C{None} for a header which is not present. + """ + headers, wrapper = self.headers() + self.assertIdentical(wrapper.get("foo"), None) + + + def test_getDefault(self): + """ + L{_DictHeaders.get} returns the last value for the given header name + even when it is invoked with a default value. + """ + headers, wrapper = self.headers(foo=["lemur"]) + self.assertEqual(wrapper.get("foo", "bar"), "lemur") + + + def test_getDefaultMissing(self): + """ + L{_DictHeaders.get} returns the default value specified if asked for a + header which is not present. + """ + headers, wrapper = self.headers() + self.assertEqual(wrapper.get("foo", "bar"), "bar") + + + def test_has_key(self): + """ + L{_DictHeaders.has_key} returns C{True} if the given header is present, + C{False} otherwise. + """ + headers, wrapper = self.headers(foo=["lemur"]) + self.assertTrue(wrapper.has_key("foo")) + self.assertFalse(wrapper.has_key("bar")) + + + def test_contains(self): + """ + L{_DictHeaders.__contains__} returns C{True} if the given header is + present, C{False} otherwise. + """ + headers, wrapper = self.headers(foo=["lemur"]) + self.assertIn("foo", wrapper) + self.assertNotIn("bar", wrapper) + + + def test_pop(self): + """ + L{_DictHeaders.pop} returns the last header value associated with the + given header name and removes the header. + """ + headers, wrapper = self.headers(foo=["lemur", "panda"]) + self.assertEqual(wrapper.pop("foo"), "panda") + self.assertIdentical(headers.getRawHeaders("foo"), None) + + + def test_popMissing(self): + """ + L{_DictHeaders.pop} raises L{KeyError} if passed a header name which is + not present. + """ + headers, wrapper = self.headers() + self.assertRaises(KeyError, wrapper.pop, "foo") + + + def test_popDefault(self): + """ + L{_DictHeaders.pop} returns the last header value associated with the + given header name and removes the header, even if it is supplied with a + default value. + """ + headers, wrapper = self.headers(foo=["lemur"]) + self.assertEqual(wrapper.pop("foo", "bar"), "lemur") + self.assertIdentical(headers.getRawHeaders("foo"), None) + + + def test_popDefaultMissing(self): + """ + L{_DictHeaders.pop} returns the default value is asked for a header + name which is not present. 
+ """ + headers, wrapper = self.headers(foo=["lemur"]) + self.assertEqual(wrapper.pop("bar", "baz"), "baz") + self.assertEqual(headers.getRawHeaders("foo"), ["lemur"]) + + + def test_popitem(self): + """ + L{_DictHeaders.popitem} returns some header name/value pair. + """ + headers, wrapper = self.headers(foo=["lemur", "panda"]) + self.assertEqual(wrapper.popitem(), ("foo", "panda")) + self.assertIdentical(headers.getRawHeaders("foo"), None) + + + def test_popitemEmpty(self): + """ + L{_DictHeaders.popitem} raises L{KeyError} if there are no headers + present. + """ + headers, wrapper = self.headers() + self.assertRaises(KeyError, wrapper.popitem) + + + def test_update(self): + """ + L{_DictHeaders.update} adds the header/value pairs in the C{dict} it is + passed, overriding any existing values for those headers. + """ + headers, wrapper = self.headers(foo=["lemur"]) + wrapper.update({"foo": "panda", "bar": "marmot"}) + self.assertEqual(headers.getRawHeaders("foo"), ["panda"]) + self.assertEqual(headers.getRawHeaders("bar"), ["marmot"]) + + + def test_updateWithKeywords(self): + """ + L{_DictHeaders.update} adds header names given as keyword arguments + with the keyword values as the header value. + """ + headers, wrapper = self.headers(foo=["lemur"]) + wrapper.update(foo="panda", bar="marmot") + self.assertEqual(headers.getRawHeaders("foo"), ["panda"]) + self.assertEqual(headers.getRawHeaders("bar"), ["marmot"]) + + if sys.version_info < (2, 4): + test_updateWithKeywords.skip = ( + "Python 2.3 does not support keyword arguments to dict.update.") + + + def test_setdefaultMissing(self): + """ + If passed the name of a header which is not present, + L{_DictHeaders.setdefault} sets the value of the given header to the + specified default value and returns it. + """ + headers, wrapper = self.headers(foo=["bar"]) + self.assertEqual(wrapper.setdefault("baz", "quux"), "quux") + self.assertEqual(headers.getRawHeaders("foo"), ["bar"]) + self.assertEqual(headers.getRawHeaders("baz"), ["quux"]) + + + def test_setdefaultPresent(self): + """ + If passed the name of a header which is present, + L{_DictHeaders.setdefault} makes no changes to the headers and + returns the last value already associated with that header. + """ + headers, wrapper = self.headers(foo=["bar", "baz"]) + self.assertEqual(wrapper.setdefault("foo", "quux"), "baz") + self.assertEqual(headers.getRawHeaders("foo"), ["bar", "baz"]) + + + def test_setdefaultDefault(self): + """ + If a value is not passed to L{_DictHeaders.setdefault}, C{None} is + used. + """ + # This results in an invalid state for the headers, but maybe some + # application is doing this an intermediate step towards some other + # state. Anyway, it was broken with the old implementation so it's + # broken with the new implementation. Compatibility, for the win. + # -exarkun + headers, wrapper = self.headers() + self.assertIdentical(wrapper.setdefault("foo"), None) + self.assertEqual(headers.getRawHeaders("foo"), [None]) + + + def test_dictComparison(self): + """ + An instance of L{_DictHeaders} compares equal to a C{dict} which + contains the same header/value pairs. For header names with multiple + values, the last value only is considered. 
+ """ + headers, wrapper = self.headers(foo=["lemur"], bar=["panda", "marmot"]) + self.assertNotEqual(wrapper, {"foo": "lemur", "bar": "panda"}) + self.assertEqual(wrapper, {"foo": "lemur", "bar": "marmot"}) + + + def test_otherComparison(self): + """ + An instance of L{_DictHeaders} does not compare equal to other + unrelated objects. + """ + headers, wrapper = self.headers() + self.assertNotEqual(wrapper, ()) + self.assertNotEqual(wrapper, object()) + self.assertNotEqual(wrapper, "foo") diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_httpauth.py b/vendor/Twisted-10.0.0/twisted/web/test/test_httpauth.py new file mode 100644 index 000000000000..03003fa8eec4 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_httpauth.py @@ -0,0 +1,586 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web._auth}. +""" + + +from zope.interface import implements +from zope.interface.verify import verifyObject + +from twisted.trial import unittest + +from twisted.internet.address import IPv4Address + +from twisted.cred import error, portal +from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse +from twisted.cred.checkers import ANONYMOUS, AllowAnonymousAccess +from twisted.cred.credentials import IUsernamePassword + +from twisted.web.iweb import ICredentialFactory +from twisted.web.resource import IResource, Resource, getChildForRequest +from twisted.web._auth import basic, digest +from twisted.web._auth.wrapper import HTTPAuthSessionWrapper, UnauthorizedResource +from twisted.web._auth.basic import BasicCredentialFactory + +from twisted.web.server import NOT_DONE_YET +from twisted.web.static import Data + +from twisted.web.test.test_web import DummyRequest + + +def b64encode(s): + return s.encode('base64').strip() + + +class BasicAuthTestsMixin: + """ + L{TestCase} mixin class which defines a number of tests for + L{basic.BasicCredentialFactory}. Because this mixin defines C{setUp}, it + must be inherited before L{TestCase}. + """ + def setUp(self): + self.request = self.makeRequest() + self.realm = 'foo' + self.username = 'dreid' + self.password = 'S3CuR1Ty' + self.credentialFactory = basic.BasicCredentialFactory(self.realm) + + + def makeRequest(self, method='GET', clientAddress=None): + """ + Create a request object to be passed to + L{basic.BasicCredentialFactory.decode} along with a response value. + Override this in a subclass. + """ + raise NotImplementedError("%r did not implement makeRequest" % ( + self.__class__,)) + + + def test_interface(self): + """ + L{BasicCredentialFactory} implements L{ICredentialFactory}. + """ + self.assertTrue( + verifyObject(ICredentialFactory, self.credentialFactory)) + + + def test_usernamePassword(self): + """ + L{basic.BasicCredentialFactory.decode} turns a base64-encoded response + into a L{UsernamePassword} object with a password which reflects the + one which was encoded in the response. + """ + response = b64encode('%s:%s' % (self.username, self.password)) + + creds = self.credentialFactory.decode(response, self.request) + self.assertTrue(IUsernamePassword.providedBy(creds)) + self.assertTrue(creds.checkPassword(self.password)) + self.assertFalse(creds.checkPassword(self.password + 'wrong')) + + + def test_incorrectPadding(self): + """ + L{basic.BasicCredentialFactory.decode} decodes a base64-encoded + response with incorrect padding. 
+ """ + response = b64encode('%s:%s' % (self.username, self.password)) + response = response.strip('=') + + creds = self.credentialFactory.decode(response, self.request) + self.assertTrue(verifyObject(IUsernamePassword, creds)) + self.assertTrue(creds.checkPassword(self.password)) + + + def test_invalidEncoding(self): + """ + L{basic.BasicCredentialFactory.decode} raises L{LoginFailed} if passed + a response which is not base64-encoded. + """ + response = 'x' # one byte cannot be valid base64 text + self.assertRaises( + error.LoginFailed, + self.credentialFactory.decode, response, self.makeRequest()) + + + def test_invalidCredentials(self): + """ + L{basic.BasicCredentialFactory.decode} raises L{LoginFailed} when + passed a response which is not valid base64-encoded text. + """ + response = b64encode('123abc+/') + self.assertRaises( + error.LoginFailed, + self.credentialFactory.decode, + response, self.makeRequest()) + + +class RequestMixin: + def makeRequest(self, method='GET', clientAddress=None): + """ + Create a L{DummyRequest} (change me to create a + L{twisted.web.http.Request} instead). + """ + request = DummyRequest('/') + request.method = method + request.client = clientAddress + return request + + + +class BasicAuthTestCase(RequestMixin, BasicAuthTestsMixin, unittest.TestCase): + """ + Basic authentication tests which use L{twisted.web.http.Request}. + """ + + + +class DigestAuthTestCase(RequestMixin, unittest.TestCase): + """ + Digest authentication tests which use L{twisted.web.http.Request}. + """ + + def setUp(self): + """ + Create a DigestCredentialFactory for testing + """ + self.realm = "test realm" + self.algorithm = "md5" + self.credentialFactory = digest.DigestCredentialFactory( + self.algorithm, self.realm) + self.request = self.makeRequest() + + + def test_decode(self): + """ + L{digest.DigestCredentialFactory.decode} calls the C{decode} method on + L{twisted.cred.digest.DigestCredentialFactory} with the HTTP method and + host of the request. + """ + host = '169.254.0.1' + method = 'GET' + done = [False] + response = object() + def check(_response, _method, _host): + self.assertEqual(response, _response) + self.assertEqual(method, _method) + self.assertEqual(host, _host) + done[0] = True + + self.patch(self.credentialFactory.digest, 'decode', check) + req = self.makeRequest(method, IPv4Address('TCP', host, 81)) + self.credentialFactory.decode(response, req) + self.assertTrue(done[0]) + + + def test_interface(self): + """ + L{DigestCredentialFactory} implements L{ICredentialFactory}. + """ + self.assertTrue( + verifyObject(ICredentialFactory, self.credentialFactory)) + + + def test_getChallenge(self): + """ + The challenge issued by L{DigestCredentialFactory.getChallenge} must + include C{'qop'}, C{'realm'}, C{'algorithm'}, C{'nonce'}, and + C{'opaque'} keys. The values for the C{'realm'} and C{'algorithm'} + keys must match the values supplied to the factory's initializer. + None of the values may have newlines in them. + """ + challenge = self.credentialFactory.getChallenge(self.request) + self.assertEquals(challenge['qop'], 'auth') + self.assertEquals(challenge['realm'], 'test realm') + self.assertEquals(challenge['algorithm'], 'md5') + self.assertIn('nonce', challenge) + self.assertIn('opaque', challenge) + for v in challenge.values(): + self.assertNotIn('\n', v) + + + def test_getChallengeWithoutClientIP(self): + """ + L{DigestCredentialFactory.getChallenge} can issue a challenge even if + the L{Request} it is passed returns C{None} from C{getClientIP}. 
+ """ + request = self.makeRequest('GET', None) + challenge = self.credentialFactory.getChallenge(request) + self.assertEqual(challenge['qop'], 'auth') + self.assertEqual(challenge['realm'], 'test realm') + self.assertEqual(challenge['algorithm'], 'md5') + self.assertIn('nonce', challenge) + self.assertIn('opaque', challenge) + + + +class UnauthorizedResourceTests(unittest.TestCase): + """ + Tests for L{UnauthorizedResource}. + """ + def test_getChildWithDefault(self): + """ + An L{UnauthorizedResource} is every child of itself. + """ + resource = UnauthorizedResource([]) + self.assertIdentical( + resource.getChildWithDefault("foo", None), resource) + self.assertIdentical( + resource.getChildWithDefault("bar", None), resource) + + + def test_render(self): + """ + L{UnauthorizedResource} renders with a 401 response code and a + I{WWW-Authenticate} header and puts a simple unauthorized message + into the response body. + """ + resource = UnauthorizedResource([ + BasicCredentialFactory('example.com')]) + request = DummyRequest(['']) + request.render(resource) + self.assertEqual(request.responseCode, 401) + self.assertEqual( + request.responseHeaders.getRawHeaders('www-authenticate'), + ['basic realm="example.com"']) + self.assertEqual(request.written, ['Unauthorized']) + + + def test_renderQuotesRealm(self): + """ + The realm value included in the I{WWW-Authenticate} header set in + the response when L{UnauthorizedResounrce} is rendered has quotes + and backslashes escaped. + """ + resource = UnauthorizedResource([ + BasicCredentialFactory('example\\"foo')]) + request = DummyRequest(['']) + request.render(resource) + self.assertEqual( + request.responseHeaders.getRawHeaders('www-authenticate'), + ['basic realm="example\\\\\\"foo"']) + + + +class Realm(object): + """ + A simple L{IRealm} implementation which gives out L{WebAvatar} for any + avatarId. + + @type loggedIn: C{int} + @ivar loggedIn: The number of times C{requestAvatar} has been invoked for + L{IResource}. + + @type loggedOut: C{int} + @ivar loggedOut: The number of times the logout callback has been invoked. + """ + implements(portal.IRealm) + + def __init__(self, avatarFactory): + self.loggedOut = 0 + self.loggedIn = 0 + self.avatarFactory = avatarFactory + + + def requestAvatar(self, avatarId, mind, *interfaces): + if IResource in interfaces: + self.loggedIn += 1 + return IResource, self.avatarFactory(avatarId), self.logout + raise NotImplementedError() + + + def logout(self): + self.loggedOut += 1 + + + +class HTTPAuthHeaderTests(unittest.TestCase): + """ + Tests for L{HTTPAuthSessionWrapper}. + """ + makeRequest = DummyRequest + + def setUp(self): + """ + Create a realm, portal, and L{HTTPAuthSessionWrapper} to use in the tests. 
+ """ + self.username = 'foo bar' + self.password = 'bar baz' + self.avatarContent = "contents of the avatar resource itself" + self.childName = "foo-child" + self.childContent = "contents of the foo child of the avatar" + self.checker = InMemoryUsernamePasswordDatabaseDontUse() + self.checker.addUser(self.username, self.password) + self.avatar = Data(self.avatarContent, 'text/plain') + self.avatar.putChild( + self.childName, Data(self.childContent, 'text/plain')) + self.avatars = {self.username: self.avatar} + self.realm = Realm(self.avatars.get) + self.portal = portal.Portal(self.realm, [self.checker]) + self.credentialFactories = [] + self.wrapper = HTTPAuthSessionWrapper( + self.portal, self.credentialFactories) + + + def _authorizedBasicLogin(self, request): + """ + Add an I{basic authorization} header to the given request and then + dispatch it, starting from C{self.wrapper} and returning the resulting + L{IResource}. + """ + authorization = b64encode(self.username + ':' + self.password) + request.headers['authorization'] = 'Basic ' + authorization + return getChildForRequest(self.wrapper, request) + + + def test_getChildWithDefault(self): + """ + Resource traversal which encounters an L{HTTPAuthSessionWrapper} + results in an L{UnauthorizedResource} instance when the request does + not have the required I{Authorization} headers. + """ + request = self.makeRequest([self.childName]) + child = getChildForRequest(self.wrapper, request) + d = request.notifyFinish() + def cbFinished(result): + self.assertEquals(request.responseCode, 401) + d.addCallback(cbFinished) + request.render(child) + return d + + + def _invalidAuthorizationTest(self, response): + """ + Create a request with the given value as the value of an + I{Authorization} header and perform resource traversal with it, + starting at C{self.wrapper}. Assert that the result is a 401 response + code. Return a L{Deferred} which fires when this is all done. + """ + self.credentialFactories.append(BasicCredentialFactory('example.com')) + request = self.makeRequest([self.childName]) + request.headers['authorization'] = response + child = getChildForRequest(self.wrapper, request) + d = request.notifyFinish() + def cbFinished(result): + self.assertEqual(request.responseCode, 401) + d.addCallback(cbFinished) + request.render(child) + return d + + + def test_getChildWithDefaultUnauthorizedUser(self): + """ + Resource traversal which enouncters an L{HTTPAuthSessionWrapper} + results in an L{UnauthorizedResource} when the request has an + I{Authorization} header with a user which does not exist. + """ + return self._invalidAuthorizationTest('Basic ' + b64encode('foo:bar')) + + + def test_getChildWithDefaultUnauthorizedPassword(self): + """ + Resource traversal which enouncters an L{HTTPAuthSessionWrapper} + results in an L{UnauthorizedResource} when the request has an + I{Authorization} header with a user which exists and the wrong + password. + """ + return self._invalidAuthorizationTest( + 'Basic ' + b64encode(self.username + ':bar')) + + + def test_getChildWithDefaultUnrecognizedScheme(self): + """ + Resource traversal which enouncters an L{HTTPAuthSessionWrapper} + results in an L{UnauthorizedResource} when the request has an + I{Authorization} header with an unrecognized scheme. 
+ """ + return self._invalidAuthorizationTest('Quux foo bar baz') + + + def test_getChildWithDefaultAuthorized(self): + """ + Resource traversal which encounters an L{HTTPAuthSessionWrapper} + results in an L{IResource} which renders the L{IResource} avatar + retrieved from the portal when the request has a valid I{Authorization} + header. + """ + self.credentialFactories.append(BasicCredentialFactory('example.com')) + request = self.makeRequest([self.childName]) + child = self._authorizedBasicLogin(request) + d = request.notifyFinish() + def cbFinished(ignored): + self.assertEquals(request.written, [self.childContent]) + d.addCallback(cbFinished) + request.render(child) + return d + + + def test_renderAuthorized(self): + """ + Resource traversal which terminates at an L{HTTPAuthSessionWrapper} + and includes correct authentication headers results in the + L{IResource} avatar (not one of its children) retrieved from the + portal being rendered. + """ + self.credentialFactories.append(BasicCredentialFactory('example.com')) + # Request it exactly, not any of its children. + request = self.makeRequest([]) + child = self._authorizedBasicLogin(request) + d = request.notifyFinish() + def cbFinished(ignored): + self.assertEquals(request.written, [self.avatarContent]) + d.addCallback(cbFinished) + request.render(child) + return d + + + def test_getChallengeCalledWithRequest(self): + """ + When L{HTTPAuthSessionWrapper} finds an L{ICredentialFactory} to issue + a challenge, it calls the C{getChallenge} method with the request as an + argument. + """ + class DumbCredentialFactory(object): + implements(ICredentialFactory) + scheme = 'dumb' + + def __init__(self): + self.requests = [] + + def getChallenge(self, request): + self.requests.append(request) + return {} + + factory = DumbCredentialFactory() + self.credentialFactories.append(factory) + request = self.makeRequest([self.childName]) + child = getChildForRequest(self.wrapper, request) + d = request.notifyFinish() + def cbFinished(ignored): + self.assertEqual(factory.requests, [request]) + d.addCallback(cbFinished) + request.render(child) + return d + + + def test_logout(self): + """ + The realm's logout callback is invoked after the resource is rendered. + """ + self.credentialFactories.append(BasicCredentialFactory('example.com')) + + class SlowerResource(Resource): + def render(self, request): + return NOT_DONE_YET + + self.avatar.putChild(self.childName, SlowerResource()) + request = self.makeRequest([self.childName]) + child = self._authorizedBasicLogin(request) + request.render(child) + self.assertEqual(self.realm.loggedOut, 0) + request.finish() + self.assertEqual(self.realm.loggedOut, 1) + + + def test_decodeRaises(self): + """ + Resource traversal which enouncters an L{HTTPAuthSessionWrapper} + results in an L{UnauthorizedResource} when the request has a I{Basic + Authorization} header which cannot be decoded using base64. + """ + self.credentialFactories.append(BasicCredentialFactory('example.com')) + request = self.makeRequest([self.childName]) + request.headers['authorization'] = 'Basic decode should fail' + child = getChildForRequest(self.wrapper, request) + self.assertIsInstance(child, UnauthorizedResource) + + + def test_selectParseResponse(self): + """ + L{HTTPAuthSessionWrapper._selectParseHeader} returns a two-tuple giving + the L{ICredentialFactory} to use to parse the header and a string + containing the portion of the header which remains to be parsed. 
+ """ + basicAuthorization = 'Basic abcdef123456' + self.assertEqual( + self.wrapper._selectParseHeader(basicAuthorization), + (None, None)) + factory = BasicCredentialFactory('example.com') + self.credentialFactories.append(factory) + self.assertEqual( + self.wrapper._selectParseHeader(basicAuthorization), + (factory, 'abcdef123456')) + + + def test_unexpectedDecodeError(self): + """ + Any unexpected exception raised by the credential factory's C{decode} + method results in a 500 response code and causes the exception to be + logged. + """ + class UnexpectedException(Exception): + pass + + class BadFactory(object): + scheme = 'bad' + + def getChallenge(self, client): + return {} + + def decode(self, response, request): + raise UnexpectedException() + + self.credentialFactories.append(BadFactory()) + request = self.makeRequest([self.childName]) + request.headers['authorization'] = 'Bad abc' + child = getChildForRequest(self.wrapper, request) + request.render(child) + self.assertEqual(request.responseCode, 500) + self.assertEqual(len(self.flushLoggedErrors(UnexpectedException)), 1) + + + def test_unexpectedLoginError(self): + """ + Any unexpected failure from L{Portal.login} results in a 500 response + code and causes the failure to be logged. + """ + class UnexpectedException(Exception): + pass + + class BrokenChecker(object): + credentialInterfaces = (IUsernamePassword,) + + def requestAvatarId(self, credentials): + raise UnexpectedException() + + self.portal.registerChecker(BrokenChecker()) + self.credentialFactories.append(BasicCredentialFactory('example.com')) + request = self.makeRequest([self.childName]) + child = self._authorizedBasicLogin(request) + request.render(child) + self.assertEqual(request.responseCode, 500) + self.assertEqual(len(self.flushLoggedErrors(UnexpectedException)), 1) + + + def test_anonymousAccess(self): + """ + Anonymous requests are allowed if a L{Portal} has an anonymous checker + registered. + """ + unprotectedContents = "contents of the unprotected child resource" + + self.avatars[ANONYMOUS] = Resource() + self.avatars[ANONYMOUS].putChild( + self.childName, Data(unprotectedContents, 'text/plain')) + self.portal.registerChecker(AllowAnonymousAccess()) + + self.credentialFactories.append(BasicCredentialFactory('example.com')) + request = self.makeRequest([self.childName]) + child = getChildForRequest(self.wrapper, request) + d = request.notifyFinish() + def cbFinished(ignored): + self.assertEquals(request.written, [unprotectedContents]) + d.addCallback(cbFinished) + request.render(child) + return d diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_newclient.py b/vendor/Twisted-10.0.0/twisted/web/test/test_newclient.py new file mode 100644 index 000000000000..3654da572071 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_newclient.py @@ -0,0 +1,2082 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web._newclient}. 
+""" + +__metaclass__ = type + +from zope.interface import implements +from zope.interface.verify import verifyObject + +from twisted.python.failure import Failure +from twisted.internet.interfaces import IConsumer, IPushProducer +from twisted.internet.error import ConnectionDone +from twisted.internet.defer import Deferred, succeed, fail +from twisted.internet.protocol import Protocol +from twisted.trial.unittest import TestCase +from twisted.test.proto_helpers import StringTransport, AccumulatingProtocol +from twisted.web._newclient import UNKNOWN_LENGTH, STATUS, HEADER, BODY, DONE +from twisted.web._newclient import Request, Response, HTTPParser, HTTPClientParser +from twisted.web._newclient import BadResponseVersion, ParseError, HTTP11ClientProtocol +from twisted.web._newclient import ChunkedEncoder, RequestGenerationFailed, RequestTransmissionFailed, ResponseFailed, WrongBodyLength, RequestNotSent +from twisted.web._newclient import BadHeaders, ResponseDone, PotentialDataLoss, ExcessWrite +from twisted.web._newclient import TransportProxyProducer, LengthEnforcingConsumer, makeStatefulDispatcher +from twisted.web.http_headers import Headers +from twisted.web.http import _DataLoss +from twisted.web.iweb import IBodyProducer + + + +class ArbitraryException(Exception): + """ + A unique, arbitrary exception type which L{twisted.web._newclient} knows + nothing about. + """ + + +class AnotherArbitraryException(Exception): + """ + Similar to L{ArbitraryException} but with a different identity. + """ + + +# A re-usable Headers instance for tests which don't really care what headers +# they're sending. +_boringHeaders = Headers({'host': ['example.com']}) + + +def assertWrapperExceptionTypes(self, deferred, mainType, reasonTypes): + """ + Assert that the given L{Deferred} fails with the exception given by + C{mainType} and that the exceptions wrapped by the instance of C{mainType} + it fails with match the list of exception types given by C{reasonTypes}. + + This is a helper for testing failures of exceptions which subclass + L{_newclient._WrapperException}. + + @param self: A L{TestCase} instance which will be used to make the + assertions. + + @param deferred: The L{Deferred} which is expected to fail with + C{mainType}. + + @param mainType: A L{_newclient._WrapperException} subclass which will be + trapped on C{deferred}. + + @param reasonTypes: A sequence of exception types which will be trapped on + the resulting L{mainType} exception instance's C{reasons} sequence. + + @return: A L{Deferred} which fires with the C{mainType} instance + C{deferred} fails with, or which fails somehow. + """ + def cbFailed(err): + for reason, type in zip(err.reasons, reasonTypes): + reason.trap(type) + self.assertEqual(len(err.reasons), len(reasonTypes)) + return err + d = self.assertFailure(deferred, mainType) + d.addCallback(cbFailed) + return d + + + +def assertResponseFailed(self, deferred, reasonTypes): + """ + A simple helper to invoke L{assertWrapperExceptionTypes} with a C{mainType} + of L{ResponseFailed}. + """ + return assertWrapperExceptionTypes(self, deferred, ResponseFailed, reasonTypes) + + + +def assertRequestGenerationFailed(self, deferred, reasonTypes): + """ + A simple helper to invoke L{assertWrapperExceptionTypes} with a C{mainType} + of L{RequestGenerationFailed}. 
+ """ + return assertWrapperExceptionTypes(self, deferred, RequestGenerationFailed, reasonTypes) + + + +def assertRequestTransmissionFailed(self, deferred, reasonTypes): + """ + A simple helper to invoke L{assertWrapperExceptionTypes} with a C{mainType} + of L{RequestTransmissionFailed}. + """ + return assertWrapperExceptionTypes(self, deferred, RequestTransmissionFailed, reasonTypes) + + + +def justTransportResponse(transport): + """ + Helper function for creating a Response which uses the given transport. + All of the other parameters to L{Response.__init__} are filled with + arbitrary values. Only use this method if you don't care about any of + them. + """ + return Response(('HTTP', 1, 1), 200, 'OK', _boringHeaders, transport) + + +class MakeStatefulDispatcherTests(TestCase): + """ + Tests for L{makeStatefulDispatcher}. + """ + def test_functionCalledByState(self): + """ + A method defined with L{makeStatefulDispatcher} invokes a second + method based on the current state of the object. + """ + class Foo: + _state = 'A' + + def bar(self): + pass + bar = makeStatefulDispatcher('quux', bar) + + def _quux_A(self): + return 'a' + + def _quux_B(self): + return 'b' + + stateful = Foo() + self.assertEqual(stateful.bar(), 'a') + stateful._state = 'B' + self.assertEqual(stateful.bar(), 'b') + stateful._state = 'C' + self.assertRaises(RuntimeError, stateful.bar) + + + +class HTTPParserTests(TestCase): + """ + Tests for L{HTTPParser} which is responsible for the bulk of the task of + parsing HTTP bytes. + """ + def test_statusCallback(self): + """ + L{HTTPParser} calls its C{statusReceived} method when it receives a + status line. + """ + status = [] + protocol = HTTPParser() + protocol.statusReceived = status.append + protocol.makeConnection(StringTransport()) + self.assertEqual(protocol.state, STATUS) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + self.assertEqual(status, ['HTTP/1.1 200 OK']) + self.assertEqual(protocol.state, HEADER) + + + def _headerTestSetup(self): + header = {} + protocol = HTTPParser() + protocol.headerReceived = header.__setitem__ + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + return header, protocol + + + def test_headerCallback(self): + """ + L{HTTPParser} calls its C{headerReceived} method when it receives a + header. + """ + header, protocol = self._headerTestSetup() + protocol.dataReceived('X-Foo:bar\r\n') + # Cannot tell it's not a continue header until the next line arrives + # and is not a continuation + protocol.dataReceived('\r\n') + self.assertEqual(header, {'X-Foo': 'bar'}) + self.assertEqual(protocol.state, BODY) + + + def test_continuedHeaderCallback(self): + """ + If a header is split over multiple lines, L{HTTPParser} calls + C{headerReceived} with the entire value once it is received. + """ + header, protocol = self._headerTestSetup() + protocol.dataReceived('X-Foo: bar\r\n') + protocol.dataReceived(' baz\r\n') + protocol.dataReceived('\tquux\r\n') + protocol.dataReceived('\r\n') + self.assertEqual(header, {'X-Foo': 'bar baz\tquux'}) + self.assertEqual(protocol.state, BODY) + + + def test_fieldContentWhitespace(self): + """ + Leading and trailing linear whitespace is stripped from the header + value passed to the C{headerReceived} callback. 
+ """ + header, protocol = self._headerTestSetup() + value = ' \t \r\n bar \t\r\n \t\r\n' + protocol.dataReceived('X-Bar:' + value) + protocol.dataReceived('X-Foo:' + value) + protocol.dataReceived('\r\n') + self.assertEqual(header, {'X-Foo': 'bar', + 'X-Bar': 'bar'}) + + + def test_allHeadersCallback(self): + """ + After the last header is received, L{HTTPParser} calls + C{allHeadersReceived}. + """ + called = [] + header, protocol = self._headerTestSetup() + def allHeadersReceived(): + called.append(protocol.state) + protocol.state = STATUS + protocol.allHeadersReceived = allHeadersReceived + protocol.dataReceived('\r\n') + self.assertEqual(called, [HEADER]) + self.assertEqual(protocol.state, STATUS) + + + def test_noHeaderCallback(self): + """ + If there are no headers in the message, L{HTTPParser} does not call + C{headerReceived}. + """ + header, protocol = self._headerTestSetup() + protocol.dataReceived('\r\n') + self.assertEqual(header, {}) + self.assertEqual(protocol.state, BODY) + + + def test_headersSavedOnResponse(self): + """ + All headers received by L{HTTPParser} are added to + L{HTTPParser.headers}. + """ + protocol = HTTPParser() + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + protocol.dataReceived('X-Foo: bar\r\n') + protocol.dataReceived('X-Foo: baz\r\n') + protocol.dataReceived('\r\n') + self.assertEqual( + list(protocol.headers.getAllRawHeaders()), + [('X-Foo', ['bar', 'baz'])]) + + + def test_connectionControlHeaders(self): + """ + L{HTTPParser.isConnectionControlHeader} returns C{True} for headers + which are always connection control headers (similar to "hop-by-hop" + headers from RFC 2616 section 13.5.1) and C{False} for other headers. + """ + protocol = HTTPParser() + connHeaderNames = [ + 'content-length', 'connection', 'keep-alive', 'te', 'trailers', + 'transfer-encoding', 'upgrade', 'proxy-connection'] + + for header in connHeaderNames: + self.assertTrue( + protocol.isConnectionControlHeader(header), + "Expecting %r to be a connection control header, but " + "wasn't" % (header,)) + self.assertFalse( + protocol.isConnectionControlHeader("date"), + "Expecting the arbitrarily selected 'date' header to not be " + "a connection control header, but was.") + + + def test_switchToBodyMode(self): + """ + L{HTTPParser.switchToBodyMode} raises L{RuntimeError} if called more + than once. + """ + protocol = HTTPParser() + protocol.makeConnection(StringTransport()) + protocol.switchToBodyMode(object()) + self.assertRaises(RuntimeError, protocol.switchToBodyMode, object()) + + + +class HTTPClientParserTests(TestCase): + """ + Tests for L{HTTPClientParser} which is responsible for parsing HTTP + response messages. + """ + def test_parseVersion(self): + """ + L{HTTPClientParser.parseVersion} parses a status line into its three + components. + """ + protocol = HTTPClientParser(None, None) + self.assertEqual( + protocol.parseVersion('CANDY/7.2'), + ('CANDY', 7, 2)) + + + def test_parseBadVersion(self): + """ + L{HTTPClientParser.parseVersion} raises L{ValueError} when passed an + unparsable version. 
+ """ + protocol = HTTPClientParser(None, None) + e = BadResponseVersion + f = protocol.parseVersion + + def checkParsing(s): + exc = self.assertRaises(e, f, s) + self.assertEqual(exc.data, s) + + checkParsing('foo') + checkParsing('foo/bar/baz') + + checkParsing('foo/') + checkParsing('foo/..') + + checkParsing('foo/a.b') + checkParsing('foo/-1.-1') + + + def test_responseStatusParsing(self): + """ + L{HTTPClientParser.statusReceived} parses the version, code, and phrase + from the status line and stores them on the response object. + """ + request = Request('GET', '/', _boringHeaders, None) + protocol = HTTPClientParser(request, None) + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + self.assertEqual(protocol.response.version, ('HTTP', 1, 1)) + self.assertEqual(protocol.response.code, 200) + self.assertEqual(protocol.response.phrase, 'OK') + + + def test_badResponseStatus(self): + """ + L{HTTPClientParser.statusReceived} raises L{ParseError} if it is called + with a status line which cannot be parsed. + """ + protocol = HTTPClientParser(None, None) + + def checkParsing(s): + exc = self.assertRaises(ParseError, protocol.statusReceived, s) + self.assertEqual(exc.data, s) + + # If there are fewer than three whitespace-delimited parts to the + # status line, it is not valid and cannot be parsed. + checkParsing('foo') + checkParsing('HTTP/1.1 200') + + # If the response code is not an integer, the status line is not valid + # and cannot be parsed. + checkParsing('HTTP/1.1 bar OK') + + + def _noBodyTest(self, request, response): + """ + Assert that L{HTTPClientParser} parses the given C{response} to + C{request}, resulting in a response with no body and no extra bytes and + leaving the transport in the producing state. + + @param request: A L{Request} instance which might have caused a server + to return the given response. + @param response: A string giving the response to be parsed. + + @return: A C{dict} of headers from the response. + """ + header = {} + finished = [] + protocol = HTTPClientParser(request, finished.append) + protocol.headerReceived = header.__setitem__ + body = [] + protocol._bodyDataReceived = body.append + transport = StringTransport() + protocol.makeConnection(transport) + protocol.dataReceived(response) + self.assertEqual(transport.producerState, 'producing') + self.assertEqual(protocol.state, DONE) + self.assertEqual(body, []) + self.assertEqual(finished, ['']) + self.assertEqual(protocol.response.length, 0) + return header + + + def test_headResponse(self): + """ + If the response is to a HEAD request, no body is expected, the body + callback is not invoked, and the I{Content-Length} header is passed to + the header callback. + """ + request = Request('HEAD', '/', _boringHeaders, None) + status = ( + 'HTTP/1.1 200 OK\r\n' + 'Content-Length: 10\r\n' + '\r\n') + header = self._noBodyTest(request, status) + self.assertEqual(header, {'Content-Length': '10'}) + + + def test_noContentResponse(self): + """ + If the response code is I{NO CONTENT} (204), no body is expected and + the body callback is not invoked. + """ + request = Request('GET', '/', _boringHeaders, None) + status = ( + 'HTTP/1.1 204 NO CONTENT\r\n' + '\r\n') + self._noBodyTest(request, status) + + + def test_notModifiedResponse(self): + """ + If the response code is I{NOT MODIFIED} (304), no body is expected and + the body callback is not invoked. 
+ """ + request = Request('GET', '/', _boringHeaders, None) + status = ( + 'HTTP/1.1 304 NOT MODIFIED\r\n' + '\r\n') + self._noBodyTest(request, status) + + + def test_responseHeaders(self): + """ + The response headers are added to the response object's C{headers} + L{Headers} instance. + """ + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), + lambda rest: None) + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + protocol.dataReceived('X-Foo: bar\r\n') + protocol.dataReceived('\r\n') + self.assertEqual( + protocol.connHeaders, + Headers({})) + self.assertEqual( + protocol.response.headers, + Headers({'x-foo': ['bar']})) + self.assertIdentical(protocol.response.length, UNKNOWN_LENGTH) + + + def test_connectionHeaders(self): + """ + The connection control headers are added to the parser's C{connHeaders} + L{Headers} instance. + """ + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), + lambda rest: None) + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + protocol.dataReceived('Content-Length: 123\r\n') + protocol.dataReceived('Connection: close\r\n') + protocol.dataReceived('\r\n') + self.assertEqual( + protocol.response.headers, + Headers({})) + self.assertEqual( + protocol.connHeaders, + Headers({'content-length': ['123'], + 'connection': ['close']})) + self.assertEqual(protocol.response.length, 123) + + + def test_headResponseContentLengthEntityHeader(self): + """ + If a HEAD request is made, the I{Content-Length} header in the response + is added to the response headers, not the connection control headers. + """ + protocol = HTTPClientParser( + Request('HEAD', '/', _boringHeaders, None), + lambda rest: None) + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + protocol.dataReceived('Content-Length: 123\r\n') + protocol.dataReceived('\r\n') + self.assertEqual( + protocol.response.headers, + Headers({'content-length': ['123']})) + self.assertEqual( + protocol.connHeaders, + Headers({})) + self.assertEqual(protocol.response.length, 0) + + + def test_contentLength(self): + """ + If a response includes a body with a length given by the + I{Content-Length} header, the bytes which make up the body are passed + to the C{_bodyDataReceived} callback on the L{HTTPParser}. + """ + finished = [] + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), + finished.append) + transport = StringTransport() + protocol.makeConnection(transport) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + body = [] + protocol.response._bodyDataReceived = body.append + protocol.dataReceived('Content-Length: 10\r\n') + protocol.dataReceived('\r\n') + + # Incidentally, the transport should be paused now. It is the response + # object's responsibility to resume this when it is ready for bytes. + self.assertEqual(transport.producerState, 'paused') + + self.assertEqual(protocol.state, BODY) + protocol.dataReceived('x' * 6) + self.assertEqual(body, ['x' * 6]) + self.assertEqual(protocol.state, BODY) + protocol.dataReceived('y' * 4) + self.assertEqual(body, ['x' * 6, 'y' * 4]) + self.assertEqual(protocol.state, DONE) + self.assertTrue(finished, ['']) + + + def test_zeroContentLength(self): + """ + If a response includes a I{Content-Length} header indicating zero bytes + in the response, L{Response.length} is set accordingly and no data is + delivered to L{Response._bodyDataReceived}. 
+ """ + finished = [] + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), + finished.append) + + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + + body = [] + protocol.response._bodyDataReceived = body.append + + protocol.dataReceived('Content-Length: 0\r\n') + protocol.dataReceived('\r\n') + + self.assertEqual(protocol.state, DONE) + self.assertEqual(body, []) + self.assertTrue(finished, ['']) + self.assertEqual(protocol.response.length, 0) + + + + def test_multipleContentLengthHeaders(self): + """ + If a response includes multiple I{Content-Length} headers, + L{HTTPClientParser.dataReceived} raises L{ValueError} to indicate that + the response is invalid and the transport is now unusable. + """ + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), + None) + + protocol.makeConnection(StringTransport()) + self.assertRaises( + ValueError, + protocol.dataReceived, + 'HTTP/1.1 200 OK\r\n' + 'Content-Length: 1\r\n' + 'Content-Length: 2\r\n' + '\r\n') + + + def test_extraBytesPassedBack(self): + """ + If extra bytes are received past the end of a response, they are passed + to the finish callback. + """ + finished = [] + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), + finished.append) + + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + protocol.dataReceived('Content-Length: 0\r\n') + protocol.dataReceived('\r\nHere is another thing!') + self.assertEqual(protocol.state, DONE) + self.assertEqual(finished, ['Here is another thing!']) + + + def test_extraBytesPassedBackHEAD(self): + """ + If extra bytes are received past the end of the headers of a response + to a HEAD request, they are passed to the finish callback. + """ + finished = [] + protocol = HTTPClientParser( + Request('HEAD', '/', _boringHeaders, None), + finished.append) + + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + protocol.dataReceived('Content-Length: 12\r\n') + protocol.dataReceived('\r\nHere is another thing!') + self.assertEqual(protocol.state, DONE) + self.assertEqual(finished, ['Here is another thing!']) + + + def test_chunkedResponseBody(self): + """ + If the response headers indicate the response body is encoded with the + I{chunked} transfer encoding, the body is decoded according to that + transfer encoding before being passed to L{Response._bodyDataReceived}. + """ + finished = [] + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), + finished.append) + protocol.makeConnection(StringTransport()) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + + body = [] + protocol.response._bodyDataReceived = body.append + + protocol.dataReceived('Transfer-Encoding: chunked\r\n') + protocol.dataReceived('\r\n') + + # No data delivered yet + self.assertEqual(body, []) + + # Cannot predict the length of a chunked encoded response body. + self.assertIdentical(protocol.response.length, UNKNOWN_LENGTH) + + # Deliver some chunks and make sure the data arrives + protocol.dataReceived('3\r\na') + self.assertEqual(body, ['a']) + protocol.dataReceived('bc\r\n') + self.assertEqual(body, ['a', 'bc']) + + # The response's _bodyDataFinished method should be called when the last + # chunk is received. Extra data should be passed to the finished + # callback. 
+ protocol.dataReceived('0\r\n\r\nextra') + self.assertEqual(finished, ['extra']) + + + def test_unknownContentLength(self): + """ + If a response does not include a I{Transfer-Encoding} or a + I{Content-Length}, the end of response body is indicated by the + connection being closed. + """ + finished = [] + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), finished.append) + transport = StringTransport() + protocol.makeConnection(transport) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + + body = [] + protocol.response._bodyDataReceived = body.append + + protocol.dataReceived('\r\n') + protocol.dataReceived('foo') + protocol.dataReceived('bar') + self.assertEqual(body, ['foo', 'bar']) + protocol.connectionLost(ConnectionDone("simulated end of connection")) + self.assertEqual(finished, ['']) + + + def test_contentLengthAndTransferEncoding(self): + """ + According to RFC 2616, section 4.4, point 3, if I{Content-Length} and + I{Transfer-Encoding: chunked} are present, I{Content-Length} MUST be + ignored + """ + finished = [] + protocol = HTTPClientParser( + Request('GET', '/', _boringHeaders, None), finished.append) + transport = StringTransport() + protocol.makeConnection(transport) + protocol.dataReceived('HTTP/1.1 200 OK\r\n') + + body = [] + protocol.response._bodyDataReceived = body.append + + protocol.dataReceived( + 'Content-Length: 102\r\n' + 'Transfer-Encoding: chunked\r\n' + '\r\n' + '3\r\n' + 'abc\r\n' + '0\r\n' + '\r\n') + + self.assertEqual(body, ['abc']) + self.assertEqual(finished, ['']) + + + def test_connectionLostBeforeBody(self): + """ + If L{HTTPClientParser.connectionLost} is called before the headers are + finished, the C{_responseDeferred} is fired with the L{Failure} passed + to C{connectionLost}. + """ + transport = StringTransport() + protocol = HTTPClientParser(Request('GET', '/', _boringHeaders, None), None) + protocol.makeConnection(transport) + # Grab this here because connectionLost gets rid of the attribute + responseDeferred = protocol._responseDeferred + protocol.connectionLost(Failure(ArbitraryException())) + + return assertResponseFailed( + self, responseDeferred, [ArbitraryException]) + + + def test_connectionLostWithError(self): + """ + If one of the L{Response} methods called by + L{HTTPClientParser.connectionLost} raises an exception, the exception + is logged and not re-raised. + """ + transport = StringTransport() + protocol = HTTPClientParser(Request('GET', '/', _boringHeaders, None), None) + protocol.makeConnection(transport) + + response = [] + protocol._responseDeferred.addCallback(response.append) + protocol.dataReceived( + 'HTTP/1.1 200 OK\r\n' + 'Content-Length: 1\r\n' + '\r\n') + response = response[0] + + # Arrange for an exception + def fakeBodyDataFinished(err=None): + raise ArbitraryException() + response._bodyDataFinished = fakeBodyDataFinished + + protocol.connectionLost(None) + + self.assertEqual(len(self.flushLoggedErrors(ArbitraryException)), 1) + + + +class SlowRequest: + """ + L{SlowRequest} is a fake implementation of L{Request} which is easily + controlled externally (for example, by code in a test method). + + @ivar stopped: A flag indicating whether C{stopWriting} has been called. + + @ivar finished: After C{writeTo} is called, a L{Deferred} which was + returned by that method. L{SlowRequest} will never fire this + L{Deferred}. 
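The tests just above cover how the parser decides where a response body ends: a chunked Transfer-Encoding wins over any Content-Length (RFC 2616 section 4.4), a single Content-Length gives an exact byte count, conflicting Content-Length values are an error, and with neither header the body runs until the connection closes. A sketch of that decision only, leaving out the HEAD/204/304 no-body cases handled elsewhere in the parser (choose_framing is an invented helper):

def choose_framing(connection_headers):
    """Pick how to delimit a response body, per RFC 2616 section 4.4 (sketch).

    connection_headers maps lowercased names to lists of values, like the
    connHeaders object exercised in the tests above.
    """
    te = connection_headers.get('transfer-encoding', [])
    if any(v.lower() == 'chunked' for v in te):
        return ('chunked', None)            # Content-Length MUST be ignored
    lengths = connection_headers.get('content-length', [])
    if len(lengths) > 1:
        raise ValueError("multiple Content-Length headers")
    if lengths:
        return ('length', int(lengths[0]))
    return ('until-close', None)            # body ends when the connection does

assert choose_framing({'transfer-encoding': ['chunked'],
                       'content-length': ['102']}) == ('chunked', None)
assert choose_framing({'content-length': ['10']}) == ('length', 10)
assert choose_framing({}) == ('until-close', None)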
+ """ + method = 'GET' + stopped = False + + def writeTo(self, transport): + self.finished = Deferred() + return self.finished + + + def stopWriting(self): + self.stopped = True + + + +class SimpleRequest: + """ + L{SimpleRequest} is a fake implementation of L{Request} which writes a + short, fixed string to the transport passed to its C{writeTo} method and + returns a succeeded L{Deferred}. This vaguely emulates the behavior of a + L{Request} with no body producer. + """ + def writeTo(self, transport): + transport.write('SOME BYTES') + return succeed(None) + + + +class HTTP11ClientProtocolTests(TestCase): + """ + Tests for the HTTP 1.1 client protocol implementation, + L{HTTP11ClientProtocol}. + """ + def setUp(self): + """ + Create an L{HTTP11ClientProtocol} connected to a fake transport. + """ + self.transport = StringTransport() + self.protocol = HTTP11ClientProtocol() + self.protocol.makeConnection(self.transport) + + + def test_request(self): + """ + L{HTTP11ClientProtocol.request} accepts a L{Request} and calls its + C{writeTo} method with its own transport. + """ + self.protocol.request(SimpleRequest()) + self.assertEqual(self.transport.value(), 'SOME BYTES') + + + def test_secondRequest(self): + """ + The second time L{HTTP11ClientProtocol.request} is called, it returns a + L{Deferred} which immediately fires with a L{Failure} wrapping a + L{RequestNotSent} exception. + """ + self.protocol.request(SlowRequest()) + def cbNotSent(ignored): + self.assertEqual(self.transport.value(), '') + d = self.assertFailure( + self.protocol.request(SimpleRequest()), RequestNotSent) + d.addCallback(cbNotSent) + return d + + + def test_requestAfterConnectionLost(self): + """ + L{HTTP11ClientProtocol.request} returns a L{Deferred} which immediately + fires with a L{Failure} wrapping a L{RequestNotSent} if called after + the protocol has been disconnected. + """ + self.protocol.connectionLost( + Failure(ConnectionDone("sad transport"))) + def cbNotSent(ignored): + self.assertEqual(self.transport.value(), '') + d = self.assertFailure( + self.protocol.request(SimpleRequest()), RequestNotSent) + d.addCallback(cbNotSent) + return d + + + def test_failedWriteTo(self): + """ + If the L{Deferred} returned by L{Request.writeTo} fires with a + L{Failure}, L{HTTP11ClientProtocol.request} disconnects its transport + and returns a L{Deferred} which fires with a L{Failure} of + L{RequestGenerationFailed} wrapping the underlying failure. + """ + class BrokenRequest: + def writeTo(self, transport): + return fail(ArbitraryException()) + + d = self.protocol.request(BrokenRequest()) + def cbFailed(ignored): + self.assertTrue(self.transport.disconnecting) + # Simulate what would happen if the protocol had a real transport + # and make sure no exception is raised. + self.protocol.connectionLost( + Failure(ConnectionDone("you asked for it"))) + d = assertRequestGenerationFailed(self, d, [ArbitraryException]) + d.addCallback(cbFailed) + return d + + + def test_synchronousWriteToError(self): + """ + If L{Request.writeTo} raises an exception, + L{HTTP11ClientProtocol.request} returns a L{Deferred} which fires with + a L{Failure} of L{RequestGenerationFailed} wrapping that exception. 
+ """ + class BrokenRequest: + def writeTo(self, transport): + raise ArbitraryException() + + d = self.protocol.request(BrokenRequest()) + return assertRequestGenerationFailed(self, d, [ArbitraryException]) + + + def test_connectionLostDuringRequestGeneration(self, mode=None): + """ + If L{HTTP11ClientProtocol}'s transport is disconnected before the + L{Deferred} returned by L{Request.writeTo} fires, the L{Deferred} + returned by L{HTTP11ClientProtocol.request} fires with a L{Failure} of + L{RequestTransmissionFailed} wrapping the underlying failure. + """ + request = SlowRequest() + d = self.protocol.request(request) + d = assertRequestTransmissionFailed(self, d, [ArbitraryException]) + + # The connection hasn't been lost yet. The request should still be + # allowed to do its thing. + self.assertFalse(request.stopped) + + self.protocol.connectionLost(Failure(ArbitraryException())) + + # Now the connection has been lost. The request should have been told + # to stop writing itself. + self.assertTrue(request.stopped) + + if mode == 'callback': + request.finished.callback(None) + elif mode == 'errback': + request.finished.errback(Failure(AnotherArbitraryException())) + errors = self.flushLoggedErrors(AnotherArbitraryException) + self.assertEqual(len(errors), 1) + else: + # Don't fire the writeTo Deferred at all. + pass + return d + + + def test_connectionLostBeforeGenerationFinished(self): + """ + If the request passed to L{HTTP11ClientProtocol} finishes generation + successfully after the L{HTTP11ClientProtocol}'s connection has been + lost, nothing happens. + """ + return self.test_connectionLostDuringRequestGeneration('callback') + + + def test_connectionLostBeforeGenerationFailed(self): + """ + If the request passed to L{HTTP11ClientProtocol} finished generation + with an error after the L{HTTP11ClientProtocol}'s connection has been + lost, nothing happens. + """ + return self.test_connectionLostDuringRequestGeneration('errback') + + + def test_receiveSimplestResponse(self): + """ + When a response is delivered to L{HTTP11ClientProtocol}, the + L{Deferred} previously returned by the C{request} method is called back + with a L{Response} instance and the connection is closed. + """ + d = self.protocol.request(Request('GET', '/', _boringHeaders, None)) + def cbRequest(response): + self.assertEqual(response.code, 200) + self.assertEqual(response.headers, Headers()) + self.assertTrue(self.transport.disconnecting) + d.addCallback(cbRequest) + self.protocol.dataReceived( + "HTTP/1.1 200 OK\r\n" + "Content-Length: 0\r\n" + "\r\n") + return d + + + def test_receiveResponseHeaders(self): + """ + The headers included in a response delivered to L{HTTP11ClientProtocol} + are included on the L{Response} instance passed to the callback + returned by the C{request} method. + """ + d = self.protocol.request(Request('GET', '/', _boringHeaders, None)) + def cbRequest(response): + expected = Headers({'x-foo': ['bar', 'baz']}) + self.assertEqual(response.headers, expected) + d.addCallback(cbRequest) + self.protocol.dataReceived( + "HTTP/1.1 200 OK\r\n" + "X-Foo: bar\r\n" + "X-Foo: baz\r\n" + "\r\n") + return d + + + def test_receiveResponseBeforeRequestGenerationDone(self): + """ + If response bytes are delivered to L{HTTP11ClientProtocol} before the + L{Deferred} returned by L{Request.writeTo} fires, those response bytes + are parsed as part of the response. 
+ """ + request = SlowRequest() + d = self.protocol.request(request) + self.protocol.dataReceived( + "HTTP/1.1 200 OK\r\n" + "X-Foo: bar\r\n" + "Content-Length: 6\r\n" + "\r\n" + "foobar") + def cbResponse(response): + p = AccumulatingProtocol() + whenFinished = p.closedDeferred = Deferred() + response.deliverBody(p) + return whenFinished.addCallback( + lambda ign: (response, p.data)) + d.addCallback(cbResponse) + def cbAllResponse((response, body)): + self.assertEqual(response.version, ('HTTP', 1, 1)) + self.assertEqual(response.code, 200) + self.assertEqual(response.phrase, 'OK') + self.assertEqual(response.headers, Headers({'x-foo': ['bar']})) + self.assertEqual(body, "foobar") + + # Also nothing bad should happen if the request does finally + # finish, even though it is completely irrelevant. + request.finished.callback(None) + + d.addCallback(cbAllResponse) + return d + + + def test_receiveResponseBody(self): + """ + The C{deliverBody} method of the response object with which the + L{Deferred} returned by L{HTTP11ClientProtocol.request} fires can be + used to get the body of the response. + """ + protocol = AccumulatingProtocol() + whenFinished = protocol.closedDeferred = Deferred() + requestDeferred = self.protocol.request(Request('GET', '/', _boringHeaders, None)) + + self.protocol.dataReceived( + "HTTP/1.1 200 OK\r\n" + "Content-Length: 6\r\n" + "\r") + + # Here's what's going on: all the response headers have been delivered + # by this point, so the request Deferred can fire with a Response + # object. The body is yet to come, but that's okay, because the + # Response object is how you *get* the body. + result = [] + requestDeferred.addCallback(result.append) + + self.assertEqual(result, []) + # Deliver the very last byte of the response. It is exactly at this + # point which the Deferred returned by request should fire. + self.protocol.dataReceived("\n") + response = result[0] + + response.deliverBody(protocol) + + self.protocol.dataReceived("foo") + self.protocol.dataReceived("bar") + + def cbAllResponse(ignored): + self.assertEqual(protocol.data, "foobar") + protocol.closedReason.trap(ResponseDone) + whenFinished.addCallback(cbAllResponse) + return whenFinished + + + def test_responseBodyFinishedWhenConnectionLostWhenContentLengthIsUnknown( + self): + """ + If the length of the response body is unknown, the protocol passed to + the response's C{deliverBody} method has its C{connectionLost} + method called with a L{Failure} wrapping a L{PotentialDataLoss} + exception. + """ + requestDeferred = self.protocol.request(Request('GET', '/', _boringHeaders, None)) + self.protocol.dataReceived( + "HTTP/1.1 200 OK\r\n" + "\r\n") + + result = [] + requestDeferred.addCallback(result.append) + response = result[0] + + protocol = AccumulatingProtocol() + response.deliverBody(protocol) + + self.protocol.dataReceived("foo") + self.protocol.dataReceived("bar") + + self.assertEqual(protocol.data, "foobar") + self.protocol.connectionLost( + Failure(ConnectionDone("low-level transport disconnected"))) + + protocol.closedReason.trap(PotentialDataLoss) + + + def test_chunkedResponseBodyUnfinishedWhenConnectionLost(self): + """ + If the final chunk has not been received when the connection is lost + (for any reason), the protocol passed to C{deliverBody} has its + C{connectionLost} method called with a L{Failure} wrapping the + exception for that reason. 
+ """ + requestDeferred = self.protocol.request(Request('GET', '/', _boringHeaders, None)) + self.protocol.dataReceived( + "HTTP/1.1 200 OK\r\n" + "Transfer-Encoding: chunked\r\n" + "\r\n") + + result = [] + requestDeferred.addCallback(result.append) + response = result[0] + + protocol = AccumulatingProtocol() + response.deliverBody(protocol) + + self.protocol.dataReceived("3\r\nfoo\r\n") + self.protocol.dataReceived("3\r\nbar\r\n") + + self.assertEqual(protocol.data, "foobar") + + self.protocol.connectionLost(Failure(ArbitraryException())) + + return assertResponseFailed( + self, fail(protocol.closedReason), [ArbitraryException, _DataLoss]) + + + def test_parserDataReceivedException(self): + """ + If the parser L{HTTP11ClientProtocol} delivers bytes to in + C{dataReceived} raises an exception, the exception is wrapped in a + L{Failure} and passed to the parser's C{connectionLost} and then the + L{HTTP11ClientProtocol}'s transport is disconnected. + """ + requestDeferred = self.protocol.request(Request('GET', '/', _boringHeaders, None)) + self.protocol.dataReceived('unparseable garbage goes here\r\n') + d = assertResponseFailed(self, requestDeferred, [ParseError]) + def cbFailed(exc): + self.assertTrue(self.transport.disconnecting) + self.assertEqual( + exc.reasons[0].value.data, 'unparseable garbage goes here') + + # Now do what StringTransport doesn't do but a real transport would + # have, call connectionLost on the HTTP11ClientProtocol. Nothing + # is asserted about this, but it's important for it to not raise an + # exception. + self.protocol.connectionLost(Failure(ConnectionDone("it is done"))) + + d.addCallback(cbFailed) + return d + + + def test_proxyStopped(self): + """ + When the HTTP response parser is disconnected, the + L{TransportProxyProducer} which was connected to it as a transport is + stopped. + """ + requestDeferred = self.protocol.request(Request('GET', '/', _boringHeaders, None)) + transport = self.protocol._parser.transport + self.assertIdentical(transport._producer, self.transport) + self.protocol._disconnectParser(Failure(ConnectionDone("connection done"))) + self.assertIdentical(transport._producer, None) + return assertResponseFailed(self, requestDeferred, [ConnectionDone]) + + + +class StringProducer: + """ + L{StringProducer} is a dummy body producer. + + @ivar stopped: A flag which indicates whether or not C{stopProducing} has + been called. + @ivar consumer: After C{startProducing} is called, the value of the + C{consumer} argument to that method. + @ivar finished: After C{startProducing} is called, a L{Deferred} which was + returned by that method. L{StringProducer} will never fire this + L{Deferred}. + """ + implements(IBodyProducer) + + stopped = False + + def __init__(self, length): + self.length = length + + + def startProducing(self, consumer): + self.consumer = consumer + self.finished = Deferred() + return self.finished + + + def stopProducing(self): + self.stopped = True + + + +class RequestTests(TestCase): + """ + Tests for L{Request}. + """ + def setUp(self): + self.transport = StringTransport() + + + def test_sendSimplestRequest(self): + """ + L{Request.writeTo} formats the request data and writes it to the given + transport. 
+ """ + Request('GET', '/', _boringHeaders, None).writeTo(self.transport) + self.assertEqual( + self.transport.value(), + "GET / HTTP/1.1\r\n" + "Connection: close\r\n" + "Host: example.com\r\n" + "\r\n") + + + def test_sendRequestHeaders(self): + """ + L{Request.writeTo} formats header data and writes it to the given + transport. + """ + headers = Headers({'x-foo': ['bar', 'baz'], 'host': ['example.com']}) + Request('GET', '/foo', headers, None).writeTo(self.transport) + lines = self.transport.value().split('\r\n') + self.assertEqual(lines[0], "GET /foo HTTP/1.1") + self.assertEqual(lines[-2:], ["", ""]) + del lines[0], lines[-2:] + lines.sort() + self.assertEqual( + lines, + ["Connection: close", + "Host: example.com", + "X-Foo: bar", + "X-Foo: baz"]) + + + def test_sendChunkedRequestBody(self): + """ + L{Request.writeTo} uses chunked encoding to write data from the request + body producer to the given transport. It registers the request body + producer with the transport. + """ + producer = StringProducer(UNKNOWN_LENGTH) + request = Request('POST', '/bar', _boringHeaders, producer) + request.writeTo(self.transport) + + self.assertNotIdentical(producer.consumer, None) + self.assertIdentical(self.transport.producer, producer) + self.assertTrue(self.transport.streaming) + + self.assertEqual( + self.transport.value(), + "POST /bar HTTP/1.1\r\n" + "Connection: close\r\n" + "Transfer-Encoding: chunked\r\n" + "Host: example.com\r\n" + "\r\n") + self.transport.clear() + + producer.consumer.write('x' * 3) + producer.consumer.write('y' * 15) + producer.finished.callback(None) + self.assertIdentical(self.transport.producer, None) + self.assertEqual( + self.transport.value(), + "3\r\n" + "xxx\r\n" + "f\r\n" + "yyyyyyyyyyyyyyy\r\n" + "0\r\n" + "\r\n") + + + def test_sendChunkedRequestBodyWithError(self): + """ + If L{Request} is created with a C{bodyProducer} without a known length + and the L{Deferred} returned from its C{startProducing} method fires + with a L{Failure}, the L{Deferred} returned by L{Request.writeTo} fires + with that L{Failure} and the body producer is unregistered from the + transport. The final zero-length chunk is not written to the + transport. + """ + producer = StringProducer(UNKNOWN_LENGTH) + request = Request('POST', '/bar', _boringHeaders, producer) + writeDeferred = request.writeTo(self.transport) + self.transport.clear() + producer.finished.errback(ArbitraryException()) + def cbFailed(ignored): + self.assertEqual(self.transport.value(), "") + self.assertIdentical(self.transport.producer, None) + d = self.assertFailure(writeDeferred, ArbitraryException) + d.addCallback(cbFailed) + return d + + + def test_sendRequestBodyWithLength(self): + """ + If L{Request} is created with a C{bodyProducer} with a known length, + that length is sent as the value for the I{Content-Length} header and + chunked encoding is not used. 
+ """ + producer = StringProducer(3) + request = Request('POST', '/bar', _boringHeaders, producer) + request.writeTo(self.transport) + + self.assertNotIdentical(producer.consumer, None) + self.assertIdentical(self.transport.producer, producer) + self.assertTrue(self.transport.streaming) + + self.assertEqual( + self.transport.value(), + "POST /bar HTTP/1.1\r\n" + "Connection: close\r\n" + "Content-Length: 3\r\n" + "Host: example.com\r\n" + "\r\n") + self.transport.clear() + + producer.consumer.write('abc') + producer.finished.callback(None) + self.assertIdentical(self.transport.producer, None) + self.assertEqual(self.transport.value(), "abc") + + + def test_sendRequestBodyWithTooFewBytes(self): + """ + If L{Request} is created with a C{bodyProducer} with a known length and + the producer does not produce that many bytes, the L{Deferred} returned + by L{Request.writeTo} fires with a L{Failure} wrapping a + L{WrongBodyLength} exception. + """ + producer = StringProducer(3) + request = Request('POST', '/bar', _boringHeaders, producer) + writeDeferred = request.writeTo(self.transport) + producer.consumer.write('ab') + producer.finished.callback(None) + self.assertIdentical(self.transport.producer, None) + return self.assertFailure(writeDeferred, WrongBodyLength) + + + def _sendRequestBodyWithTooManyBytesTest(self, finisher): + """ + Verify that when too many bytes have been written by a body producer + and then the body producer's C{startProducing} L{Deferred} fires that + the producer is unregistered from the transport and that the + L{Deferred} returned from L{Request.writeTo} is fired with a L{Failure} + wrapping a L{WrongBodyLength}. + + @param finisher: A callable which will be invoked with the body + producer after too many bytes have been written to the transport. + It should fire the startProducing Deferred somehow. + """ + producer = StringProducer(3) + request = Request('POST', '/bar', _boringHeaders, producer) + writeDeferred = request.writeTo(self.transport) + + producer.consumer.write('ab') + + # The producer hasn't misbehaved yet, so it shouldn't have been + # stopped. + self.assertFalse(producer.stopped) + + producer.consumer.write('cd') + + # Now the producer *has* misbehaved, so we should have tried to + # make it stop. + self.assertTrue(producer.stopped) + + # The transport should have had the producer unregistered from it as + # well. + self.assertIdentical(self.transport.producer, None) + + def cbFailed(exc): + # The "cd" should not have been written to the transport because + # the request can now locally be recognized to be invalid. If we + # had written the extra bytes, the server could have decided to + # start processing the request, which would be bad since we're + # going to indicate failure locally. + self.assertEqual( + self.transport.value(), + "POST /bar HTTP/1.1\r\n" + "Connection: close\r\n" + "Content-Length: 3\r\n" + "Host: example.com\r\n" + "\r\n" + "ab") + self.transport.clear() + + # Subsequent writes should be ignored, as should firing the + # Deferred returned from startProducing. + self.assertRaises(ExcessWrite, producer.consumer.write, 'ef') + + # Likewise, if the Deferred returned from startProducing fires, + # this should more or less be ignored (aside from possibly logging + # an error). + finisher(producer) + + # There should have been nothing further written to the transport. 
+ self.assertEqual(self.transport.value(), "") + + d = self.assertFailure(writeDeferred, WrongBodyLength) + d.addCallback(cbFailed) + return d + + + def test_sendRequestBodyWithTooManyBytes(self): + """ + If L{Request} is created with a C{bodyProducer} with a known length and + the producer tries to produce more than than many bytes, the + L{Deferred} returned by L{Request.writeTo} fires with a L{Failure} + wrapping a L{WrongBodyLength} exception. + """ + def finisher(producer): + producer.finished.callback(None) + return self._sendRequestBodyWithTooManyBytesTest(finisher) + + + def test_sendRequestBodyErrorWithTooManyBytes(self): + """ + If L{Request} is created with a C{bodyProducer} with a known length and + the producer tries to produce more than than many bytes, the + L{Deferred} returned by L{Request.writeTo} fires with a L{Failure} + wrapping a L{WrongBodyLength} exception. + """ + def finisher(producer): + producer.finished.errback(ArbitraryException()) + errors = self.flushLoggedErrors(ArbitraryException) + self.assertEqual(len(errors), 1) + return self._sendRequestBodyWithTooManyBytesTest(finisher) + + + def test_sendRequestBodyErrorWithConsumerError(self): + """ + Though there should be no way for the internal C{finishedConsuming} + L{Deferred} in L{Request._writeToContentLength} to fire a L{Failure} + after the C{finishedProducing} L{Deferred} has fired, in case this does + happen, the error should be logged with a message about how there's + probably a bug in L{Request}. + + This is a whitebox test. + """ + producer = StringProducer(3) + request = Request('POST', '/bar', _boringHeaders, producer) + writeDeferred = request.writeTo(self.transport) + + finishedConsuming = producer.consumer._finished + + producer.consumer.write('abc') + producer.finished.callback(None) + + finishedConsuming.errback(ArbitraryException()) + self.assertEqual(len(self.flushLoggedErrors(ArbitraryException)), 1) + + + def _sendRequestBodyFinishedEarlyThenTooManyBytes(self, finisher): + """ + Verify that if the body producer fires its Deferred and then keeps + writing to the consumer that the extra writes are ignored and the + L{Deferred} returned by L{Request.writeTo} fires with a L{Failure} + wrapping the most appropriate exception type. + """ + producer = StringProducer(3) + request = Request('POST', '/bar', _boringHeaders, producer) + writeDeferred = request.writeTo(self.transport) + + producer.consumer.write('ab') + finisher(producer) + self.assertIdentical(self.transport.producer, None) + self.transport.clear() + self.assertRaises(ExcessWrite, producer.consumer.write, 'cd') + self.assertEqual(self.transport.value(), "") + return writeDeferred + + + def test_sendRequestBodyFinishedEarlyThenTooManyBytes(self): + """ + If the request body producer indicates it is done by firing the + L{Deferred} returned from its C{startProducing} method but then goes on + to write too many bytes, the L{Deferred} returned by {Request.writeTo} + fires with a L{Failure} wrapping L{WrongBodyLength}. + """ + def finisher(producer): + producer.finished.callback(None) + return self.assertFailure( + self._sendRequestBodyFinishedEarlyThenTooManyBytes(finisher), + WrongBodyLength) + + + def test_sendRequestBodyErroredEarlyThenTooManyBytes(self): + """ + If the request body producer indicates an error by firing the + L{Deferred} returned from its C{startProducing} method but then goes on + to write too many bytes, the L{Deferred} returned by {Request.writeTo} + fires with that L{Failure} and L{WrongBodyLength} is logged. 
+ """ + def finisher(producer): + producer.finished.errback(ArbitraryException()) + return self.assertFailure( + self._sendRequestBodyFinishedEarlyThenTooManyBytes(finisher), + ArbitraryException) + + + def test_sendChunkedRequestBodyFinishedThenWriteMore(self, _with=None): + """ + If the request body producer with an unknown length tries to write + after firing the L{Deferred} returned by its C{startProducing} method, + the C{write} call raises an exception and does not write anything to + the underlying transport. + """ + producer = StringProducer(UNKNOWN_LENGTH) + request = Request('POST', '/bar', _boringHeaders, producer) + writeDeferred = request.writeTo(self.transport) + producer.finished.callback(_with) + self.transport.clear() + + self.assertRaises(ExcessWrite, producer.consumer.write, 'foo') + self.assertEqual(self.transport.value(), "") + return writeDeferred + + + def test_sendChunkedRequestBodyFinishedWithErrorThenWriteMore(self): + """ + If the request body producer with an unknown length tries to write + after firing the L{Deferred} returned by its C{startProducing} method + with a L{Failure}, the C{write} call raises an exception and does not + write anything to the underlying transport. + """ + d = self.test_sendChunkedRequestBodyFinishedThenWriteMore( + Failure(ArbitraryException())) + return self.assertFailure(d, ArbitraryException) + + + def test_sendRequestBodyWithError(self): + """ + If the L{Deferred} returned from the C{startProducing} method of the + L{IBodyProducer} passed to L{Request} fires with a L{Failure}, the + L{Deferred} returned from L{Request.writeTo} fails with that + L{Failure}. + """ + producer = StringProducer(5) + request = Request('POST', '/bar', _boringHeaders, producer) + writeDeferred = request.writeTo(self.transport) + + # Sanity check - the producer should be registered with the underlying + # transport. + self.assertIdentical(self.transport.producer, producer) + self.assertTrue(self.transport.streaming) + + producer.consumer.write('ab') + self.assertEqual( + self.transport.value(), + "POST /bar HTTP/1.1\r\n" + "Connection: close\r\n" + "Content-Length: 5\r\n" + "Host: example.com\r\n" + "\r\n" + "ab") + + self.assertFalse(self.transport.disconnecting) + producer.finished.errback(Failure(ArbitraryException())) + + # Disconnection is handled by a higher level. Request should leave the + # transport alone in this case. + self.assertFalse(self.transport.disconnecting) + + # Oh. Except it should unregister the producer that it registered. + self.assertIdentical(self.transport.producer, None) + + return self.assertFailure(writeDeferred, ArbitraryException) + + def test_hostHeaderRequired(self): + """ + L{Request.writeTo} raises L{BadHeaders} if there is not exactly one + I{Host} header and writes nothing to the given transport. + """ + request = Request('GET', '/', Headers({}), None) + self.assertRaises(BadHeaders, request.writeTo, self.transport) + self.assertEqual(self.transport.value(), '') + + request = Request('GET', '/', Headers({'Host': ['example.com', 'example.org']}), None) + self.assertRaises(BadHeaders, request.writeTo, self.transport) + self.assertEqual(self.transport.value(), '') + + + def test_stopWriting(self): + """ + L{Request.stopWriting} calls its body producer's C{stopProducing} + method. 
+ """ + producer = StringProducer(3) + request = Request('GET', '/', _boringHeaders, producer) + d = request.writeTo(self.transport) + self.assertFalse(producer.stopped) + request.stopWriting() + self.assertTrue(producer.stopped) + + + def test_brokenStopProducing(self): + """ + If the body producer's C{stopProducing} method raises an exception, + L{Request.stopWriting} logs it and does not re-raise it. + """ + producer = StringProducer(3) + def brokenStopProducing(): + raise ArbitraryException("stopProducing is busted") + producer.stopProducing = brokenStopProducing + + request = Request('GET', '/', _boringHeaders, producer) + d = request.writeTo(self.transport) + request.stopWriting() + self.assertEqual( + len(self.flushLoggedErrors(ArbitraryException)), 1) + + + +class LengthEnforcingConsumerTests(TestCase): + """ + Tests for L{LengthEnforcingConsumer}. + """ + def setUp(self): + self.result = Deferred() + self.producer = StringProducer(10) + self.transport = StringTransport() + self.enforcer = LengthEnforcingConsumer( + self.producer, self.transport, self.result) + + + def test_write(self): + """ + L{LengthEnforcingConsumer.write} calls the wrapped consumer's C{write} + method with the bytes it is passed as long as there are fewer of them + than the C{length} attribute indicates remain to be received. + """ + self.enforcer.write('abc') + self.assertEqual(self.transport.value(), 'abc') + self.transport.clear() + self.enforcer.write('def') + self.assertEqual(self.transport.value(), 'def') + + + def test_finishedEarly(self): + """ + L{LengthEnforcingConsumer._noMoreWritesExpected} raises + L{WrongBodyLength} if it is called before the indicated number of bytes + have been written. + """ + self.enforcer.write('x' * 9) + self.assertRaises(WrongBodyLength, self.enforcer._noMoreWritesExpected) + + + def test_writeTooMany(self, _unregisterAfter=False): + """ + If it is called with a total number of bytes exceeding the indicated + limit passed to L{LengthEnforcingConsumer.__init__}, + L{LengthEnforcingConsumer.write} fires the L{Deferred} with a + L{Failure} wrapping a L{WrongBodyLength} and also calls the + C{stopProducing} method of the producer. + """ + self.enforcer.write('x' * 10) + self.assertFalse(self.producer.stopped) + self.enforcer.write('x') + self.assertTrue(self.producer.stopped) + if _unregisterAfter: + self.enforcer._noMoreWritesExpected() + return self.assertFailure(self.result, WrongBodyLength) + + + def test_writeAfterNoMoreExpected(self): + """ + If L{LengthEnforcingConsumer.write} is called after + L{LengthEnforcingConsumer._noMoreWritesExpected}, it calls the + producer's C{stopProducing} method and raises L{ExcessWrite}. + """ + self.enforcer.write('x' * 10) + self.enforcer._noMoreWritesExpected() + self.assertFalse(self.producer.stopped) + self.assertRaises(ExcessWrite, self.enforcer.write, 'x') + self.assertTrue(self.producer.stopped) + + + def test_finishedLate(self): + """ + L{LengthEnforcingConsumer._noMoreWritesExpected} does nothing (in + particular, it does not raise any exception) if called after too many + bytes have been passed to C{write}. + """ + return self.test_writeTooMany(True) + + + def test_finished(self): + """ + If L{LengthEnforcingConsumer._noMoreWritesExpected} is called after + the correct number of bytes have been written it returns C{None}. 
+ """ + self.enforcer.write('x' * 10) + self.assertIdentical(self.enforcer._noMoreWritesExpected(), None) + + + def test_stopProducingRaises(self): + """ + If L{LengthEnforcingConsumer.write} calls the producer's + C{stopProducing} because too many bytes were written and the + C{stopProducing} method raises an exception, the exception is logged + and the L{LengthEnforcingConsumer} still errbacks the finished + L{Deferred}. + """ + def brokenStopProducing(): + StringProducer.stopProducing(self.producer) + raise ArbitraryException("stopProducing is busted") + self.producer.stopProducing = brokenStopProducing + + def cbFinished(ignored): + self.assertEqual( + len(self.flushLoggedErrors(ArbitraryException)), 1) + d = self.test_writeTooMany() + d.addCallback(cbFinished) + return d + + + +class RequestBodyConsumerTests(TestCase): + """ + Tests for L{ChunkedEncoder} which sits between an L{ITransport} and a + request/response body producer and chunked encodes everything written to + it. + """ + def test_interface(self): + """ + L{ChunkedEncoder} instances provide L{IConsumer}. + """ + self.assertTrue( + verifyObject(IConsumer, ChunkedEncoder(StringTransport()))) + + + def test_write(self): + """ + L{ChunkedEncoder.write} writes to the transport the chunked encoded + form of the bytes passed to it. + """ + transport = StringTransport() + encoder = ChunkedEncoder(transport) + encoder.write('foo') + self.assertEqual(transport.value(), '3\r\nfoo\r\n') + transport.clear() + encoder.write('x' * 16) + self.assertEqual(transport.value(), '10\r\n' + 'x' * 16 + '\r\n') + + + def test_producerRegistration(self): + """ + L{ChunkedEncoder.registerProducer} registers the given streaming + producer with its transport and L{ChunkedEncoder.unregisterProducer} + writes a zero-length chunk to its transport and unregisters the + transport's producer. + """ + transport = StringTransport() + producer = object() + encoder = ChunkedEncoder(transport) + encoder.registerProducer(producer, True) + self.assertIdentical(transport.producer, producer) + self.assertTrue(transport.streaming) + encoder.unregisterProducer() + self.assertIdentical(transport.producer, None) + self.assertEqual(transport.value(), '0\r\n\r\n') + + + +class TransportProxyProducerTests(TestCase): + """ + Tests for L{TransportProxyProducer} which proxies the L{IPushProducer} + interface of a transport. + """ + def test_interface(self): + """ + L{TransportProxyProducer} instances provide L{IPushProducer}. + """ + self.assertTrue( + verifyObject(IPushProducer, TransportProxyProducer(None))) + + + def test_stopProxyingUnreferencesProducer(self): + """ + L{TransportProxyProducer._stopProxying} drops the reference to the + wrapped L{IPushProducer} provider. + """ + transport = StringTransport() + proxy = TransportProxyProducer(transport) + self.assertIdentical(proxy._producer, transport) + proxy._stopProxying() + self.assertIdentical(proxy._producer, None) + + + def test_resumeProducing(self): + """ + L{TransportProxyProducer.resumeProducing} calls the wrapped + transport's C{resumeProducing} method unless told to stop proxying. + """ + transport = StringTransport() + transport.pauseProducing() + + proxy = TransportProxyProducer(transport) + # The transport should still be paused. + self.assertEqual(transport.producerState, 'paused') + proxy.resumeProducing() + # The transport should now be resumed. 
+ self.assertEqual(transport.producerState, 'producing') + + transport.pauseProducing() + proxy._stopProxying() + + # The proxy should no longer do anything to the transport. + proxy.resumeProducing() + self.assertEqual(transport.producerState, 'paused') + + + def test_pauseProducing(self): + """ + L{TransportProxyProducer.pauseProducing} calls the wrapped transport's + C{pauseProducing} method unless told to stop proxying. + """ + transport = StringTransport() + + proxy = TransportProxyProducer(transport) + # The transport should still be producing. + self.assertEqual(transport.producerState, 'producing') + proxy.pauseProducing() + # The transport should now be paused. + self.assertEqual(transport.producerState, 'paused') + + transport.resumeProducing() + proxy._stopProxying() + + # The proxy should no longer do anything to the transport. + proxy.pauseProducing() + self.assertEqual(transport.producerState, 'producing') + + + def test_stopProducing(self): + """ + L{TransportProxyProducer.stopProducing} calls the wrapped transport's + C{stopProducing} method unless told to stop proxying. + """ + transport = StringTransport() + proxy = TransportProxyProducer(transport) + # The transport should still be producing. + self.assertEqual(transport.producerState, 'producing') + proxy.stopProducing() + # The transport should now be stopped. + self.assertEqual(transport.producerState, 'stopped') + + transport = StringTransport() + proxy = TransportProxyProducer(transport) + proxy._stopProxying() + proxy.stopProducing() + # The transport should not have been stopped. + self.assertEqual(transport.producerState, 'producing') + + + +class ResponseTests(TestCase): + """ + Tests for L{Response}. + """ + def test_makeConnection(self): + """ + The L{IProtocol} provider passed to L{Response.deliverBody} has its + C{makeConnection} method called with an L{IPushProducer} provider + hooked up to the response as an argument. + """ + producers = [] + transport = StringTransport() + class SomeProtocol(Protocol): + def makeConnection(self, producer): + producers.append(producer) + + consumer = SomeProtocol() + response = justTransportResponse(transport) + response.deliverBody(consumer) + [theProducer] = producers + theProducer.pauseProducing() + self.assertEqual(transport.producerState, 'paused') + theProducer.resumeProducing() + self.assertEqual(transport.producerState, 'producing') + + + def test_dataReceived(self): + """ + The L{IProtocol} provider passed to L{Response.deliverBody} has its + C{dataReceived} method called with bytes received as part of the + response body. + """ + bytes = [] + class ListConsumer(Protocol): + def dataReceived(self, data): + bytes.append(data) + + + consumer = ListConsumer() + response = justTransportResponse(StringTransport()) + response.deliverBody(consumer) + + response._bodyDataReceived('foo') + self.assertEqual(bytes, ['foo']) + + + def test_connectionLost(self): + """ + The L{IProtocol} provider passed to L{Response.deliverBody} has its + C{connectionLost} method called with a L{Failure} wrapping + L{ResponseDone} when the response's C{_bodyDataFinished} method is + called. + """ + lost = [] + class ListConsumer(Protocol): + def connectionLost(self, reason): + lost.append(reason) + + consumer = ListConsumer() + response = justTransportResponse(StringTransport()) + response.deliverBody(consumer) + + response._bodyDataFinished() + lost[0].trap(ResponseDone) + self.assertEqual(len(lost), 1) + + # The protocol reference should be dropped, too, to facilitate GC or + # whatever. 
+ self.assertIdentical(response._bodyProtocol, None) + + + def test_bufferEarlyData(self): + """ + If data is delivered to the L{Response} before a protocol is registered + with C{deliverBody}, that data is buffered until the protocol is + registered and then is delivered. + """ + bytes = [] + class ListConsumer(Protocol): + def dataReceived(self, data): + bytes.append(data) + + protocol = ListConsumer() + response = justTransportResponse(StringTransport()) + response._bodyDataReceived('foo') + response._bodyDataReceived('bar') + response.deliverBody(protocol) + response._bodyDataReceived('baz') + self.assertEqual(bytes, ['foo', 'bar', 'baz']) + # Make sure the implementation-detail-byte-buffer is cleared because + # not clearing it wastes memory. + self.assertIdentical(response._bodyBuffer, None) + + + def test_multipleStartProducingFails(self): + """ + L{Response.deliverBody} raises L{RuntimeError} if called more than + once. + """ + response = justTransportResponse(StringTransport()) + response.deliverBody(Protocol()) + self.assertRaises(RuntimeError, response.deliverBody, Protocol()) + + + def test_startProducingAfterFinishedFails(self): + """ + L{Response.deliverBody} raises L{RuntimeError} if called after + L{Response._bodyDataFinished}. + """ + response = justTransportResponse(StringTransport()) + response.deliverBody(Protocol()) + response._bodyDataFinished() + self.assertRaises(RuntimeError, response.deliverBody, Protocol()) + + + def test_bodyDataReceivedAfterFinishedFails(self): + """ + L{Response._bodyDataReceived} raises L{RuntimeError} if called after + L{Response._bodyDataFinished} but before L{Response.deliverBody}. + """ + response = justTransportResponse(StringTransport()) + response._bodyDataFinished() + self.assertRaises(RuntimeError, response._bodyDataReceived, 'foo') + + + def test_bodyDataReceivedAfterDeliveryFails(self): + """ + L{Response._bodyDataReceived} raises L{RuntimeError} if called after + L{Response._bodyDataFinished} and after L{Response.deliverBody}. + """ + response = justTransportResponse(StringTransport()) + response._bodyDataFinished() + response.deliverBody(Protocol()) + self.assertRaises(RuntimeError, response._bodyDataReceived, 'foo') + + + def test_bodyDataFinishedAfterFinishedFails(self): + """ + L{Response._bodyDataFinished} raises L{RuntimeError} if called more + than once. + """ + response = justTransportResponse(StringTransport()) + response._bodyDataFinished() + self.assertRaises(RuntimeError, response._bodyDataFinished) + + + def test_bodyDataFinishedAfterDeliveryFails(self): + """ + L{Response._bodyDataFinished} raises L{RuntimeError} if called after + the body has been delivered. + """ + response = justTransportResponse(StringTransport()) + response._bodyDataFinished() + response.deliverBody(Protocol()) + self.assertRaises(RuntimeError, response._bodyDataFinished) + + + def test_transportResumed(self): + """ + L{Response.deliverBody} resumes the HTTP connection's transport + before passing it to the transport's C{makeConnection} method. 
+ """ + transportState = [] + class ListConsumer(Protocol): + def makeConnection(self, transport): + transportState.append(transport.producerState) + + transport = StringTransport() + transport.pauseProducing() + protocol = ListConsumer() + response = justTransportResponse(transport) + self.assertEqual(transport.producerState, 'paused') + response.deliverBody(protocol) + self.assertEqual(transportState, ['producing']) + + + def test_bodyDataFinishedBeforeStartProducing(self): + """ + If the entire body is delivered to the L{Response} before the + response's C{deliverBody} method is called, the protocol passed to + C{deliverBody} is immediately given the body data and then + disconnected. + """ + transport = StringTransport() + response = justTransportResponse(transport) + response._bodyDataReceived('foo') + response._bodyDataReceived('bar') + response._bodyDataFinished() + + protocol = AccumulatingProtocol() + response.deliverBody(protocol) + self.assertEqual(protocol.data, 'foobar') + protocol.closedReason.trap(ResponseDone) + + + def test_finishedWithErrorWhenConnected(self): + """ + The L{Failure} passed to L{Response._bodyDataFinished} when the response + is in the I{connected} state is passed to the C{connectionLost} method + of the L{IProtocol} provider passed to the L{Response}'s + C{deliverBody} method. + """ + transport = StringTransport() + response = justTransportResponse(transport) + + protocol = AccumulatingProtocol() + response.deliverBody(protocol) + + # Sanity check - this test is for the connected state + self.assertEqual(response._state, 'CONNECTED') + response._bodyDataFinished(Failure(ArbitraryException())) + + protocol.closedReason.trap(ArbitraryException) + + + def test_finishedWithErrorWhenInitial(self): + """ + The L{Failure} passed to L{Response._bodyDataFinished} when the response + is in the I{initial} state is passed to the C{connectionLost} method of + the L{IProtocol} provider passed to the L{Response}'s C{deliverBody} + method. + """ + transport = StringTransport() + response = justTransportResponse(transport) + + # Sanity check - this test is for the initial state + self.assertEqual(response._state, 'INITIAL') + response._bodyDataFinished(Failure(ArbitraryException())) + + protocol = AccumulatingProtocol() + response.deliverBody(protocol) + + protocol.closedReason.trap(ArbitraryException) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_proxy.py b/vendor/Twisted-10.0.0/twisted/web/test/test_proxy.py new file mode 100644 index 000000000000..7cad1a354e72 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_proxy.py @@ -0,0 +1,541 @@ +# Copyright (c) 2007-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Test for L{twisted.web.proxy}. +""" + +from twisted.trial.unittest import TestCase +from twisted.test.proto_helpers import StringTransportWithDisconnection +from twisted.test.proto_helpers import MemoryReactor + +from twisted.web.resource import Resource +from twisted.web.server import Site +from twisted.web.proxy import ReverseProxyResource, ProxyClientFactory +from twisted.web.proxy import ProxyClient, ProxyRequest, ReverseProxyRequest +from twisted.web.test.test_web import DummyRequest + + +class ReverseProxyResourceTestCase(TestCase): + """ + Tests for L{ReverseProxyResource}. + """ + + def _testRender(self, uri, expectedURI): + """ + Check that a request pointing at C{uri} produce a new proxy connection, + with the path of this request pointing at C{expectedURI}. 
+ """ + root = Resource() + reactor = MemoryReactor() + resource = ReverseProxyResource("127.0.0.1", 1234, "/path", reactor) + root.putChild('index', resource) + site = Site(root) + + transport = StringTransportWithDisconnection() + channel = site.buildProtocol(None) + channel.makeConnection(transport) + # Clear the timeout if the tests failed + self.addCleanup(channel.connectionLost, None) + + channel.dataReceived("GET %s HTTP/1.1\r\nAccept: text/html\r\n\r\n" % + (uri,)) + + # Check that one connection has been created, to the good host/port + self.assertEquals(len(reactor.tcpClients), 1) + self.assertEquals(reactor.tcpClients[0][0], "127.0.0.1") + self.assertEquals(reactor.tcpClients[0][1], 1234) + + # Check the factory passed to the connect, and its given path + factory = reactor.tcpClients[0][2] + self.assertIsInstance(factory, ProxyClientFactory) + self.assertEquals(factory.rest, expectedURI) + self.assertEquals(factory.headers["host"], "127.0.0.1:1234") + + + def test_render(self): + """ + Test that L{ReverseProxyResource.render} initiates a connection to the + given server with a L{ProxyClientFactory} as parameter. + """ + return self._testRender("/index", "/path") + + + def test_renderWithQuery(self): + """ + Test that L{ReverseProxyResource.render} passes query parameters to the + created factory. + """ + return self._testRender("/index?foo=bar", "/path?foo=bar") + + + def test_getChild(self): + """ + The L{ReverseProxyResource.getChild} method should return a resource + instance with the same class as the originating resource, forward port + and host values, and update the path value with the value passed. + """ + resource = ReverseProxyResource("127.0.0.1", 1234, "/path") + child = resource.getChild('foo', None) + # The child should keep the same class + self.assertIsInstance(child, ReverseProxyResource) + self.assertEquals(child.path, "/path/foo") + self.assertEquals(child.port, 1234) + self.assertEquals(child.host, "127.0.0.1") + + + def test_getChildWithSpecial(self): + """ + The L{ReverseProxyResource} return by C{getChild} has a path which has + already been quoted. + """ + resource = ReverseProxyResource("127.0.0.1", 1234, "/path") + child = resource.getChild(' /%', None) + self.assertEqual(child.path, "/path/%20%2F%25") + + + +class DummyChannel(object): + """ + A dummy HTTP channel, that does nothing but holds a transport and saves + connection lost. + + @ivar transport: the transport used by the client. + @ivar lostReason: the reason saved at connection lost. + """ + + def __init__(self, transport): + """ + Hold a reference to the transport. + """ + self.transport = transport + self.lostReason = None + + + def connectionLost(self, reason): + """ + Keep track of the connection lost reason. + """ + self.lostReason = reason + + + +class ProxyClientTestCase(TestCase): + """ + Tests for L{ProxyClient}. + """ + + def _parseOutHeaders(self, content): + """ + Parse the headers out of some web content. + + @param content: Bytes received from a web server. + @return: A tuple of (requestLine, headers, body). C{headers} is a dict + of headers, C{requestLine} is the first line (e.g. "POST /foo ...") + and C{body} is whatever is left. + """ + headers, body = content.split('\r\n\r\n') + headers = headers.split('\r\n') + requestLine = headers.pop(0) + return ( + requestLine, dict(header.split(': ') for header in headers), body) + + + def makeRequest(self, path): + """ + Make a dummy request object for the URL path. + + @param path: A URL path, beginning with a slash. 
+ @return: A L{DummyRequest}. + """ + return DummyRequest(path) + + + def makeProxyClient(self, request, method="GET", headers=None, + requestBody=""): + """ + Make a L{ProxyClient} object used for testing. + + @param request: The request to use. + @param method: The HTTP method to use, GET by default. + @param headers: The HTTP headers to use expressed as a dict. If not + provided, defaults to {'accept': 'text/html'}. + @param requestBody: The body of the request. Defaults to the empty + string. + @return: A L{ProxyClient} + """ + if headers is None: + headers = {"accept": "text/html"} + path = '/' + request.postpath + return ProxyClient( + method, path, 'HTTP/1.0', headers, requestBody, request) + + + def connectProxy(self, proxyClient): + """ + Connect a proxy client to a L{StringTransportWithDisconnection}. + + @param proxyClient: A L{ProxyClient}. + @return: The L{StringTransportWithDisconnection}. + """ + clientTransport = StringTransportWithDisconnection() + clientTransport.protocol = proxyClient + proxyClient.makeConnection(clientTransport) + return clientTransport + + + def assertForwardsHeaders(self, proxyClient, requestLine, headers): + """ + Assert that C{proxyClient} sends C{headers} when it connects. + + @param proxyClient: A L{ProxyClient}. + @param requestLine: The request line we expect to be sent. + @param headers: A dict of headers we expect to be sent. + @return: If the assertion is successful, return the request body as + bytes. + """ + self.connectProxy(proxyClient) + requestContent = proxyClient.transport.value() + receivedLine, receivedHeaders, body = self._parseOutHeaders( + requestContent) + self.assertEquals(receivedLine, requestLine) + self.assertEquals(receivedHeaders, headers) + return body + + + def makeResponseBytes(self, code, message, headers, body): + lines = ["HTTP/1.0 %d %s" % (code, message)] + for header, values in headers: + for value in values: + lines.append("%s: %s" % (header, value)) + lines.extend(['', body]) + return '\r\n'.join(lines) + + + def assertForwardsResponse(self, request, code, message, headers, body): + """ + Assert that C{request} has forwarded a response from the server. + + @param request: A L{DummyRequest}. + @param code: The expected HTTP response code. + @param message: The expected HTTP message. + @param headers: The expected HTTP headers. + @param body: The expected response body. + """ + self.assertEquals(request.responseCode, code) + self.assertEquals(request.responseMessage, message) + receivedHeaders = list(request.responseHeaders.getAllRawHeaders()) + receivedHeaders.sort() + expectedHeaders = headers[:] + expectedHeaders.sort() + self.assertEquals(receivedHeaders, expectedHeaders) + self.assertEquals(''.join(request.written), body) + + + def _testDataForward(self, code, message, headers, body, method="GET", + requestBody="", loseConnection=True): + """ + Build a fake proxy connection, and send C{data} over it, checking that + it's forwarded to the originating request. + """ + request = self.makeRequest('foo') + client = self.makeProxyClient( + request, method, {'accept': 'text/html'}, requestBody) + + receivedBody = self.assertForwardsHeaders( + client, '%s /foo HTTP/1.0' % (method,), + {'connection': 'close', 'accept': 'text/html'}) + + self.assertEquals(receivedBody, requestBody) + + # Fake an answer + client.dataReceived( + self.makeResponseBytes(code, message, headers, body)) + + # Check that the response data has been forwarded back to the original + # requester. 
+ self.assertForwardsResponse(request, code, message, headers, body) + + # Check that when the response is done, the request is finished. + if loseConnection: + client.transport.loseConnection() + + # Even if we didn't call loseConnection, the transport should be + # disconnected. This lets us not rely on the server to close our + # sockets for us. + self.assertFalse(client.transport.connected) + self.assertEquals(request.finished, 1) + + + def test_forward(self): + """ + When connected to the server, L{ProxyClient} should send the saved + request, with modifications of the headers, and then forward the result + to the parent request. + """ + return self._testDataForward( + 200, "OK", [("Foo", ["bar", "baz"])], "Some data\r\n") + + + def test_postData(self): + """ + Try to post content in the request, and check that the proxy client + forward the body of the request. + """ + return self._testDataForward( + 200, "OK", [("Foo", ["bar"])], "Some data\r\n", "POST", "Some content") + + + def test_statusWithMessage(self): + """ + If the response contains a status with a message, it should be + forwarded to the parent request with all the information. + """ + return self._testDataForward( + 404, "Not Found", [], "") + + + def test_contentLength(self): + """ + If the response contains a I{Content-Length} header, the inbound + request object should still only have C{finish} called on it once. + """ + data = "foo bar baz" + return self._testDataForward( + 200, "OK", [("Content-Length", [str(len(data))])], data) + + + def test_losesConnection(self): + """ + If the response contains a I{Content-Length} header, the outgoing + connection is closed when all response body data has been received. + """ + data = "foo bar baz" + return self._testDataForward( + 200, "OK", [("Content-Length", [str(len(data))])], data, + loseConnection=False) + + + def test_headersCleanups(self): + """ + The headers given at initialization should be modified: + B{proxy-connection} should be removed if present, and B{connection} + should be added. + """ + client = ProxyClient('GET', '/foo', 'HTTP/1.0', + {"accept": "text/html", "proxy-connection": "foo"}, '', None) + self.assertEquals(client.headers, + {"accept": "text/html", "connection": "close"}) + + + def test_keepaliveNotForwarded(self): + """ + The proxy doesn't really know what to do with keepalive things from + the remote server, so we stomp over any keepalive header we get from + the client. + """ + headers = { + "accept": "text/html", + 'keep-alive': '300', + 'connection': 'keep-alive', + } + expectedHeaders = headers.copy() + expectedHeaders['connection'] = 'close' + del expectedHeaders['keep-alive'] + client = ProxyClient('GET', '/foo', 'HTTP/1.0', headers, '', None) + self.assertForwardsHeaders( + client, 'GET /foo HTTP/1.0', expectedHeaders) + + + def test_defaultHeadersOverridden(self): + """ + L{server.Request} within the proxy sets certain response headers by + default. When we get these headers back from the remote server, the + defaults are overridden rather than simply appended. 
+ """ + request = self.makeRequest('foo') + request.responseHeaders.setRawHeaders('server', ['old-bar']) + request.responseHeaders.setRawHeaders('date', ['old-baz']) + request.responseHeaders.setRawHeaders('content-type', ["old/qux"]) + client = self.makeProxyClient(request, headers={'accept': 'text/html'}) + self.connectProxy(client) + headers = { + 'Server': ['bar'], + 'Date': ['2010-01-01'], + 'Content-Type': ['application/x-baz'], + } + client.dataReceived( + self.makeResponseBytes(200, "OK", headers.items(), '')) + self.assertForwardsResponse( + request, 200, 'OK', headers.items(), '') + + + +class ProxyClientFactoryTestCase(TestCase): + """ + Tests for L{ProxyClientFactory}. + """ + + def test_connectionFailed(self): + """ + Check that L{ProxyClientFactory.clientConnectionFailed} produces + a B{501} response to the parent request. + """ + request = DummyRequest(['foo']) + factory = ProxyClientFactory('GET', '/foo', 'HTTP/1.0', + {"accept": "text/html"}, '', request) + + factory.clientConnectionFailed(None, None) + self.assertEquals(request.responseCode, 501) + self.assertEquals(request.responseMessage, "Gateway error") + self.assertEquals( + list(request.responseHeaders.getAllRawHeaders()), + [("Content-Type", ["text/html"])]) + self.assertEquals( + ''.join(request.written), + "

<H1>Could not connect</H1>
                              ") + self.assertEquals(request.finished, 1) + + + def test_buildProtocol(self): + """ + L{ProxyClientFactory.buildProtocol} should produce a L{ProxyClient} + with the same values of attributes (with updates on the headers). + """ + factory = ProxyClientFactory('GET', '/foo', 'HTTP/1.0', + {"accept": "text/html"}, 'Some data', + None) + proto = factory.buildProtocol(None) + self.assertIsInstance(proto, ProxyClient) + self.assertEquals(proto.command, 'GET') + self.assertEquals(proto.rest, '/foo') + self.assertEquals(proto.data, 'Some data') + self.assertEquals(proto.headers, + {"accept": "text/html", "connection": "close"}) + + + +class ProxyRequestTestCase(TestCase): + """ + Tests for L{ProxyRequest}. + """ + + def _testProcess(self, uri, expectedURI, method="GET", data=""): + """ + Build a request pointing at C{uri}, and check that a proxied request + is created, pointing a C{expectedURI}. + """ + transport = StringTransportWithDisconnection() + channel = DummyChannel(transport) + reactor = MemoryReactor() + request = ProxyRequest(channel, False, reactor) + request.gotLength(len(data)) + request.handleContentChunk(data) + request.requestReceived(method, 'http://example.com%s' % (uri,), + 'HTTP/1.0') + + self.assertEquals(len(reactor.tcpClients), 1) + self.assertEquals(reactor.tcpClients[0][0], "example.com") + self.assertEquals(reactor.tcpClients[0][1], 80) + + factory = reactor.tcpClients[0][2] + self.assertIsInstance(factory, ProxyClientFactory) + self.assertEquals(factory.command, method) + self.assertEquals(factory.version, 'HTTP/1.0') + self.assertEquals(factory.headers, {'host': 'example.com'}) + self.assertEquals(factory.data, data) + self.assertEquals(factory.rest, expectedURI) + self.assertEquals(factory.father, request) + + + def test_process(self): + """ + L{ProxyRequest.process} should create a connection to the given server, + with a L{ProxyClientFactory} as connection factory, with the correct + parameters: + - forward comment, version and data values + - update headers with the B{host} value + - remove the host from the URL + - pass the request as parent request + """ + return self._testProcess("/foo/bar", "/foo/bar") + + + def test_processWithoutTrailingSlash(self): + """ + If the incoming request doesn't contain a slash, + L{ProxyRequest.process} should add one when instantiating + L{ProxyClientFactory}. + """ + return self._testProcess("", "/") + + + def test_processWithData(self): + """ + L{ProxyRequest.process} should be able to retrieve request body and + to forward it. + """ + return self._testProcess( + "/foo/bar", "/foo/bar", "POST", "Some content") + + + def test_processWithPort(self): + """ + Check that L{ProxyRequest.process} correctly parse port in the incoming + URL, and create a outgoing connection with this port. + """ + transport = StringTransportWithDisconnection() + channel = DummyChannel(transport) + reactor = MemoryReactor() + request = ProxyRequest(channel, False, reactor) + request.gotLength(0) + request.requestReceived('GET', 'http://example.com:1234/foo/bar', + 'HTTP/1.0') + + # That should create one connection, with the port parsed from the URL + self.assertEquals(len(reactor.tcpClients), 1) + self.assertEquals(reactor.tcpClients[0][0], "example.com") + self.assertEquals(reactor.tcpClients[0][1], 1234) + + + +class DummyFactory(object): + """ + A simple holder for C{host} and C{port} information. 
+ """ + + def __init__(self, host, port): + self.host = host + self.port = port + + + +class ReverseProxyRequestTestCase(TestCase): + """ + Tests for L{ReverseProxyRequest}. + """ + + def test_process(self): + """ + L{ReverseProxyRequest.process} should create a connection to its + factory host/port, using a L{ProxyClientFactory} instantiated with the + correct parameters, and particulary set the B{host} header to the + factory host. + """ + transport = StringTransportWithDisconnection() + channel = DummyChannel(transport) + reactor = MemoryReactor() + request = ReverseProxyRequest(channel, False, reactor) + request.factory = DummyFactory("example.com", 1234) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + + # Check that one connection has been created, to the good host/port + self.assertEquals(len(reactor.tcpClients), 1) + self.assertEquals(reactor.tcpClients[0][0], "example.com") + self.assertEquals(reactor.tcpClients[0][1], 1234) + + # Check the factory passed to the connect, and its headers + factory = reactor.tcpClients[0][2] + self.assertIsInstance(factory, ProxyClientFactory) + self.assertEquals(factory.headers, {'host': 'example.com'}) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_resource.py b/vendor/Twisted-10.0.0/twisted/web/test/test_resource.py new file mode 100644 index 000000000000..d6631c2e41f7 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_resource.py @@ -0,0 +1,144 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.resource}. +""" + +from twisted.trial.unittest import TestCase +from twisted.web import error +from twisted.web.http import NOT_FOUND, FORBIDDEN +from twisted.web.resource import ErrorPage, NoResource, ForbiddenResource +from twisted.web.test.test_web import DummyRequest + + +class ErrorPageTests(TestCase): + """ + Tests for L{ErrorPage}, L{NoResource}, and L{ForbiddenResource}. + """ + + errorPage = ErrorPage + noResource = NoResource + forbiddenResource = ForbiddenResource + + def test_getChild(self): + """ + The C{getChild} method of L{ErrorPage} returns the L{ErrorPage} it is + called on. + """ + page = self.errorPage(321, "foo", "bar") + self.assertIdentical(page.getChild("name", object()), page) + + + def _pageRenderingTest(self, page, code, brief, detail): + request = DummyRequest(['']) + self.assertEqual( + page.render(request), + "\n" + "\n" + " %s - %s\n" + " \n" + "

    <h1>%s</h1>\n" + "    <p>%s</p>
                              \n" + " \n" + "\n" % (code, brief, brief, detail)) + self.assertEqual(request.responseCode, code) + self.assertEqual( + request.outgoingHeaders, {'content-type': 'text/html'}) + + + def test_errorPageRendering(self): + """ + L{ErrorPage.render} returns a C{str} describing the error defined by + the response code and message passed to L{ErrorPage.__init__}. It also + uses that response code to set the response code on the L{Request} + passed in. + """ + code = 321 + brief = "brief description text" + detail = "much longer text might go here" + page = self.errorPage(code, brief, detail) + self._pageRenderingTest(page, code, brief, detail) + + + def test_noResourceRendering(self): + """ + L{NoResource} sets the HTTP I{NOT FOUND} code. + """ + detail = "long message" + page = self.noResource(detail) + self._pageRenderingTest(page, NOT_FOUND, "No Such Resource", detail) + + + def test_forbiddenResourceRendering(self): + """ + L{ForbiddenResource} sets the HTTP I{FORBIDDEN} code. + """ + detail = "longer message" + page = self.forbiddenResource(detail) + self._pageRenderingTest(page, FORBIDDEN, "Forbidden Resource", detail) + + + +class DeprecatedErrorPageTests(ErrorPageTests): + """ + Tests for L{error.ErrorPage}, L{error.NoResource}, and + L{error.ForbiddenResource}. + """ + def errorPage(self, *args): + return error.ErrorPage(*args) + + + def noResource(self, *args): + return error.NoResource(*args) + + + def forbiddenResource(self, *args): + return error.ForbiddenResource(*args) + + + def _assertWarning(self, name, offendingFunction): + warnings = self.flushWarnings([offendingFunction]) + self.assertEqual(len(warnings), 1) + self.assertEqual(warnings[0]['category'], DeprecationWarning) + self.assertEqual( + warnings[0]['message'], + 'twisted.web.error.%s is deprecated since Twisted 9.0. ' + 'See twisted.web.resource.%s.' % (name, name)) + + + def test_getChild(self): + """ + Like L{ErrorPageTests.test_getChild}, but flush the deprecation warning + emitted by instantiating L{error.ErrorPage}. + """ + ErrorPageTests.test_getChild(self) + self._assertWarning('ErrorPage', self.errorPage) + + + def test_errorPageRendering(self): + """ + Like L{ErrorPageTests.test_errorPageRendering}, but flush the + deprecation warning emitted by instantiating L{error.ErrorPage}. + """ + ErrorPageTests.test_errorPageRendering(self) + self._assertWarning('ErrorPage', self.errorPage) + + + def test_noResourceRendering(self): + """ + Like L{ErrorPageTests.test_noResourceRendering}, but flush the + deprecation warning emitted by instantiating L{error.NoResource}. + """ + ErrorPageTests.test_noResourceRendering(self) + self._assertWarning('NoResource', self.noResource) + + + def test_forbiddenResourceRendering(self): + """ + Like L{ErrorPageTests.test_forbiddenResourceRendering}, but flush the + deprecation warning emitted by instantiating + L{error.ForbiddenResource}. + """ + ErrorPageTests.test_forbiddenResourceRendering(self) + self._assertWarning('ForbiddenResource', self.forbiddenResource) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_script.py b/vendor/Twisted-10.0.0/twisted/web/test/test_script.py new file mode 100644 index 000000000000..f88d2216d92a --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_script.py @@ -0,0 +1,70 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.script}. 
+""" + +import os + +from twisted.trial.unittest import TestCase +from twisted.web.http import NOT_FOUND +from twisted.web.script import ResourceScriptDirectory, PythonScript +from twisted.web.test._util import _render +from twisted.web.test.test_web import DummyRequest + + +class ResourceScriptDirectoryTests(TestCase): + """ + Tests for L{ResourceScriptDirectory}. + """ + def test_render(self): + """ + L{ResourceScriptDirectory.render} sets the HTTP response code to I{NOT + FOUND}. + """ + resource = ResourceScriptDirectory(self.mktemp()) + request = DummyRequest(['']) + d = _render(resource, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, NOT_FOUND) + d.addCallback(cbRendered) + return d + + + def test_notFoundChild(self): + """ + L{ResourceScriptDirectory.getChild} returns a resource which renders an + response with the HTTP I{NOT FOUND} status code if the indicated child + does not exist as an entry in the directory used to initialized the + L{ResourceScriptDirectory}. + """ + path = self.mktemp() + os.makedirs(path) + resource = ResourceScriptDirectory(path) + request = DummyRequest(['foo']) + child = resource.getChild("foo", request) + d = _render(child, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, NOT_FOUND) + d.addCallback(cbRendered) + return d + + + +class PythonScriptTests(TestCase): + """ + Tests for L{PythonScript}. + """ + def test_notFoundRender(self): + """ + If the source file a L{PythonScript} is initialized with doesn't exist, + L{PythonScript.render} sets the HTTP response code to I{NOT FOUND}. + """ + resource = PythonScript(self.mktemp(), None) + request = DummyRequest(['']) + d = _render(resource, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, NOT_FOUND) + d.addCallback(cbRendered) + return d diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_soap.py b/vendor/Twisted-10.0.0/twisted/web/test/test_soap.py new file mode 100644 index 000000000000..54e990e84cf9 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_soap.py @@ -0,0 +1,114 @@ +# +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. 
+# + +"""Test SOAP support.""" + +try: + import SOAPpy +except ImportError: + SOAPpy = None + class SOAPPublisher: pass +else: + from twisted.web import soap + SOAPPublisher = soap.SOAPPublisher + +from twisted.trial import unittest +from twisted.web import server, error +from twisted.internet import reactor, defer + + +class Test(SOAPPublisher): + + def soap_add(self, a, b): + return a + b + + def soap_kwargs(self, a=1, b=2): + return a + b + soap_kwargs.useKeywords=True + + def soap_triple(self, string, num): + return [string, num, None] + + def soap_struct(self): + return SOAPpy.structType({"a": "c"}) + + def soap_defer(self, x): + return defer.succeed(x) + + def soap_deferFail(self): + return defer.fail(ValueError()) + + def soap_fail(self): + raise RuntimeError + + def soap_deferFault(self): + return defer.fail(ValueError()) + + def soap_complex(self): + return {"a": ["b", "c", 12, []], "D": "foo"} + + def soap_dict(self, map, key): + return map[key] + + +class SOAPTestCase(unittest.TestCase): + + def setUp(self): + self.publisher = Test() + self.p = reactor.listenTCP(0, server.Site(self.publisher), + interface="127.0.0.1") + self.port = self.p.getHost().port + + def tearDown(self): + return self.p.stopListening() + + def proxy(self): + return soap.Proxy("http://127.0.0.1:%d/" % self.port) + + def testResults(self): + inputOutput = [ + ("add", (2, 3), 5), + ("defer", ("a",), "a"), + ("dict", ({"a": 1}, "a"), 1), + ("triple", ("a", 1), ["a", 1, None])] + + dl = [] + for meth, args, outp in inputOutput: + d = self.proxy().callRemote(meth, *args) + d.addCallback(self.assertEquals, outp) + dl.append(d) + + # SOAPpy kinda blows. + d = self.proxy().callRemote('complex') + d.addCallback(lambda result: result._asdict()) + d.addCallback(self.assertEquals, {"a": ["b", "c", 12, []], "D": "foo"}) + dl.append(d) + + # We now return to our regularly scheduled program, already in progress. + return defer.DeferredList(dl, fireOnOneErrback=True) + + def testMethodNotFound(self): + """ + Check that a non existing method return error 500. + """ + d = self.proxy().callRemote('doesntexist') + self.assertFailure(d, error.Error) + def cb(err): + self.assertEquals(int(err.status), 500) + d.addCallback(cb) + return d + + def testLookupFunction(self): + """ + Test lookupFunction method on publisher, to see available remote + methods. + """ + self.assertTrue(self.publisher.lookupFunction("add")) + self.assertTrue(self.publisher.lookupFunction("fail")) + self.assertFalse(self.publisher.lookupFunction("foobar")) + +if not SOAPpy: + SOAPTestCase.skip = "SOAPpy not installed" + diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_static.py b/vendor/Twisted-10.0.0/twisted/web/test/test_static.py new file mode 100644 index 000000000000..ee8fe267a0b9 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_static.py @@ -0,0 +1,1507 @@ +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.static}. 
+""" + +import os, re, StringIO + +from zope.interface.verify import verifyObject + +from twisted.internet import abstract, interfaces +from twisted.python.compat import set +from twisted.python.runtime import platform +from twisted.python.filepath import FilePath +from twisted.python import log +from twisted.trial.unittest import TestCase +from twisted.web import static, http, script, resource +from twisted.web.server import UnsupportedMethod +from twisted.web.test.test_web import DummyRequest +from twisted.web.test._util import _render + + +class StaticDataTests(TestCase): + """ + Tests for L{Data}. + """ + def test_headRequest(self): + """ + L{Data.render} returns an empty response body for a I{HEAD} request. + """ + data = static.Data("foo", "bar") + request = DummyRequest(['']) + request.method = 'HEAD' + d = _render(data, request) + def cbRendered(ignored): + self.assertEqual(''.join(request.written), "") + d.addCallback(cbRendered) + return d + + + def test_invalidMethod(self): + """ + L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET}, + non-I{HEAD} request. + """ + data = static.Data("foo", "bar") + request = DummyRequest(['']) + request.method = 'POST' + self.assertRaises(UnsupportedMethod, data.render, request) + + + +class StaticFileTests(TestCase): + """ + Tests for the basic behavior of L{File}. + """ + def _render(self, resource, request): + return _render(resource, request) + + + def test_invalidMethod(self): + """ + L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET}, + non-I{HEAD} request. + """ + request = DummyRequest(['']) + request.method = 'POST' + path = FilePath(self.mktemp()) + path.setContent("foo") + file = static.File(path.path) + self.assertRaises(UnsupportedMethod, file.render, request) + + + def test_notFound(self): + """ + If a request is made which encounters a L{File} before a final segment + which does not correspond to any file in the path the L{File} was + created with, a not found response is sent. + """ + base = FilePath(self.mktemp()) + base.makedirs() + file = static.File(base.path) + + request = DummyRequest(['foobar']) + child = resource.getChildForRequest(file, request) + + d = self._render(child, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, 404) + d.addCallback(cbRendered) + return d + + + def test_emptyChild(self): + """ + The C{''} child of a L{File} which corresponds to a directory in the + filesystem is a L{DirectoryLister}. + """ + base = FilePath(self.mktemp()) + base.makedirs() + file = static.File(base.path) + + request = DummyRequest(['']) + child = resource.getChildForRequest(file, request) + self.assertIsInstance(child, static.DirectoryLister) + self.assertEqual(child.path, base.path) + + + def test_securityViolationNotFound(self): + """ + If a request is made which encounters a L{File} before a final segment + which cannot be looked up in the filesystem due to security + considerations, a not found response is sent. + """ + base = FilePath(self.mktemp()) + base.makedirs() + file = static.File(base.path) + + request = DummyRequest(['..']) + child = resource.getChildForRequest(file, request) + + d = self._render(child, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, 404) + d.addCallback(cbRendered) + return d + + + def test_forbiddenResource(self): + """ + If the file in the filesystem which would satisfy a request cannot be + read, L{File.render} sets the HTTP response code to I{FORBIDDEN}. 
+ """ + base = FilePath(self.mktemp()) + base.setContent('') + # Make sure we can delete the file later. + self.addCleanup(base.chmod, 0700) + + # Get rid of our own read permission. + base.chmod(0) + + file = static.File(base.path) + request = DummyRequest(['']) + d = self._render(file, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, 403) + d.addCallback(cbRendered) + return d + if platform.isWindows(): + test_forbiddenResource.skip = "Cannot remove read permission on Windows" + + + def test_indexNames(self): + """ + If a request is made which encounters a L{File} before a final empty + segment, a file in the L{File} instance's C{indexNames} list which + exists in the path the L{File} was created with is served as the + response to the request. + """ + base = FilePath(self.mktemp()) + base.makedirs() + base.child("foo.bar").setContent("baz") + file = static.File(base.path) + file.indexNames = ['foo.bar'] + + request = DummyRequest(['']) + child = resource.getChildForRequest(file, request) + + d = self._render(child, request) + def cbRendered(ignored): + self.assertEqual(''.join(request.written), 'baz') + self.assertEqual(request.outgoingHeaders['content-length'], '3') + d.addCallback(cbRendered) + return d + + + def test_staticFile(self): + """ + If a request is made which encounters a L{File} before a final segment + which names a file in the path the L{File} was created with, that file + is served as the response to the request. + """ + base = FilePath(self.mktemp()) + base.makedirs() + base.child("foo.bar").setContent("baz") + file = static.File(base.path) + + request = DummyRequest(['foo.bar']) + child = resource.getChildForRequest(file, request) + + d = self._render(child, request) + def cbRendered(ignored): + self.assertEqual(''.join(request.written), 'baz') + self.assertEqual(request.outgoingHeaders['content-length'], '3') + d.addCallback(cbRendered) + return d + + + def test_staticFileDeletedGetChild(self): + """ + A L{static.File} created for a directory which does not exist should + return childNotFound from L{static.File.getChild}. + """ + staticFile = static.File(self.mktemp()) + request = DummyRequest(['foo.bar']) + child = staticFile.getChild("foo.bar", request) + self.assertEquals(child, staticFile.childNotFound) + + + def test_staticFileDeletedRender(self): + """ + A L{static.File} created for a file which does not exist should render + its C{childNotFound} page. + """ + staticFile = static.File(self.mktemp()) + request = DummyRequest(['foo.bar']) + request2 = DummyRequest(['foo.bar']) + d = self._render(staticFile, request) + d2 = self._render(staticFile.childNotFound, request2) + def cbRendered2(ignored): + def cbRendered(ignored): + self.assertEquals(''.join(request.written), + ''.join(request2.written)) + d.addCallback(cbRendered) + return d + d2.addCallback(cbRendered2) + return d2 + + + def test_headRequest(self): + """ + L{static.File.render} returns an empty response body for I{HEAD} + requests. 
+ """ + path = FilePath(self.mktemp()) + path.setContent("foo") + file = static.File(path.path) + request = DummyRequest(['']) + request.method = 'HEAD' + d = _render(file, request) + def cbRendered(ignored): + self.assertEqual("".join(request.written), "") + d.addCallback(cbRendered) + return d + + + def test_processors(self): + """ + If a request is made which encounters a L{File} before a final segment + which names a file with an extension which is in the L{File}'s + C{processors} mapping, the processor associated with that extension is + used to serve the response to the request. + """ + base = FilePath(self.mktemp()) + base.makedirs() + base.child("foo.bar").setContent( + "from twisted.web.static import Data\n" + "resource = Data('dynamic world','text/plain')\n") + + file = static.File(base.path) + file.processors = {'.bar': script.ResourceScript} + request = DummyRequest(["foo.bar"]) + child = resource.getChildForRequest(file, request) + + d = self._render(child, request) + def cbRendered(ignored): + self.assertEqual(''.join(request.written), 'dynamic world') + self.assertEqual(request.outgoingHeaders['content-length'], '13') + d.addCallback(cbRendered) + return d + + + def test_ignoreExt(self): + """ + The list of ignored extensions can be set by passing a value to + L{File.__init__} or by calling L{File.ignoreExt} later. + """ + file = static.File(".") + self.assertEqual(file.ignoredExts, []) + file.ignoreExt(".foo") + file.ignoreExt(".bar") + self.assertEqual(file.ignoredExts, [".foo", ".bar"]) + + file = static.File(".", ignoredExts=(".bar", ".baz")) + self.assertEqual(file.ignoredExts, [".bar", ".baz"]) + + + def test_ignoredExtensionsIgnored(self): + """ + A request for the I{base} child of a L{File} succeeds with a resource + for the I{base} file in the path the L{File} was created + with if such a file exists and the L{File} has been configured to + ignore the I{} extension. + """ + base = FilePath(self.mktemp()) + base.makedirs() + base.child('foo.bar').setContent('baz') + base.child('foo.quux').setContent('foobar') + file = static.File(base.path, ignoredExts=(".bar",)) + + request = DummyRequest(["foo"]) + child = resource.getChildForRequest(file, request) + + d = self._render(child, request) + def cbRendered(ignored): + self.assertEqual(''.join(request.written), 'baz') + d.addCallback(cbRendered) + return d + + + def test_createPickleChild(self): + """ + L{static.File.createPickleChild} is deprecated. + """ + path = FilePath(self.mktemp()) + path.makedirs() + static.File(path.path).createPickleChild("foo", None) + warnings = self.flushWarnings([self.test_createPickleChild]) + self.assertEqual(warnings[0]['category'], DeprecationWarning) + self.assertEqual( + warnings[0]['message'], + "File.createPickleChild is deprecated since Twisted 9.0. " + "Resource persistence is beyond the scope of Twisted Web.") + self.assertEqual(len(warnings), 1) + + + +class StaticMakeProducerTests(TestCase): + """ + Tests for L{File.makeProducer}. + """ + + + def makeResourceWithContent(self, content, type=None, encoding=None): + """ + Make a L{static.File} resource that has C{content} for its content. + + @param content: The bytes to use as the contents of the resource. + @param type: Optional value for the content type of the resource. 
+ """ + fileName = self.mktemp() + fileObject = open(fileName, 'w') + fileObject.write(content) + fileObject.close() + resource = static.File(fileName) + resource.encoding = encoding + resource.type = type + return resource + + + def contentHeaders(self, request): + """ + Extract the content-* headers from the L{DummyRequest} C{request}. + + This returns the subset of C{request.outgoingHeaders} of headers that + start with 'content-'. + """ + contentHeaders = {} + for k, v in request.outgoingHeaders.iteritems(): + if k.startswith('content-'): + contentHeaders[k] = v + return contentHeaders + + + def test_noRangeHeaderGivesNoRangeStaticProducer(self): + """ + makeProducer when no Range header is set returns an instance of + NoRangeStaticProducer. + """ + resource = self.makeResourceWithContent('') + request = DummyRequest([]) + producer = resource.makeProducer(request, resource.openForReading()) + self.assertIsInstance(producer, static.NoRangeStaticProducer) + + + def test_noRangeHeaderSets200OK(self): + """ + makeProducer when no Range header is set sets the responseCode on the + request to 'OK'. + """ + resource = self.makeResourceWithContent('') + request = DummyRequest([]) + resource.makeProducer(request, resource.openForReading()) + self.assertEqual(http.OK, request.responseCode) + + + def test_noRangeHeaderSetsContentHeaders(self): + """ + makeProducer when no Range header is set sets the Content-* headers + for the response. + """ + length = 123 + contentType = "text/plain" + contentEncoding = 'gzip' + resource = self.makeResourceWithContent( + 'a'*length, type=contentType, encoding=contentEncoding) + request = DummyRequest([]) + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + {'content-type': contentType, 'content-length': str(length), + 'content-encoding': contentEncoding}, + self.contentHeaders(request)) + + + def test_singleRangeGivesSingleRangeStaticProducer(self): + """ + makeProducer when the Range header requests a single byte range + returns an instance of SingleRangeStaticProducer. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=1-3' + resource = self.makeResourceWithContent('abcdef') + producer = resource.makeProducer(request, resource.openForReading()) + self.assertIsInstance(producer, static.SingleRangeStaticProducer) + + + def test_singleRangeSets206PartialContent(self): + """ + makeProducer when the Range header requests a single, satisfiable byte + range sets the response code on the request to 'Partial Content'. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=1-3' + resource = self.makeResourceWithContent('abcdef') + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + http.PARTIAL_CONTENT, request.responseCode) + + + def test_singleRangeSetsContentHeaders(self): + """ + makeProducer when the Range header requests a single, satisfiable byte + range sets the Content-* headers appropriately. 
+ """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=1-3' + contentType = "text/plain" + contentEncoding = 'gzip' + resource = self.makeResourceWithContent('abcdef', type=contentType, encoding=contentEncoding) + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + {'content-type': contentType, 'content-encoding': contentEncoding, + 'content-range': 'bytes 1-3/6', 'content-length': '3'}, + self.contentHeaders(request)) + + + def test_singleUnsatisfiableRangeReturnsSingleRangeStaticProducer(self): + """ + makeProducer still returns an instance of L{SingleRangeStaticProducer} + when the Range header requests a single unsatisfiable byte range. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=4-10' + resource = self.makeResourceWithContent('abc') + producer = resource.makeProducer(request, resource.openForReading()) + self.assertIsInstance(producer, static.SingleRangeStaticProducer) + + + def test_singleUnsatisfiableRangeSets416ReqestedRangeNotSatisfiable(self): + """ + makeProducer sets the response code of the request to of 'Requested + Range Not Satisfiable' when the Range header requests a single + unsatisfiable byte range. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=4-10' + resource = self.makeResourceWithContent('abc') + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode) + + + def test_singleUnsatisfiableRangeSetsContentHeaders(self): + """ + makeProducer when the Range header requests a single, unsatisfiable + byte range sets the Content-* headers appropriately. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=4-10' + contentType = "text/plain" + resource = self.makeResourceWithContent('abc', type=contentType) + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + {'content-type': 'text/plain', 'content-length': '0', + 'content-range': 'bytes */3'}, + self.contentHeaders(request)) + + + def test_singlePartiallyOverlappingRangeSetsContentHeaders(self): + """ + makeProducer when the Range header requests a single byte range that + partly overlaps the resource sets the Content-* headers appropriately. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=2-10' + contentType = "text/plain" + resource = self.makeResourceWithContent('abc', type=contentType) + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + {'content-type': 'text/plain', 'content-length': '1', + 'content-range': 'bytes 2-2/3'}, + self.contentHeaders(request)) + + + def test_multipleRangeGivesMultipleRangeStaticProducer(self): + """ + makeProducer when the Range header requests a single byte range + returns an instance of MultipleRangeStaticProducer. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=1-3,5-6' + resource = self.makeResourceWithContent('abcdef') + producer = resource.makeProducer(request, resource.openForReading()) + self.assertIsInstance(producer, static.MultipleRangeStaticProducer) + + + def test_multipleRangeSets206PartialContent(self): + """ + makeProducer when the Range header requests a multiple satisfiable + byte ranges sets the response code on the request to 'Partial + Content'. 
+ """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=1-3,5-6' + resource = self.makeResourceWithContent('abcdef') + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + http.PARTIAL_CONTENT, request.responseCode) + + + def test_mutipleRangeSetsContentHeaders(self): + """ + makeProducer when the Range header requests a single, satisfiable byte + range sets the Content-* headers appropriately. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=1-3,5-6' + resource = self.makeResourceWithContent( + 'abcdefghijkl', encoding='gzip') + producer = resource.makeProducer(request, resource.openForReading()) + contentHeaders = self.contentHeaders(request) + # The only content-* headers set are content-type and content-length. + self.assertEqual( + set(['content-length', 'content-type']), + set(contentHeaders.keys())) + # The content-length depends on the boundary used in the response. + expectedLength = 5 + for boundary, offset, size in producer.rangeInfo: + expectedLength += len(boundary) + self.assertEqual(expectedLength, contentHeaders['content-length']) + # Content-type should be set to a value indicating a multipart + # response and the boundary used to separate the parts. + self.assertIn('content-type', contentHeaders) + contentType = contentHeaders['content-type'] + self.assertNotIdentical( + None, re.match( + 'multipart/byteranges; boundary="[^"]*"\Z', contentType)) + # Content-encoding is not set in the response to a multiple range + # response, which is a bit wussy but works well enough with the way + # static.File does content-encodings... + self.assertNotIn('content-encoding', contentHeaders) + + + def test_multipleUnsatisfiableRangesReturnsMultipleRangeStaticProducer(self): + """ + makeProducer still returns an instance of L{SingleRangeStaticProducer} + when the Range header requests multiple ranges, none of which are + satisfiable. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=10-12,15-20' + resource = self.makeResourceWithContent('abc') + producer = resource.makeProducer(request, resource.openForReading()) + self.assertIsInstance(producer, static.MultipleRangeStaticProducer) + + + def test_multipleUnsatisfiableRangesSets416ReqestedRangeNotSatisfiable(self): + """ + makeProducer sets the response code of the request to of 'Requested + Range Not Satisfiable' when the Range header requests multiple ranges, + none of which are satisfiable. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=10-12,15-20' + resource = self.makeResourceWithContent('abc') + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + http.REQUESTED_RANGE_NOT_SATISFIABLE, request.responseCode) + + + def test_multipleUnsatisfiableRangeSetsContentHeaders(self): + """ + makeProducer when the Range header requests multiple ranges, none of + which are satisfiable, sets the Content-* headers appropriately. + """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=4-10' + contentType = "text/plain" + request.headers['range'] = 'bytes=10-12,15-20' + resource = self.makeResourceWithContent('abc', type=contentType) + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + {'content-length': '0', 'content-range': 'bytes */3'}, + self.contentHeaders(request)) + + + def test_oneSatisfiableRangeIsEnough(self): + """ + makeProducer when the Range header requests multiple ranges, at least + one of which matches, sets the response code to 'Partial Content'. 
+ """ + request = DummyRequest([]) + request.headers['range'] = 'bytes=1-3,100-200' + resource = self.makeResourceWithContent('abcdef') + resource.makeProducer(request, resource.openForReading()) + self.assertEqual( + http.PARTIAL_CONTENT, request.responseCode) + + + +class StaticProducerTests(TestCase): + """ + Tests for the abstract L{StaticProducer}. + """ + + def test_stopProducingClosesFile(self): + """ + L{StaticProducer.stopProducing} closes the file object the producer is + producing data from. + """ + fileObject = StringIO.StringIO() + producer = static.StaticProducer(None, fileObject) + producer.stopProducing() + self.assertTrue(fileObject.closed) + + + def test_stopProducingSetsRequestToNone(self): + """ + L{StaticProducer.stopProducing} sets the request instance variable to + None, which indicates to subclasses' resumeProducing methods that no + more data should be produced. + """ + fileObject = StringIO.StringIO() + producer = static.StaticProducer(DummyRequest([]), fileObject) + producer.stopProducing() + self.assertIdentical(None, producer.request) + + + +class NoRangeStaticProducerTests(TestCase): + """ + Tests for L{NoRangeStaticProducer}. + """ + + def test_implementsIPullProducer(self): + """ + L{NoRangeStaticProducer} implements L{IPullProducer}. + """ + verifyObject( + interfaces.IPullProducer, + static.NoRangeStaticProducer(None, None)) + + + def test_resumeProducingProducesContent(self): + """ + L{NoRangeStaticProducer.resumeProducing} writes content from the + resource to the request. + """ + request = DummyRequest([]) + content = 'abcdef' + producer = static.NoRangeStaticProducer( + request, StringIO.StringIO(content)) + # start calls registerProducer on the DummyRequest, which pulls all + # output from the producer and so we just need this one call. + producer.start() + self.assertEqual(content, ''.join(request.written)) + + + def test_resumeProducingBuffersOutput(self): + """ + L{NoRangeStaticProducer.start} writes at most + C{abstract.FileDescriptor.bufferSize} bytes of content from the + resource to the request at once. + """ + request = DummyRequest([]) + bufferSize = abstract.FileDescriptor.bufferSize + content = 'a' * (2*bufferSize + 1) + producer = static.NoRangeStaticProducer( + request, StringIO.StringIO(content)) + # start calls registerProducer on the DummyRequest, which pulls all + # output from the producer and so we just need this one call. + producer.start() + expected = [ + content[0:bufferSize], + content[bufferSize:2*bufferSize], + content[2*bufferSize:] + ] + self.assertEqual(expected, request.written) + + + def test_finishCalledWhenDone(self): + """ + L{NoRangeStaticProducer.resumeProducing} calls finish() on the request + after it is done producing content. + """ + request = DummyRequest([]) + finishDeferred = request.notifyFinish() + callbackList = [] + finishDeferred.addCallback(callbackList.append) + producer = static.NoRangeStaticProducer( + request, StringIO.StringIO('abcdef')) + # start calls registerProducer on the DummyRequest, which pulls all + # output from the producer and so we just need this one call. + producer.start() + self.assertEqual([None], callbackList) + + + +class SingleRangeStaticProducerTests(TestCase): + """ + Tests for L{SingleRangeStaticProducer}. + """ + + def test_implementsIPullProducer(self): + """ + L{SingleRangeStaticProducer} implements L{IPullProducer}. 
+ """ + verifyObject( + interfaces.IPullProducer, + static.SingleRangeStaticProducer(None, None, None, None)) + + + def test_resumeProducingProducesContent(self): + """ + L{SingleRangeStaticProducer.resumeProducing} writes the given amount + of content, starting at the given offset, from the resource to the + request. + """ + request = DummyRequest([]) + content = 'abcdef' + producer = static.SingleRangeStaticProducer( + request, StringIO.StringIO(content), 1, 3) + # DummyRequest.registerProducer pulls all output from the producer, so + # we just need to call start. + producer.start() + self.assertEqual(content[1:4], ''.join(request.written)) + + + def test_resumeProducingBuffersOutput(self): + """ + L{SingleRangeStaticProducer.start} writes at most + C{abstract.FileDescriptor.bufferSize} bytes of content from the + resource to the request at once. + """ + request = DummyRequest([]) + bufferSize = abstract.FileDescriptor.bufferSize + content = 'abc' * bufferSize + producer = static.SingleRangeStaticProducer( + request, StringIO.StringIO(content), 1, bufferSize+10) + # DummyRequest.registerProducer pulls all output from the producer, so + # we just need to call start. + producer.start() + expected = [ + content[1:bufferSize+1], + content[bufferSize+1:bufferSize+11], + ] + self.assertEqual(expected, request.written) + + + def test_finishCalledWhenDone(self): + """ + L{SingleRangeStaticProducer.resumeProducing} calls finish() on the + request after it is done producing content. + """ + request = DummyRequest([]) + finishDeferred = request.notifyFinish() + callbackList = [] + finishDeferred.addCallback(callbackList.append) + producer = static.SingleRangeStaticProducer( + request, StringIO.StringIO('abcdef'), 1, 1) + # start calls registerProducer on the DummyRequest, which pulls all + # output from the producer and so we just need this one call. + producer.start() + self.assertEqual([None], callbackList) + + + +class MultipleRangeStaticProducerTests(TestCase): + """ + Tests for L{MultipleRangeStaticProducer}. + """ + + def test_implementsIPullProducer(self): + """ + L{MultipleRangeStaticProducer} implements L{IPullProducer}. + """ + verifyObject( + interfaces.IPullProducer, + static.MultipleRangeStaticProducer(None, None, None)) + + + def test_resumeProducingProducesContent(self): + """ + L{MultipleRangeStaticProducer.resumeProducing} writes the requested + chunks of content from the resource to the request, with the supplied + boundaries in between each chunk. + """ + request = DummyRequest([]) + content = 'abcdef' + producer = static.MultipleRangeStaticProducer( + request, StringIO.StringIO(content), [('1', 1, 3), ('2', 5, 1)]) + # DummyRequest.registerProducer pulls all output from the producer, so + # we just need to call start. + producer.start() + self.assertEqual('1bcd2f', ''.join(request.written)) + + + def test_resumeProducingBuffersOutput(self): + """ + L{MultipleRangeStaticProducer.start} writes about + C{abstract.FileDescriptor.bufferSize} bytes of content from the + resource to the request at once. + + To be specific about the 'about' above: it can write slightly more, + for example in the case where the first boundary plus the first chunk + is less than C{bufferSize} but first boundary plus the first chunk + plus the second boundary is more, but this is unimportant as in + practice the boundaries are fairly small. On the other side, it is + important for performance to bundle up several small chunks into one + call to request.write. 
+ """ + request = DummyRequest([]) + content = '0123456789' * 2 + producer = static.MultipleRangeStaticProducer( + request, StringIO.StringIO(content), + [('a', 0, 2), ('b', 5, 10), ('c', 0, 0)]) + producer.bufferSize = 10 + # DummyRequest.registerProducer pulls all output from the producer, so + # we just need to call start. + producer.start() + expected = [ + 'a' + content[0:2] + 'b' + content[5:11], + content[11:15] + 'c', + ] + self.assertEqual(expected, request.written) + + + def test_finishCalledWhenDone(self): + """ + L{MultipleRangeStaticProducer.resumeProducing} calls finish() on the + request after it is done producing content. + """ + request = DummyRequest([]) + finishDeferred = request.notifyFinish() + callbackList = [] + finishDeferred.addCallback(callbackList.append) + producer = static.MultipleRangeStaticProducer( + request, StringIO.StringIO('abcdef'), [('', 1, 2)]) + # start calls registerProducer on the DummyRequest, which pulls all + # output from the producer and so we just need this one call. + producer.start() + self.assertEqual([None], callbackList) + + + +class RangeTests(TestCase): + """ + Tests for I{Range-Header} support in L{twisted.web.static.File}. + + @type file: L{file} + @ivar file: Temporary (binary) file containing the content to be served. + + @type resource: L{static.File} + @ivar resource: A leaf web resource using C{file} as content. + + @type request: L{DummyRequest} + @ivar request: A fake request, requesting C{resource}. + + @type catcher: L{list} + @ivar catcher: List which gathers all log information. + """ + def setUp(self): + """ + Create a temporary file with a fixed payload of 64 bytes. Create a + resource for that file and create a request which will be for that + resource. Each test can set a different range header to test different + aspects of the implementation. + """ + path = FilePath(self.mktemp()) + # This is just a jumble of random stuff. It's supposed to be a good + # set of data for this test, particularly in order to avoid + # accidentally seeing the right result by having a byte sequence + # repeated at different locations or by having byte values which are + # somehow correlated with their position in the string. + self.payload = ('\xf8u\xf3E\x8c7\xce\x00\x9e\xb6a0y0S\xf0\xef\xac\xb7' + '\xbe\xb5\x17M\x1e\x136k{\x1e\xbe\x0c\x07\x07\t\xd0' + '\xbckY\xf5I\x0b\xb8\x88oZ\x1d\x85b\x1a\xcdk\xf2\x1d' + '&\xfd%\xdd\x82q/A\x10Y\x8b') + path.setContent(self.payload) + self.file = path.open() + self.resource = static.File(self.file.name) + self.resource.isLeaf = 1 + self.request = DummyRequest(['']) + self.request.uri = self.file.name + self.catcher = [] + log.addObserver(self.catcher.append) + + + def tearDown(self): + """ + Clean up the resource file and the log observer. + """ + self.file.close() + log.removeObserver(self.catcher.append) + + + def _assertLogged(self, expected): + """ + Asserts that a given log message occurred with an expected message. + """ + logItem = self.catcher.pop() + self.assertEquals(logItem["message"][0], expected) + self.assertEqual( + self.catcher, [], "An additional log occured: %r" % (logItem,)) + + + def test_invalidRanges(self): + """ + L{File._parseRangeHeader} raises L{ValueError} when passed + syntactically invalid byte ranges. 
+ """ + f = self.resource._parseRangeHeader + + # there's no = + self.assertRaises(ValueError, f, 'bytes') + + # unknown isn't a valid Bytes-Unit + self.assertRaises(ValueError, f, 'unknown=1-2') + + # there's no - in =stuff + self.assertRaises(ValueError, f, 'bytes=3') + + # both start and end are empty + self.assertRaises(ValueError, f, 'bytes=-') + + # start isn't an integer + self.assertRaises(ValueError, f, 'bytes=foo-') + + # end isn't an integer + self.assertRaises(ValueError, f, 'bytes=-foo') + + # end isn't equal to or greater than start + self.assertRaises(ValueError, f, 'bytes=5-4') + + + def test_rangeMissingStop(self): + """ + A single bytes range without an explicit stop position is parsed into a + two-tuple giving the start position and C{None}. + """ + self.assertEqual( + self.resource._parseRangeHeader('bytes=0-'), [(0, None)]) + + + def test_rangeMissingStart(self): + """ + A single bytes range without an explicit start position is parsed into + a two-tuple of C{None} and the end position. + """ + self.assertEqual( + self.resource._parseRangeHeader('bytes=-3'), [(None, 3)]) + + + def test_range(self): + """ + A single bytes range with explicit start and stop positions is parsed + into a two-tuple of those positions. + """ + self.assertEqual( + self.resource._parseRangeHeader('bytes=2-5'), [(2, 5)]) + + + def test_rangeWithSpace(self): + """ + A single bytes range with whitespace in allowed places is parsed in + the same way as it would be without the whitespace. + """ + self.assertEqual( + self.resource._parseRangeHeader(' bytes=1-2 '), [(1, 2)]) + self.assertEqual( + self.resource._parseRangeHeader('bytes =1-2 '), [(1, 2)]) + self.assertEqual( + self.resource._parseRangeHeader('bytes= 1-2'), [(1, 2)]) + self.assertEqual( + self.resource._parseRangeHeader('bytes=1 -2'), [(1, 2)]) + self.assertEqual( + self.resource._parseRangeHeader('bytes=1- 2'), [(1, 2)]) + self.assertEqual( + self.resource._parseRangeHeader('bytes=1-2 '), [(1, 2)]) + + + def test_nullRangeElements(self): + """ + If there are multiple byte ranges but only one is non-null, the + non-null range is parsed and its start and stop returned. + """ + self.assertEqual( + self.resource._parseRangeHeader('bytes=1-2,\r\n, ,\t'), [(1, 2)]) + + + def test_multipleRanges(self): + """ + If multiple byte ranges are specified their starts and stops are + returned. + """ + self.assertEqual( + self.resource._parseRangeHeader('bytes=1-2,3-4'), + [(1, 2), (3, 4)]) + + + def test_bodyLength(self): + """ + A correct response to a range request is as long as the length of the + requested range. + """ + self.request.headers['range'] = 'bytes=0-43' + self.resource.render(self.request) + self.assertEquals(len(''.join(self.request.written)), 44) + + + def test_invalidRangeRequest(self): + """ + An incorrect range request (RFC 2616 defines a correct range request as + a Bytes-Unit followed by a '=' character followed by a specific range. + Only 'bytes' is defined) results in the range header value being logged + and a normal 200 response being sent. 
+ """ + self.request.headers['range'] = range = 'foobar=0-43' + self.resource.render(self.request) + expected = "Ignoring malformed Range header %r" % (range,) + self._assertLogged(expected) + self.assertEquals(''.join(self.request.written), self.payload) + self.assertEquals(self.request.responseCode, http.OK) + self.assertEquals( + self.request.outgoingHeaders['content-length'], + str(len(self.payload))) + + + def parseMultipartBody(self, body, boundary): + """ + Parse C{body} as a multipart MIME response separated by C{boundary}. + + Note that this with fail the calling test on certain syntactic + problems. + """ + sep = "\r\n--" + boundary + parts = ''.join(body).split(sep) + self.assertEquals('', parts[0]) + self.assertEquals('--\r\n', parts[-1]) + parsed_parts = [] + for part in parts[1:-1]: + before, header1, header2, blank, partBody = part.split('\r\n', 4) + headers = header1 + '\n' + header2 + self.assertEqual('', before) + self.assertEqual('', blank) + partContentTypeValue = re.search( + '^content-type: (.*)$', headers, re.I|re.M).group(1) + start, end, size = re.search( + '^content-range: bytes ([0-9]+)-([0-9]+)/([0-9]+)$', + headers, re.I|re.M).groups() + parsed_parts.append( + {'contentType': partContentTypeValue, + 'contentRange': (start, end, size), + 'body': partBody}) + return parsed_parts + + + def test_multipleRangeRequest(self): + """ + The response to a request for multipe bytes ranges is a MIME-ish + multipart response. + """ + startEnds = [(0, 2), (20, 30), (40, 50)] + rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds]) + self.request.headers['range'] = 'bytes=' + rangeHeaderValue + self.resource.render(self.request) + self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT) + boundary = re.match( + '^multipart/byteranges; boundary="(.*)"$', + self.request.outgoingHeaders['content-type']).group(1) + parts = self.parseMultipartBody(''.join(self.request.written), boundary) + self.assertEquals(len(startEnds), len(parts)) + for part, (s, e) in zip(parts, startEnds): + self.assertEqual(self.resource.type, part['contentType']) + start, end, size = part['contentRange'] + self.assertEqual(int(start), s) + self.assertEqual(int(end), e) + self.assertEqual(int(size), self.resource.getFileSize()) + self.assertEqual(self.payload[s:e+1], part['body']) + + + def test_multipleRangeRequestWithRangeOverlappingEnd(self): + """ + The response to a request for multipe bytes ranges is a MIME-ish + multipart response, even when one of the ranged falls off the end of + the resource. 
+ """ + startEnds = [(0, 2), (40, len(self.payload) + 10)] + rangeHeaderValue = ','.join(["%s-%s"%(s,e) for (s, e) in startEnds]) + self.request.headers['range'] = 'bytes=' + rangeHeaderValue + self.resource.render(self.request) + self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT) + boundary = re.match( + '^multipart/byteranges; boundary="(.*)"$', + self.request.outgoingHeaders['content-type']).group(1) + parts = self.parseMultipartBody(''.join(self.request.written), boundary) + self.assertEquals(len(startEnds), len(parts)) + for part, (s, e) in zip(parts, startEnds): + self.assertEqual(self.resource.type, part['contentType']) + start, end, size = part['contentRange'] + self.assertEqual(int(start), s) + self.assertEqual(int(end), min(e, self.resource.getFileSize()-1)) + self.assertEqual(int(size), self.resource.getFileSize()) + self.assertEqual(self.payload[s:e+1], part['body']) + + + def test_implicitEnd(self): + """ + If the end byte position is omitted, then it is treated as if the + length of the resource was specified by the end byte position. + """ + self.request.headers['range'] = 'bytes=23-' + self.resource.render(self.request) + self.assertEquals(''.join(self.request.written), self.payload[23:]) + self.assertEquals(len(''.join(self.request.written)), 41) + self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT) + self.assertEquals( + self.request.outgoingHeaders['content-range'], 'bytes 23-63/64') + self.assertEquals(self.request.outgoingHeaders['content-length'], '41') + + + def test_implicitStart(self): + """ + If the start byte position is omitted but the end byte position is + supplied, then the range is treated as requesting the last -N bytes of + the resource, where N is the end byte position. + """ + self.request.headers['range'] = 'bytes=-17' + self.resource.render(self.request) + self.assertEquals(''.join(self.request.written), self.payload[-17:]) + self.assertEquals(len(''.join(self.request.written)), 17) + self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT) + self.assertEquals( + self.request.outgoingHeaders['content-range'], 'bytes 47-63/64') + self.assertEquals(self.request.outgoingHeaders['content-length'], '17') + + + def test_explicitRange(self): + """ + A correct response to a bytes range header request from A to B starts + with the A'th byte and ends with (including) the B'th byte. The first + byte of a page is numbered with 0. + """ + self.request.headers['range'] = 'bytes=3-43' + self.resource.render(self.request) + written = ''.join(self.request.written) + self.assertEquals(written, self.payload[3:44]) + self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT) + self.assertEquals( + self.request.outgoingHeaders['content-range'], 'bytes 3-43/64') + self.assertEquals( + str(len(written)), self.request.outgoingHeaders['content-length']) + + + def test_explicitRangeOverlappingEnd(self): + """ + A correct response to a bytes range header request from A to B when B + is past the end of the resource starts with the A'th byte and ends + with the last byte of the resource. The first byte of a page is + numbered with 0. 
+ """ + self.request.headers['range'] = 'bytes=40-100' + self.resource.render(self.request) + written = ''.join(self.request.written) + self.assertEquals(written, self.payload[40:]) + self.assertEquals(self.request.responseCode, http.PARTIAL_CONTENT) + self.assertEquals( + self.request.outgoingHeaders['content-range'], 'bytes 40-63/64') + self.assertEquals( + str(len(written)), self.request.outgoingHeaders['content-length']) + + + def test_statusCodeRequestedRangeNotSatisfiable(self): + """ + If a range is syntactically invalid due to the start being greater than + the end, the range header is ignored (the request is responded to as if + it were not present). + """ + self.request.headers['range'] = 'bytes=20-13' + self.resource.render(self.request) + self.assertEquals(self.request.responseCode, http.OK) + self.assertEquals(''.join(self.request.written), self.payload) + self.assertEquals( + self.request.outgoingHeaders['content-length'], + str(len(self.payload))) + + + def test_invalidStartBytePos(self): + """ + If a range is unsatisfiable due to the start not being less than the + length of the resource, the response is 416 (Requested range not + satisfiable) and no data is written to the response body (RFC 2616, + section 14.35.1). + """ + self.request.headers['range'] = 'bytes=67-108' + self.resource.render(self.request) + self.assertEquals( + self.request.responseCode, http.REQUESTED_RANGE_NOT_SATISFIABLE) + self.assertEquals(''.join(self.request.written), '') + self.assertEquals(self.request.outgoingHeaders['content-length'], '0') + # Sections 10.4.17 and 14.16 + self.assertEquals( + self.request.outgoingHeaders['content-range'], + 'bytes */%d' % (len(self.payload),)) + + + +class DirectoryListerTest(TestCase): + """ + Tests for L{static.DirectoryLister}. + """ + def _request(self, uri): + request = DummyRequest(['']) + request.uri = uri + return request + + + def test_renderHeader(self): + """ + L{static.DirectoryLister} prints the request uri as header of the + rendered content. + """ + path = FilePath(self.mktemp()) + path.makedirs() + + lister = static.DirectoryLister(path.path) + data = lister.render(self._request('foo')) + self.assertIn("

<h1>Directory listing for foo</h1>
                              ", data) + self.assertIn("Directory listing for foo", data) + + + def test_renderUnquoteHeader(self): + """ + L{static.DirectoryLister} unquote the request uri before printing it. + """ + path = FilePath(self.mktemp()) + path.makedirs() + + lister = static.DirectoryLister(path.path) + data = lister.render(self._request('foo%20bar')) + self.assertIn("

<h1>Directory listing for foo bar</h1>
                              ", data) + self.assertIn("Directory listing for foo bar", data) + + + def test_escapeHeader(self): + """ + L{static.DirectoryLister} escape "&", "<" and ">" after unquoting the + request uri. + """ + path = FilePath(self.mktemp()) + path.makedirs() + + lister = static.DirectoryLister(path.path) + data = lister.render(self._request('foo%26bar')) + self.assertIn("

<h1>Directory listing for foo&amp;bar</h1>
                              ", data) + self.assertIn("Directory listing for foo&bar", data) + + + def test_renderFiles(self): + """ + L{static.DirectoryLister} is able to list all the files inside a + directory. + """ + path = FilePath(self.mktemp()) + path.makedirs() + path.child('file1').setContent("content1") + path.child('file2').setContent("content2" * 1000) + + lister = static.DirectoryLister(path.path) + data = lister.render(self._request('foo')) + body = """ + file1 + 8B + [text/html] + + + + file2 + 7K + [text/html] + +""" + self.assertIn(body, data) + + + def test_renderDirectories(self): + """ + L{static.DirectoryLister} is able to list all the directories inside + a directory. + """ + path = FilePath(self.mktemp()) + path.makedirs() + path.child('dir1').makedirs() + path.child('dir2 & 3').makedirs() + + lister = static.DirectoryLister(path.path) + data = lister.render(self._request('foo')) + body = """ + dir1/ + + [Directory] + + + + dir2 & 3/ + + [Directory] + +""" + self.assertIn(body, data) + + + def test_renderFiltered(self): + """ + L{static.DirectoryLister} takes a optional C{dirs} argument that + filter out the list of of directories and files printed. + """ + path = FilePath(self.mktemp()) + path.makedirs() + path.child('dir1').makedirs() + path.child('dir2').makedirs() + path.child('dir3').makedirs() + lister = static.DirectoryLister(path.path, dirs=["dir1", "dir3"]) + data = lister.render(self._request('foo')) + body = """ + dir1/ + + [Directory] + + + + dir3/ + + [Directory] + +""" + self.assertIn(body, data) + + + def test_oddAndEven(self): + """ + L{static.DirectoryLister} gives an alternate class for each odd and + even rows in the table. + """ + lister = static.DirectoryLister(None) + elements = [{"href": "", "text": "", "size": "", "type": "", + "encoding": ""} for i in xrange(5)] + content = lister._buildTableContent(elements) + + self.assertEquals(len(content), 5) + self.assertTrue(content[0].startswith('')) + self.assertTrue(content[1].startswith('')) + self.assertTrue(content[2].startswith('')) + self.assertTrue(content[3].startswith('')) + self.assertTrue(content[4].startswith('')) + + + def test_mimeTypeAndEncodings(self): + """ + L{static.DirectoryLister} is able to detect mimetype and encoding of + listed files. 
+ """ + path = FilePath(self.mktemp()) + path.makedirs() + path.child('file1.txt').setContent("file1") + path.child('file2.py').setContent("python") + path.child('file3.conf.gz').setContent("conf compressed") + path.child('file4.diff.bz2').setContent("diff compressed") + directory = os.listdir(path.path) + directory.sort() + + contentTypes = { + ".txt": "text/plain", + ".py": "text/python", + ".conf": "text/configuration", + ".diff": "text/diff" + } + + lister = static.DirectoryLister(path.path, contentTypes=contentTypes) + dirs, files = lister._getFilesAndDirectories(directory) + self.assertEquals(dirs, []) + self.assertEquals(files, [ + {'encoding': '', + 'href': 'file1.txt', + 'size': '5B', + 'text': 'file1.txt', + 'type': '[text/plain]'}, + {'encoding': '', + 'href': 'file2.py', + 'size': '6B', + 'text': 'file2.py', + 'type': '[text/python]'}, + {'encoding': '[gzip]', + 'href': 'file3.conf.gz', + 'size': '15B', + 'text': 'file3.conf.gz', + 'type': '[text/configuration]'}, + {'encoding': '[bzip2]', + 'href': 'file4.diff.bz2', + 'size': '15B', + 'text': 'file4.diff.bz2', + 'type': '[text/diff]'}]) + + + def test_brokenSymlink(self): + """ + If on the file in the listing points to a broken symlink, it should not + be returned by L{static.DirectoryLister._getFilesAndDirectories}. + """ + path = FilePath(self.mktemp()) + path.makedirs() + file1 = path.child('file1') + file1.setContent("file1") + file1.linkTo(path.child("file2")) + file1.remove() + + lister = static.DirectoryLister(path.path) + directory = os.listdir(path.path) + directory.sort() + dirs, files = lister._getFilesAndDirectories(directory) + self.assertEquals(dirs, []) + self.assertEquals(files, []) + + if getattr(os, "symlink", None) is None: + test_brokenSymlink.skip = "No symlink support" + + + def test_childrenNotFound(self): + """ + Any child resource of L{static.DirectoryLister} renders an HTTP + I{NOT FOUND} response code. + """ + path = FilePath(self.mktemp()) + path.makedirs() + lister = static.DirectoryLister(path.path) + request = self._request('') + child = resource.getChildForRequest(lister, request) + result = _render(child, request) + def cbRendered(ignored): + self.assertEquals(request.responseCode, http.NOT_FOUND) + result.addCallback(cbRendered) + return result + + + def test_repr(self): + """ + L{static.DirectoryLister.__repr__} gives the path of the lister. + """ + path = FilePath(self.mktemp()) + lister = static.DirectoryLister(path.path) + self.assertEquals(repr(lister), + "" % (path.path,)) + self.assertEquals(str(lister), + "" % (path.path,)) + + def test_formatFileSize(self): + """ + L{static.formatFileSize} format an amount of bytes into a more readable + format. + """ + self.assertEquals(static.formatFileSize(0), "0B") + self.assertEquals(static.formatFileSize(123), "123B") + self.assertEquals(static.formatFileSize(4567), "4K") + self.assertEquals(static.formatFileSize(8900000), "8M") + self.assertEquals(static.formatFileSize(1234000000), "1G") + self.assertEquals(static.formatFileSize(1234567890000), "1149G") + + + +class TestFileTransferDeprecated(TestCase): + """ + L{static.FileTransfer} is deprecated. + """ + + def test_deprecation(self): + """ + Instantiation of L{FileTransfer} produces a deprecation warning. 
+ """ + static.FileTransfer(StringIO.StringIO(), 0, DummyRequest([])) + warnings = self.flushWarnings([self.test_deprecation]) + self.assertEqual(len(warnings), 1) + self.assertEqual(warnings[0]['category'], DeprecationWarning) + self.assertEqual( + warnings[0]['message'], + 'FileTransfer is deprecated since Twisted 9.0. ' + 'Use a subclass of StaticProducer instead.') diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_tap.py b/vendor/Twisted-10.0.0/twisted/web/test/test_tap.py new file mode 100644 index 000000000000..358f2789d8e4 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_tap.py @@ -0,0 +1,251 @@ +# Copyright (c) 2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.tap}. +""" + +import os, stat, pickle + +from twisted.python.usage import UsageError +from twisted.python.filepath import FilePath +from twisted.internet.interfaces import IReactorUNIX +from twisted.internet import reactor +from twisted.python.threadpool import ThreadPool +from twisted.trial.unittest import TestCase +from twisted.application import strports + +from twisted.web.server import Site +from twisted.web.static import Data, File +from twisted.web.distrib import ResourcePublisher, UserDirectory +from twisted.web.wsgi import WSGIResource +from twisted.web.tap import Options, makePersonalServerFactory, makeService +from twisted.web.twcgi import CGIScript, PHP3Script, PHPScript +from twisted.web.script import PythonScript + + +from twisted.spread.pb import PBServerFactory + +application = object() + +class ServiceTests(TestCase): + """ + Tests for the service creation APIs in L{twisted.web.tap}. + """ + def _pathOption(self): + """ + Helper for the I{--path} tests which creates a directory and creates + an L{Options} object which uses that directory as its static + filesystem root. + + @return: A two-tuple of a L{FilePath} referring to the directory and + the value associated with the C{'root'} key in the L{Options} + instance after parsing a I{--path} option. + """ + path = FilePath(self.mktemp()) + path.makedirs() + options = Options() + options.parseOptions(['--path', path.path]) + root = options['root'] + return path, root + + + def test_path(self): + """ + The I{--path} option causes L{Options} to create a root resource + which serves responses from the specified path. + """ + path, root = self._pathOption() + self.assertIsInstance(root, File) + self.assertEqual(root.path, path.path) + + + def test_cgiProcessor(self): + """ + The I{--path} option creates a root resource which serves a + L{CGIScript} instance for any child with the C{".cgi"} extension. + """ + path, root = self._pathOption() + path.child("foo.cgi").setContent("") + self.assertIsInstance(root.getChild("foo.cgi", None), CGIScript) + + + def test_php3Processor(self): + """ + The I{--path} option creates a root resource which serves a + L{PHP3Script} instance for any child with the C{".php3"} extension. + """ + path, root = self._pathOption() + path.child("foo.php3").setContent("") + self.assertIsInstance(root.getChild("foo.php3", None), PHP3Script) + + + def test_phpProcessor(self): + """ + The I{--path} option creates a root resource which serves a + L{PHPScript} instance for any child with the C{".php"} extension. 
+ """ + path, root = self._pathOption() + path.child("foo.php").setContent("") + self.assertIsInstance(root.getChild("foo.php", None), PHPScript) + + + def test_epyProcessor(self): + """ + The I{--path} option creates a root resource which serves a + L{PythonScript} instance for any child with the C{".epy"} extension. + """ + path, root = self._pathOption() + path.child("foo.epy").setContent("") + self.assertIsInstance(root.getChild("foo.epy", None), PythonScript) + + + def test_rpyProcessor(self): + """ + The I{--path} option creates a root resource which serves the + C{resource} global defined by the Python source in any child with + the C{".rpy"} extension. + """ + path, root = self._pathOption() + path.child("foo.rpy").setContent( + "from twisted.web.static import Data\n" + "resource = Data('content', 'major/minor')\n") + child = root.getChild("foo.rpy", None) + self.assertIsInstance(child, Data) + self.assertEqual(child.data, 'content') + self.assertEqual(child.type, 'major/minor') + + + def test_trpProcessor(self): + """ + The I{--path} option creates a root resource which serves the + pickled resource out of any child with the C{".rpy"} extension. + """ + path, root = self._pathOption() + path.child("foo.trp").setContent(pickle.dumps(Data("foo", "bar"))) + child = root.getChild("foo.trp", None) + self.assertIsInstance(child, Data) + self.assertEqual(child.data, 'foo') + self.assertEqual(child.type, 'bar') + + warnings = self.flushWarnings() + + # If the trp module hadn't been imported before this test ran, there + # will be two deprecation warnings; one for the module, one for the + # function. If the module has already been imported, the + # module-scope deprecation won't be emitted again. + if len(warnings) == 2: + self.assertEqual(warnings[0]['category'], DeprecationWarning) + self.assertEqual( + warnings[0]['message'], + "twisted.web.trp is deprecated as of Twisted 9.0. Resource " + "persistence is beyond the scope of Twisted Web.") + warning = warnings[1] + else: + warning = warnings[0] + + self.assertEqual(warning['category'], DeprecationWarning) + self.assertEqual( + warning['message'], + "twisted.web.trp.ResourceUnpickler is deprecated as of Twisted " + "9.0. Resource persistence is beyond the scope of Twisted Web.") + + + def test_makePersonalServerFactory(self): + """ + L{makePersonalServerFactory} returns a PB server factory which has + as its root object a L{ResourcePublisher}. + """ + # The fact that this pile of objects can actually be used somehow is + # verified by twisted.web.test.test_distrib. + site = Site(Data("foo bar", "text/plain")) + serverFactory = makePersonalServerFactory(site) + self.assertIsInstance(serverFactory, PBServerFactory) + self.assertIsInstance(serverFactory.root, ResourcePublisher) + self.assertIdentical(serverFactory.root.site, site) + + + def test_personalServer(self): + """ + The I{--personal} option to L{makeService} causes it to return a + service which will listen on the server address given by the I{--port} + option. 
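# A sketch of driving twisted.web.tap programmatically, the same code path the
# --path/--port tests here exercise.  The directory "/var/www" is an assumed
# example; the resulting service is what "twistd web" would normally start.
from twisted.web.tap import Options, makeService

options = Options()
options.parseOptions(['--path', '/var/www', '--port', '8080'])
service = makeService(options)   # an IService serving /var/www on TCP port 8080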
+ """ + port = self.mktemp() + options = Options() + options.parseOptions(['--port', 'unix:' + port, '--personal']) + service = makeService(options) + service.startService() + self.addCleanup(service.stopService) + self.assertTrue(os.path.exists(port)) + self.assertTrue(stat.S_ISSOCK(os.stat(port).st_mode)) + + if not IReactorUNIX.providedBy(reactor): + test_personalServer.skip = ( + "The reactor does not support UNIX domain sockets") + + + def test_defaultPersonalPath(self): + """ + If the I{--port} option not specified but the I{--personal} option is, + L{Options} defaults the port to C{UserDirectory.userSocketName} in the + user's home directory. + """ + options = Options() + options.parseOptions(['--personal']) + path = os.path.expanduser( + os.path.join('~', UserDirectory.userSocketName)) + self.assertEqual( + strports.parse(options['port'], None)[:2], + ('UNIX', (path, None))) + + if not IReactorUNIX.providedBy(reactor): + test_defaultPersonalPath.skip = ( + "The reactor does not support UNIX domain sockets") + + + def test_defaultPort(self): + """ + If the I{--port} option is not specified, L{Options} defaults the port + to C{8080}. + """ + options = Options() + options.parseOptions([]) + self.assertEqual( + strports.parse(options['port'], None)[:2], + ('TCP', (8080, None))) + + + def test_wsgi(self): + """ + The I{--wsgi} option takes the fully-qualifed Python name of a WSGI + application object and creates a L{WSGIResource} at the root which + serves that application. + """ + options = Options() + options.parseOptions(['--wsgi', __name__ + '.application']) + root = options['root'] + self.assertTrue(root, WSGIResource) + self.assertIdentical(root._reactor, reactor) + self.assertTrue(isinstance(root._threadpool, ThreadPool)) + self.assertIdentical(root._application, application) + + # The threadpool should start and stop with the reactor. + self.assertFalse(root._threadpool.started) + reactor.fireSystemEvent('startup') + self.assertTrue(root._threadpool.started) + self.assertFalse(root._threadpool.joined) + reactor.fireSystemEvent('shutdown') + self.assertTrue(root._threadpool.joined) + + + def test_invalidApplication(self): + """ + If I{--wsgi} is given an invalid name, L{Options.parseOptions} + raises L{UsageError}. + """ + options = Options() + for name in [__name__ + '.nosuchthing', 'foo.']: + exc = self.assertRaises( + UsageError, options.parseOptions, ['--wsgi', name]) + self.assertEqual(str(exc), "No such WSGI application: %r" % (name,)) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_vhost.py b/vendor/Twisted-10.0.0/twisted/web/test/test_vhost.py new file mode 100644 index 000000000000..076d09c670f9 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_vhost.py @@ -0,0 +1,105 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.vhost}. +""" + +from twisted.internet.defer import gatherResults +from twisted.trial.unittest import TestCase +from twisted.web.http import NOT_FOUND +from twisted.web.static import Data +from twisted.web.vhost import NameVirtualHost +from twisted.web.test.test_web import DummyRequest +from twisted.web.test._util import _render + +class NameVirtualHostTests(TestCase): + """ + Tests for L{NameVirtualHost}. + """ + def test_renderWithoutHost(self): + """ + L{NameVirtualHost.render} returns the result of rendering the + instance's C{default} if it is not C{None} and there is no I{Host} + header in the request. 
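# A sketch of the NameVirtualHost behaviour pinned down by the vhost tests
# below: requests are routed by their Host header, and `default` is used when
# no configured name matches.  Host names and port are assumptions.
from twisted.web import server, static
from twisted.web.vhost import NameVirtualHost
from twisted.internet import reactor

root = NameVirtualHost()
root.default = static.Data("no such host", "text/plain")
root.addHost("example.org", static.Data("example.org content", "text/plain"))
reactor.listenTCP(8080, server.Site(root))
reactor.run()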
+ """ + virtualHostResource = NameVirtualHost() + virtualHostResource.default = Data("correct result", "") + request = DummyRequest(['']) + self.assertEqual( + virtualHostResource.render(request), "correct result") + + + def test_renderWithoutHostNoDefault(self): + """ + L{NameVirtualHost.render} returns a response with a status of I{NOT + FOUND} if the instance's C{default} is C{None} and there is no I{Host} + header in the request. + """ + virtualHostResource = NameVirtualHost() + request = DummyRequest(['']) + d = _render(virtualHostResource, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, NOT_FOUND) + d.addCallback(cbRendered) + return d + + + def test_renderWithHost(self): + """ + L{NameVirtualHost.render} returns the result of rendering the resource + which is the value in the instance's C{host} dictionary corresponding + to the key indicated by the value of the I{Host} header in the request. + """ + virtualHostResource = NameVirtualHost() + virtualHostResource.addHost('example.org', Data("winner", "")) + + request = DummyRequest(['']) + request.headers['host'] = 'example.org' + d = _render(virtualHostResource, request) + def cbRendered(ignored, request): + self.assertEqual(''.join(request.written), "winner") + d.addCallback(cbRendered, request) + + # The port portion of the Host header should not be considered. + requestWithPort = DummyRequest(['']) + requestWithPort.headers['host'] = 'example.org:8000' + dWithPort = _render(virtualHostResource, requestWithPort) + def cbRendered(ignored, requestWithPort): + self.assertEqual(''.join(requestWithPort.written), "winner") + dWithPort.addCallback(cbRendered, requestWithPort) + + return gatherResults([d, dWithPort]) + + + def test_renderWithUnknownHost(self): + """ + L{NameVirtualHost.render} returns the result of rendering the + instance's C{default} if it is not C{None} and there is no host + matching the value of the I{Host} header in the request. + """ + virtualHostResource = NameVirtualHost() + virtualHostResource.default = Data("correct data", "") + request = DummyRequest(['']) + request.headers['host'] = 'example.com' + d = _render(virtualHostResource, request) + def cbRendered(ignored): + self.assertEqual(''.join(request.written), "correct data") + d.addCallback(cbRendered) + return d + + + def test_renderWithUnknownHostNoDefault(self): + """ + L{NameVirtualHost.render} returns a response with a status of I{NOT + FOUND} if the instance's C{default} is C{None} and there is no host + matching the value of the I{Host} header in the request. + """ + virtualHostResource = NameVirtualHost() + request = DummyRequest(['']) + request.headers['host'] = 'example.com' + d = _render(virtualHostResource, request) + def cbRendered(ignored): + self.assertEqual(request.responseCode, NOT_FOUND) + d.addCallback(cbRendered) + return d diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_web.py b/vendor/Twisted-10.0.0/twisted/web/test/test_web.py new file mode 100644 index 000000000000..0fde6ff2fb26 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_web.py @@ -0,0 +1,863 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for various parts of L{twisted.web}. 
+""" + +from cStringIO import StringIO + +from zope.interface import implements +from zope.interface.verify import verifyObject + +from twisted.trial import unittest +from twisted.internet import reactor +from twisted.internet.address import IPv4Address +from twisted.internet.defer import Deferred +from twisted.web import server, resource, util +from twisted.internet import defer, interfaces, task +from twisted.web import iweb, http, http_headers +from twisted.python import log + + +class DummyRequest: + """ + Represents a dummy or fake request. + + @ivar _finishedDeferreds: C{None} or a C{list} of L{Deferreds} which will + be called back with C{None} when C{finish} is called or which will be + errbacked if C{processingFailed} is called. + + @type headers: C{dict} + @ivar headers: A mapping of header name to header value for all request + headers. + + @type outgoingHeaders: C{dict} + @ivar outgoingHeaders: A mapping of header name to header value for all + response headers. + + @type responseCode: C{int} + @ivar responseCode: The response code which was passed to + C{setResponseCode}. + + @type written: C{list} of C{str} + @ivar written: The bytes which have been written to the request. + """ + uri = 'http://dummy/' + method = 'GET' + client = None + + def registerProducer(self, prod,s): + self.go = 1 + while self.go: + prod.resumeProducing() + + def unregisterProducer(self): + self.go = 0 + + + def __init__(self, postpath, session=None): + self.sitepath = [] + self.written = [] + self.finished = 0 + self.postpath = postpath + self.prepath = [] + self.session = None + self.protoSession = session or server.Session(0, self) + self.args = {} + self.outgoingHeaders = {} + self.responseHeaders = http_headers.Headers() + self.responseCode = None + self.headers = {} + self._finishedDeferreds = [] + + + def getHeader(self, name): + """ + Retrieve the value of a request header. + + @type name: C{str} + @param name: The name of the request header for which to retrieve the + value. Header names are compared case-insensitively. + + @rtype: C{str} or L{NoneType} + @return: The value of the specified request header. + """ + return self.headers.get(name.lower(), None) + + + def setHeader(self, name, value): + """TODO: make this assert on write() if the header is content-length + """ + self.outgoingHeaders[name.lower()] = value + + def getSession(self): + if self.session: + return self.session + assert not self.written, "Session cannot be requested after data has been written." + self.session = self.protoSession + return self.session + + + def render(self, resource): + """ + Render the given resource as a response to this request. + + This implementation only handles a few of the most common behaviors of + resources. It can handle a render method that returns a string or + C{NOT_DONE_YET}. It doesn't know anything about the semantics of + request methods (eg HEAD) nor how to set any particular headers. + Basically, it's largely broken, but sufficient for some tests at least. + It should B{not} be expanded to do all the same stuff L{Request} does. + Instead, L{DummyRequest} should be phased out and L{Request} (or some + other real code factored in a different way) used. + """ + result = resource.render(self) + if result is server.NOT_DONE_YET: + return + self.write(result) + self.finish() + + + def write(self, data): + self.written.append(data) + + def notifyFinish(self): + """ + Return a L{Deferred} which is called back with C{None} when the request + is finished. 
This will probably only work if you haven't called + C{finish} yet. + """ + finished = Deferred() + self._finishedDeferreds.append(finished) + return finished + + + def finish(self): + """ + Record that the request is finished and callback and L{Deferred}s + waiting for notification of this. + """ + self.finished = self.finished + 1 + if self._finishedDeferreds is not None: + observers = self._finishedDeferreds + self._finishedDeferreds = None + for obs in observers: + obs.callback(None) + + + def processingFailed(self, reason): + """ + Errback and L{Deferreds} waiting for finish notification. + """ + if self._finishedDeferreds is not None: + observers = self._finishedDeferreds + self._finishedDeferreds = None + for obs in observers: + obs.errback(reason) + + + def addArg(self, name, value): + self.args[name] = [value] + + + def setResponseCode(self, code, message=None): + """ + Set the HTTP status response code, but takes care that this is called + before any data is written. + """ + assert not self.written, "Response code cannot be set after data has been written: %s." % "@@@@".join(self.written) + self.responseCode = code + self.responseMessage = message + + + def setLastModified(self, when): + assert not self.written, "Last-Modified cannot be set after data has been written: %s." % "@@@@".join(self.written) + + + def setETag(self, tag): + assert not self.written, "ETag cannot be set after data has been written: %s." % "@@@@".join(self.written) + + + def getClientIP(self): + """ + Return the IPv4 address of the client which made this request, if there + is one, otherwise C{None}. + """ + if isinstance(self.client, IPv4Address): + return self.client.host + return None + + +class ResourceTestCase(unittest.TestCase): + def testListEntities(self): + r = resource.Resource() + self.failUnlessEqual([], r.listEntities()) + + +class SimpleResource(resource.Resource): + def render(self, request): + if http.CACHED in (request.setLastModified(10), + request.setETag('MatchingTag')): + return '' + else: + return "correct" + + +class DummyChannel: + class TCP: + port = 80 + disconnected = False + + def __init__(self): + self.written = StringIO() + self.producers = [] + + def getPeer(self): + return IPv4Address("TCP", '192.168.1.1', 12344) + + def write(self, bytes): + assert isinstance(bytes, str) + self.written.write(bytes) + + def writeSequence(self, iovec): + map(self.write, iovec) + + def getHost(self): + return IPv4Address("TCP", '10.0.0.1', self.port) + + def registerProducer(self, producer, streaming): + self.producers.append((producer, streaming)) + + def loseConnection(self): + self.disconnected = True + + + class SSL(TCP): + implements(interfaces.ISSLTransport) + + site = server.Site(resource.Resource()) + + def __init__(self): + self.transport = self.TCP() + + + def requestDone(self, request): + pass + + + +class SiteTest(unittest.TestCase): + def test_simplestSite(self): + """ + L{Site.getResourceFor} returns the C{""} child of the root resource it + is constructed with when processing a request for I{/}. + """ + sres1 = SimpleResource() + sres2 = SimpleResource() + sres1.putChild("",sres2) + site = server.Site(sres1) + self.assertIdentical( + site.getResourceFor(DummyRequest([''])), + sres2, "Got the wrong resource.") + + + +class SessionTest(unittest.TestCase): + """ + Tests for L{server.Session}. + """ + def setUp(self): + """ + Create a site with one active session using a deterministic, easily + controlled clock. 
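# The testing pattern DummyRequest enables, in miniature: render a resource
# against a fake request and inspect what it wrote.  static.Data is used here
# only as a convenient resource to render; this is a sketch, not part of the
# vendored module.
from twisted.web import static

request = DummyRequest([''])
request.render(static.Data("hello", "text/plain"))
''.join(request.written)   # "hello"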
+ """ + self.clock = task.Clock() + self.uid = 'unique' + self.site = server.Site(resource.Resource()) + self.session = server.Session(self.site, self.uid, self.clock) + self.site.sessions[self.uid] = self.session + + + def test_defaultReactor(self): + """ + If not value is passed to L{server.Session.__init__}, the global + reactor is used. + """ + session = server.Session(server.Site(resource.Resource()), '123') + self.assertIdentical(session._reactor, reactor) + + + def test_startCheckingExpiration(self): + """ + L{server.Session.startCheckingExpiration} causes the session to expire + after L{server.Session.sessionTimeout} seconds without activity. + """ + self.session.startCheckingExpiration() + + # Advance to almost the timeout - nothing should happen. + self.clock.advance(self.session.sessionTimeout - 1) + self.assertIn(self.uid, self.site.sessions) + + # Advance to the timeout, the session should expire. + self.clock.advance(1) + self.assertNotIn(self.uid, self.site.sessions) + + # There should be no calls left over, either. + self.assertFalse(self.clock.calls) + + + def test_expire(self): + """ + L{server.Session.expire} expires the session. + """ + self.session.expire() + # It should be gone from the session dictionary. + self.assertNotIn(self.uid, self.site.sessions) + # And there should be no pending delayed calls. + self.assertFalse(self.clock.calls) + + + def test_expireWhileChecking(self): + """ + L{server.Session.expire} expires the session even if the timeout call + isn't due yet. + """ + self.session.startCheckingExpiration() + self.test_expire() + + + def test_notifyOnExpire(self): + """ + A function registered with L{server.Session.notifyOnExpire} is called + when the session expires. + """ + callbackRan = [False] + def expired(): + callbackRan[0] = True + self.session.notifyOnExpire(expired) + self.session.expire() + self.assertTrue(callbackRan[0]) + + + def test_touch(self): + """ + L{server.Session.touch} updates L{server.Session.lastModified} and + delays session timeout. + """ + # Make sure it works before startCheckingExpiration + self.clock.advance(3) + self.session.touch() + self.assertEqual(self.session.lastModified, 3) + + # And after startCheckingExpiration + self.session.startCheckingExpiration() + self.clock.advance(self.session.sessionTimeout - 1) + self.session.touch() + self.clock.advance(self.session.sessionTimeout - 1) + self.assertIn(self.uid, self.site.sessions) + + # It should have advanced it by just sessionTimeout, no more. + self.clock.advance(1) + self.assertNotIn(self.uid, self.site.sessions) + + + def test_startCheckingExpirationParameterDeprecated(self): + """ + L{server.Session.startCheckingExpiration} emits a deprecation warning + if it is invoked with a parameter. + """ + self.session.startCheckingExpiration(123) + warnings = self.flushWarnings([ + self.test_startCheckingExpirationParameterDeprecated]) + self.assertEqual(len(warnings), 1) + self.assertEqual(warnings[0]['category'], DeprecationWarning) + self.assertEqual( + warnings[0]['message'], + "The lifetime parameter to startCheckingExpiration is deprecated " + "since Twisted 9.0. See Session.sessionTimeout instead.") + + + def test_checkExpiredDeprecated(self): + """ + L{server.Session.checkExpired} is deprecated. 
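# A sketch of how the session machinery tested here is reached from a resource:
# getSession() creates or resumes a Session, touch() postpones its timeout, and
# notifyOnExpire() registers cleanup.  The resource itself is hypothetical.
from twisted.web import resource

class SessionEcho(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        session = request.getSession()
        session.touch()                        # push the expiry forward
        session.notifyOnExpire(lambda: None)   # hook run when the session expires
        return "session uid: %s" % (session.uid,)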
+ """ + self.session.checkExpired() + warnings = self.flushWarnings([self.test_checkExpiredDeprecated]) + self.assertEqual(warnings[0]['category'], DeprecationWarning) + self.assertEqual( + warnings[0]['message'], + "Session.checkExpired is deprecated since Twisted 9.0; sessions " + "check themselves now, you don't need to.") + self.assertEqual(len(warnings), 1) + + +# Conditional requests: +# If-None-Match, If-Modified-Since + +# make conditional request: +# normal response if condition succeeds +# if condition fails: +# response code +# no body + +def httpBody(whole): + return whole.split('\r\n\r\n', 1)[1] + +def httpHeader(whole, key): + key = key.lower() + headers = whole.split('\r\n\r\n', 1)[0] + for header in headers.split('\r\n'): + if header.lower().startswith(key): + return header.split(':', 1)[1].strip() + return None + +def httpCode(whole): + l1 = whole.split('\r\n', 1)[0] + return int(l1.split()[1]) + +class ConditionalTest(unittest.TestCase): + """ + web.server's handling of conditional requests for cache validation. + """ + + # XXX: test web.distrib. + + def setUp(self): + self.resrc = SimpleResource() + self.resrc.putChild('', self.resrc) + self.site = server.Site(self.resrc) + self.site = server.Site(self.resrc) + self.site.logFile = log.logfile + + # HELLLLLLLLLLP! This harness is Very Ugly. + self.channel = self.site.buildProtocol(None) + self.transport = http.StringTransport() + self.transport.close = lambda *a, **kw: None + self.transport.disconnecting = lambda *a, **kw: 0 + self.transport.getPeer = lambda *a, **kw: "peer" + self.transport.getHost = lambda *a, **kw: "host" + self.channel.makeConnection(self.transport) + for l in ["GET / HTTP/1.1", + "Accept: text/html"]: + self.channel.lineReceived(l) + + def tearDown(self): + self.channel.connectionLost(None) + + + def _modifiedTest(self, modifiedSince): + """ + Given the value C{modifiedSince} for the I{If-Modified-Since} + header, verify that a response with a 200 code and the resource as + the body is returned. + """ + self.channel.lineReceived("If-Modified-Since: " + modifiedSince) + self.channel.lineReceived('') + result = self.transport.getvalue() + self.failUnlessEqual(httpCode(result), http.OK) + self.failUnlessEqual(httpBody(result), "correct") + + + def test_modified(self): + """ + If a request is made with an I{If-Modified-Since} header value with + a timestamp indicating a time before the last modification of the + requested resource, a 200 response is returned along with a response + body containing the resource. + """ + self._modifiedTest(http.datetimeToString(1)) + + + def test_unmodified(self): + """ + If a request is made with an I{If-Modified-Since} header value with + a timestamp indicating a time after the last modification of the + request resource, a 304 response is returned along with an empty + response body. + """ + self.channel.lineReceived("If-Modified-Since: %s" + % http.datetimeToString(100)) + self.channel.lineReceived('') + result = self.transport.getvalue() + self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED) + self.failUnlessEqual(httpBody(result), "") + + + def test_invalidTimestamp(self): + """ + If a request is made with an I{If-Modified-Since} header value which + cannot be parsed, the header is treated as not having been present + and a normal 200 response is returned with a response body + containing the resource. 
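# The resource-side half of the cache validation exercised by ConditionalTest,
# mirroring the SimpleResource these tests render: setLastModified/setETag
# return http.CACHED when the request's validators already match, in which case
# an empty body yields a 304 response.
from twisted.web import resource, http

class ValidatedResource(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        if http.CACHED in (request.setLastModified(10),
                           request.setETag('MatchingTag')):
            return ''        # 304 Not Modified, no body
        return "correct"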
+ """ + self._modifiedTest("like, maybe a week ago, I guess?") + + + def test_invalidTimestampYear(self): + """ + If a request is made with an I{If-Modified-Since} header value which + contains a string in the year position which is not an integer, the + header is treated as not having been present and a normal 200 + response is returned with a response body containing the resource. + """ + self._modifiedTest("Thu, 01 Jan blah 00:00:10 GMT") + + + def test_invalidTimestampTooLongAgo(self): + """ + If a request is made with an I{If-Modified-Since} header value which + contains a year before the epoch, the header is treated as not + having been present and a normal 200 response is returned with a + response body containing the resource. + """ + self._modifiedTest("Thu, 01 Jan 1899 00:00:10 GMT") + + + def test_invalidTimestampMonth(self): + """ + If a request is made with an I{If-Modified-Since} header value which + contains a string in the month position which is not a recognized + month abbreviation, the header is treated as not having been present + and a normal 200 response is returned with a response body + containing the resource. + """ + self._modifiedTest("Thu, 01 Blah 1970 00:00:10 GMT") + + + def test_etagMatchedNot(self): + """If-None-Match ETag cache validator (positive)""" + self.channel.lineReceived("If-None-Match: unmatchedTag") + self.channel.lineReceived('') + result = self.transport.getvalue() + self.failUnlessEqual(httpCode(result), http.OK) + self.failUnlessEqual(httpBody(result), "correct") + + def test_etagMatched(self): + """If-None-Match ETag cache validator (negative)""" + self.channel.lineReceived("If-None-Match: MatchingTag") + self.channel.lineReceived('') + result = self.transport.getvalue() + self.failUnlessEqual(httpHeader(result, "ETag"), "MatchingTag") + self.failUnlessEqual(httpCode(result), http.NOT_MODIFIED) + self.failUnlessEqual(httpBody(result), "") + + + +from twisted.web import google +class GoogleTestCase(unittest.TestCase): + def testCheckGoogle(self): + raise unittest.SkipTest("no violation of google ToS") + d = google.checkGoogle('site:www.twistedmatrix.com twisted') + d.addCallback(self.assertEquals, 'http://twistedmatrix.com/') + return d + + + + + +class RequestTests(unittest.TestCase): + """ + Tests for the HTTP request class, L{server.Request}. + """ + + def test_interface(self): + """ + L{server.Request} instances provide L{iweb.IRequest}. 
+ """ + self.assertTrue( + verifyObject(iweb.IRequest, server.Request(DummyChannel(), True))) + + + def testChildLink(self): + request = server.Request(DummyChannel(), 1) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + self.assertEqual(request.childLink('baz'), 'bar/baz') + request = server.Request(DummyChannel(), 1) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar/', 'HTTP/1.0') + self.assertEqual(request.childLink('baz'), 'baz') + + def testPrePathURLSimple(self): + request = server.Request(DummyChannel(), 1) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + request.setHost('example.com', 80) + self.assertEqual(request.prePathURL(), 'http://example.com/foo/bar') + + def testPrePathURLNonDefault(self): + d = DummyChannel() + d.transport.port = 81 + request = server.Request(d, 1) + request.setHost('example.com', 81) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + self.assertEqual(request.prePathURL(), 'http://example.com:81/foo/bar') + + def testPrePathURLSSLPort(self): + d = DummyChannel() + d.transport.port = 443 + request = server.Request(d, 1) + request.setHost('example.com', 443) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + self.assertEqual(request.prePathURL(), 'http://example.com:443/foo/bar') + + def testPrePathURLSSLPortAndSSL(self): + d = DummyChannel() + d.transport = DummyChannel.SSL() + d.transport.port = 443 + request = server.Request(d, 1) + request.setHost('example.com', 443) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + self.assertEqual(request.prePathURL(), 'https://example.com/foo/bar') + + def testPrePathURLHTTPPortAndSSL(self): + d = DummyChannel() + d.transport = DummyChannel.SSL() + d.transport.port = 80 + request = server.Request(d, 1) + request.setHost('example.com', 80) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + self.assertEqual(request.prePathURL(), 'https://example.com:80/foo/bar') + + def testPrePathURLSSLNonDefault(self): + d = DummyChannel() + d.transport = DummyChannel.SSL() + d.transport.port = 81 + request = server.Request(d, 1) + request.setHost('example.com', 81) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + self.assertEqual(request.prePathURL(), 'https://example.com:81/foo/bar') + + def testPrePathURLSetSSLHost(self): + d = DummyChannel() + d.transport.port = 81 + request = server.Request(d, 1) + request.setHost('foo.com', 81, 1) + request.gotLength(0) + request.requestReceived('GET', '/foo/bar', 'HTTP/1.0') + self.assertEqual(request.prePathURL(), 'https://foo.com:81/foo/bar') + + + def test_prePathURLQuoting(self): + """ + L{Request.prePathURL} quotes special characters in the URL segments to + preserve the original meaning. 
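# A condensed rerun of testPrePathURLSimple above: how a Request reconstructs
# the URL of the path it is serving (it switches to https:// when the transport
# provides ISSLTransport, as the SSL variants of these tests show).
request = server.Request(DummyChannel(), 1)
request.gotLength(0)
request.requestReceived('GET', '/foo/bar', 'HTTP/1.0')
request.setHost('example.com', 80)
request.prePathURL()   # 'http://example.com/foo/bar'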
+ """ + d = DummyChannel() + request = server.Request(d, 1) + request.setHost('example.com', 80) + request.gotLength(0) + request.requestReceived('GET', '/foo%2Fbar', 'HTTP/1.0') + self.assertEqual(request.prePathURL(), 'http://example.com/foo%2Fbar') + + + +class RootResource(resource.Resource): + isLeaf=0 + def getChildWithDefault(self, name, request): + request.rememberRootURL() + return resource.Resource.getChildWithDefault(self, name, request) + def render(self, request): + return '' + +class RememberURLTest(unittest.TestCase): + def createServer(self, r): + chan = DummyChannel() + chan.site = server.Site(r) + return chan + + def testSimple(self): + r = resource.Resource() + r.isLeaf=0 + rr = RootResource() + r.putChild('foo', rr) + rr.putChild('', rr) + rr.putChild('bar', resource.Resource()) + chan = self.createServer(r) + for url in ['/foo/', '/foo/bar', '/foo/bar/baz', '/foo/bar/']: + request = server.Request(chan, 1) + request.setHost('example.com', 81) + request.gotLength(0) + request.requestReceived('GET', url, 'HTTP/1.0') + self.assertEqual(request.getRootURL(), "http://example.com/foo") + + def testRoot(self): + rr = RootResource() + rr.putChild('', rr) + rr.putChild('bar', resource.Resource()) + chan = self.createServer(rr) + for url in ['/', '/bar', '/bar/baz', '/bar/']: + request = server.Request(chan, 1) + request.setHost('example.com', 81) + request.gotLength(0) + request.requestReceived('GET', url, 'HTTP/1.0') + self.assertEqual(request.getRootURL(), "http://example.com/") + + +class NewRenderResource(resource.Resource): + def render_GET(self, request): + return "hi hi" + + def render_HEH(self, request): + return "ho ho" + + +class NewRenderTestCase(unittest.TestCase): + def _getReq(self): + d = DummyChannel() + d.site.resource.putChild('newrender', NewRenderResource()) + d.transport.port = 81 + request = server.Request(d, 1) + request.setHost('example.com', 81) + request.gotLength(0) + return request + + def testGoodMethods(self): + req = self._getReq() + req.requestReceived('GET', '/newrender', 'HTTP/1.0') + self.assertEquals(req.transport.getvalue().splitlines()[-1], 'hi hi') + + req = self._getReq() + req.requestReceived('HEH', '/newrender', 'HTTP/1.0') + self.assertEquals(req.transport.getvalue().splitlines()[-1], 'ho ho') + + def testBadMethods(self): + req = self._getReq() + req.requestReceived('CONNECT', '/newrender', 'HTTP/1.0') + self.assertEquals(req.code, 501) + + req = self._getReq() + req.requestReceived('hlalauguG', '/newrender', 'HTTP/1.0') + self.assertEquals(req.code, 501) + + def testImplicitHead(self): + req = self._getReq() + req.requestReceived('HEAD', '/newrender', 'HTTP/1.0') + self.assertEquals(req.code, 200) + self.assertEquals(-1, req.transport.getvalue().find('hi hi')) + + + +class SDResource(resource.Resource): + def __init__(self,default): + self.default = default + + + def getChildWithDefault(self, name, request): + d = defer.succeed(self.default) + resource = util.DeferredResource(d) + return resource.getChildWithDefault(name, request) + + + +class DeferredResourceTests(unittest.TestCase): + """ + Tests for L{DeferredResource}. + """ + + def testDeferredResource(self): + r = resource.Resource() + r.isLeaf = 1 + s = SDResource(r) + d = DummyRequest(['foo', 'bar', 'baz']) + resource.getChildForRequest(s, d) + self.assertEqual(d.postpath, ['bar', 'baz']) + + + def test_render(self): + """ + L{DeferredResource} uses the request object's C{render} method to + render the resource which is the result of the L{Deferred} being + handled. 
+ """ + rendered = [] + request = DummyRequest([]) + request.render = rendered.append + + result = resource.Resource() + deferredResource = util.DeferredResource(defer.succeed(result)) + deferredResource.render(request) + self.assertEquals(rendered, [result]) + + + +class DummyRequestForLogTest(DummyRequest): + uri = '/dummy' # parent class uri has "http://", which doesn't really happen + code = 123 + + clientproto = 'HTTP/1.0' + sentLength = None + client = IPv4Address('TCP', '1.2.3.4', 12345) + + + +class TestLogEscaping(unittest.TestCase): + def setUp(self): + self.site = http.HTTPFactory() + self.site.logFile = StringIO() + self.request = DummyRequestForLogTest(self.site, False) + + def testSimple(self): + http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % ( + 25, 'Oct', 2004, 12, 31, 59) + self.site.log(self.request) + self.site.logFile.seek(0) + self.assertEqual( + self.site.logFile.read(), + '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "-"\n') + + def testMethodQuote(self): + http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % ( + 25, 'Oct', 2004, 12, 31, 59) + self.request.method = 'G"T' + self.site.log(self.request) + self.site.logFile.seek(0) + self.assertEqual( + self.site.logFile.read(), + '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "G\\"T /dummy HTTP/1.0" 123 - "-" "-"\n') + + def testRequestQuote(self): + http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % ( + 25, 'Oct', 2004, 12, 31, 59) + self.request.uri='/dummy"withquote' + self.site.log(self.request) + self.site.logFile.seek(0) + self.assertEqual( + self.site.logFile.read(), + '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy\\"withquote HTTP/1.0" 123 - "-" "-"\n') + + def testProtoQuote(self): + http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % ( + 25, 'Oct', 2004, 12, 31, 59) + self.request.clientproto='HT"P/1.0' + self.site.log(self.request) + self.site.logFile.seek(0) + self.assertEqual( + self.site.logFile.read(), + '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HT\\"P/1.0" 123 - "-" "-"\n') + + def testRefererQuote(self): + http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % ( + 25, 'Oct', 2004, 12, 31, 59) + self.request.headers['referer'] = 'http://malicious" ".website.invalid' + self.site.log(self.request) + self.site.logFile.seek(0) + self.assertEqual( + self.site.logFile.read(), + '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "http://malicious\\" \\".website.invalid" "-"\n') + + def testUserAgentQuote(self): + http._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % ( + 25, 'Oct', 2004, 12, 31, 59) + self.request.headers['user-agent'] = 'Malicious Web" Evil' + self.site.log(self.request) + self.site.logFile.seek(0) + self.assertEqual( + self.site.logFile.read(), + '1.2.3.4 - - [25/Oct/2004:12:31:59 +0000] "GET /dummy HTTP/1.0" 123 - "-" "Malicious Web\\" Evil"\n') diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_webclient.py b/vendor/Twisted-10.0.0/twisted/web/test/test_webclient.py new file mode 100644 index 000000000000..314f3e739c87 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_webclient.py @@ -0,0 +1,1060 @@ +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.client}. 
+""" + +import os +from errno import ENOSPC + +from urlparse import urlparse + +from twisted.trial import unittest +from twisted.web import server, static, client, error, util, resource, http_headers +from twisted.internet import reactor, defer, interfaces +from twisted.python.filepath import FilePath +from twisted.python.log import msg +from twisted.protocols.policies import WrappingFactory +from twisted.test.proto_helpers import StringTransport +from twisted.test.proto_helpers import MemoryReactor +from twisted.internet.address import IPv4Address +from twisted.internet.task import Clock +from twisted.internet.error import ConnectionRefusedError +from twisted.internet.protocol import Protocol +from twisted.internet.defer import Deferred +from twisted.web.client import Request +from twisted.web.error import SchemeNotSupported + +try: + from twisted.internet import ssl +except: + ssl = None + + + +class ExtendedRedirect(resource.Resource): + """ + Redirection resource. + + The HTTP status code is set according to the C{code} query parameter. + + @type lastMethod: C{str} + @ivar lastMethod: Last handled HTTP request method + """ + isLeaf = 1 + lastMethod = None + + + def __init__(self, url): + resource.Resource.__init__(self) + self.url = url + + + def render(self, request): + if self.lastMethod: + self.lastMethod = request.method + return "OK Thnx!" + else: + self.lastMethod = request.method + code = int(request.args['code'][0]) + return self.redirectTo(self.url, request, code) + + + def getChild(self, name, request): + return self + + + def redirectTo(self, url, request, code): + request.setResponseCode(code) + request.setHeader("location", url) + return "OK Bye!" + + + +class ForeverTakingResource(resource.Resource): + """ + L{ForeverTakingResource} is a resource which never finishes responding + to requests. + """ + def __init__(self, write=False): + resource.Resource.__init__(self) + self._write = write + + def render(self, request): + if self._write: + request.write('some bytes') + return server.NOT_DONE_YET + + +class CookieMirrorResource(resource.Resource): + def render(self, request): + l = [] + for k,v in request.received_cookies.items(): + l.append((k, v)) + l.sort() + return repr(l) + +class RawCookieMirrorResource(resource.Resource): + def render(self, request): + return repr(request.getHeader('cookie')) + +class ErrorResource(resource.Resource): + + def render(self, request): + request.setResponseCode(401) + if request.args.get("showlength"): + request.setHeader("content-length", "0") + return "" + +class NoLengthResource(resource.Resource): + + def render(self, request): + return "nolength" + + + +class HostHeaderResource(resource.Resource): + """ + A testing resource which renders itself as the value of the host header + from the request. + """ + def render(self, request): + return request.received_headers['host'] + + + +class PayloadResource(resource.Resource): + """ + A testing resource which renders itself as the contents of the request body + as long as the request body is 100 bytes long, otherwise which renders + itself as C{"ERROR"}. 
+ """ + def render(self, request): + data = request.content.read() + contentLength = request.received_headers['content-length'] + if len(data) != 100 or int(contentLength) != 100: + return "ERROR" + return data + + + +class BrokenDownloadResource(resource.Resource): + + def render(self, request): + # only sends 3 bytes even though it claims to send 5 + request.setHeader("content-length", "5") + request.write('abc') + return '' + +class CountingRedirect(util.Redirect): + """ + A L{util.Redirect} resource that keeps track of the number of times the + resource has been accessed. + """ + def __init__(self, *a, **kw): + util.Redirect.__init__(self, *a, **kw) + self.count = 0 + + def render(self, request): + self.count += 1 + return util.Redirect.render(self, request) + + + +class ParseUrlTestCase(unittest.TestCase): + """ + Test URL parsing facility and defaults values. + """ + + def test_parse(self): + """ + L{client._parse} correctly parses a URL into its various components. + """ + # The default port for HTTP is 80. + self.assertEqual( + client._parse('http://127.0.0.1/'), + ('http', '127.0.0.1', 80, '/')) + + # The default port for HTTPS is 443. + self.assertEqual( + client._parse('https://127.0.0.1/'), + ('https', '127.0.0.1', 443, '/')) + + # Specifying a port. + self.assertEqual( + client._parse('http://spam:12345/'), + ('http', 'spam', 12345, '/')) + + # Weird (but commonly accepted) structure uses default port. + self.assertEqual( + client._parse('http://spam:/'), + ('http', 'spam', 80, '/')) + + # Spaces in the hostname are trimmed, the default path is /. + self.assertEqual( + client._parse('http://foo '), + ('http', 'foo', 80, '/')) + + + def test_externalUnicodeInterference(self): + """ + L{client._parse} should return C{str} for the scheme, host, and path + elements of its return tuple, even when passed an URL which has + previously been passed to L{urlparse} as a C{unicode} string. + """ + badInput = u'http://example.com/path' + goodInput = badInput.encode('ascii') + urlparse(badInput) + scheme, host, port, path = client._parse(goodInput) + self.assertTrue(isinstance(scheme, str)) + self.assertTrue(isinstance(host, str)) + self.assertTrue(isinstance(path, str)) + + + +class HTTPPageGetterTests(unittest.TestCase): + """ + Tests for L{HTTPPagerGetter}, the HTTP client protocol implementation + used to implement L{getPage}. + """ + def test_earlyHeaders(self): + """ + When a connection is made, L{HTTPPagerGetter} sends the headers from + its factory's C{headers} dict. If I{Host} or I{Content-Length} is + present in this dict, the values are not sent, since they are sent with + special values before the C{headers} dict is processed. If + I{User-Agent} is present in the dict, it overrides the value of the + C{agent} attribute of the factory. If I{Cookie} is present in the + dict, its value is added to the values from the factory's C{cookies} + attribute. 
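# The URL decomposition checked by ParseUrlTestCase above, shown directly; the
# expected tuples come from those assertions.
from twisted.web import client

client._parse('http://127.0.0.1/')     # ('http', '127.0.0.1', 80, '/')
client._parse('https://127.0.0.1/')    # ('https', '127.0.0.1', 443, '/')
client._parse('http://spam:12345/')    # ('http', 'spam', 12345, '/')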
+ """ + factory = client.HTTPClientFactory( + 'http://foo/bar', + agent="foobar", + cookies={'baz': 'quux'}, + postdata="some data", + headers={ + 'Host': 'example.net', + 'User-Agent': 'fooble', + 'Cookie': 'blah blah', + 'Content-Length': '12981', + 'Useful': 'value'}) + transport = StringTransport() + protocol = client.HTTPPageGetter() + protocol.factory = factory + protocol.makeConnection(transport) + self.assertEqual( + transport.value(), + "GET /bar HTTP/1.0\r\n" + "Host: example.net\r\n" + "User-Agent: foobar\r\n" + "Content-Length: 9\r\n" + "Useful: value\r\n" + "connection: close\r\n" + "Cookie: blah blah; baz=quux\r\n" + "\r\n" + "some data") + + + +class WebClientTestCase(unittest.TestCase): + def _listen(self, site): + return reactor.listenTCP(0, site, interface="127.0.0.1") + + def setUp(self): + self.cleanupServerConnections = 0 + name = self.mktemp() + os.mkdir(name) + FilePath(name).child("file").setContent("0123456789") + r = static.File(name) + r.putChild("redirect", util.Redirect("/file")) + self.infiniteRedirectResource = CountingRedirect("/infiniteRedirect") + r.putChild("infiniteRedirect", self.infiniteRedirectResource) + r.putChild("wait", ForeverTakingResource()) + r.putChild("write-then-wait", ForeverTakingResource(write=True)) + r.putChild("error", ErrorResource()) + r.putChild("nolength", NoLengthResource()) + r.putChild("host", HostHeaderResource()) + r.putChild("payload", PayloadResource()) + r.putChild("broken", BrokenDownloadResource()) + r.putChild("cookiemirror", CookieMirrorResource()) + + miscasedHead = static.Data("miscased-head GET response content", "major/minor") + miscasedHead.render_Head = lambda request: "miscased-head content" + r.putChild("miscased-head", miscasedHead) + + self.extendedRedirect = ExtendedRedirect('/extendedRedirect') + r.putChild("extendedRedirect", self.extendedRedirect) + self.site = server.Site(r, timeout=None) + self.wrapper = WrappingFactory(self.site) + self.port = self._listen(self.wrapper) + self.portno = self.port.getHost().port + + def tearDown(self): + # If the test indicated it might leave some server-side connections + # around, clean them up. + connections = self.wrapper.protocols.keys() + # If there are fewer server-side connections than requested, + # that's okay. Some might have noticed that the client closed + # the connection and cleaned up after themselves. + for n in range(min(len(connections), self.cleanupServerConnections)): + proto = connections.pop() + msg("Closing %r" % (proto,)) + proto.transport.loseConnection() + if connections: + msg("Some left-over connections; this test is probably buggy.") + return self.port.stopListening() + + def getURL(self, path): + return "http://127.0.0.1:%d/%s" % (self.portno, path) + + def testPayload(self): + s = "0123456789" * 10 + return client.getPage(self.getURL("payload"), postdata=s + ).addCallback(self.assertEquals, s + ) + + + def test_getPageBrokenDownload(self): + """ + If the connection is closed before the number of bytes indicated by + I{Content-Length} have been received, the L{Deferred} returned by + L{getPage} fails with L{PartialDownloadError}. 
+ """ + d = client.getPage(self.getURL("broken")) + d = self.assertFailure(d, client.PartialDownloadError) + d.addCallback(lambda exc: self.assertEquals(exc.response, "abc")) + return d + + + def test_downloadPageBrokenDownload(self): + """ + If the connection is closed before the number of bytes indicated by + I{Content-Length} have been received, the L{Deferred} returned by + L{downloadPage} fails with L{PartialDownloadError}. + """ + # test what happens when download gets disconnected in the middle + path = FilePath(self.mktemp()) + d = client.downloadPage(self.getURL("broken"), path.path) + d = self.assertFailure(d, client.PartialDownloadError) + + def checkResponse(response): + """ + The HTTP status code from the server is propagated through the + C{PartialDownloadError}. + """ + self.assertEquals(response.status, "200") + self.assertEquals(response.message, "OK") + return response + d.addCallback(checkResponse) + + def cbFailed(ignored): + self.assertEquals(path.getContent(), "abc") + d.addCallback(cbFailed) + return d + + + def test_downloadPageLogsFileCloseError(self): + """ + If there is an exception closing the file being written to after the + connection is prematurely closed, that exception is logged. + """ + class BrokenFile: + def write(self, bytes): + pass + + def close(self): + raise IOError(ENOSPC, "No file left on device") + + d = client.downloadPage(self.getURL("broken"), BrokenFile()) + d = self.assertFailure(d, client.PartialDownloadError) + def cbFailed(ignored): + self.assertEquals(len(self.flushLoggedErrors(IOError)), 1) + d.addCallback(cbFailed) + return d + + + def testHostHeader(self): + # if we pass Host header explicitly, it should be used, otherwise + # it should extract from url + return defer.gatherResults([ + client.getPage(self.getURL("host")).addCallback(self.assertEquals, "127.0.0.1"), + client.getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(self.assertEquals, "www.example.com")]) + + + def test_getPage(self): + """ + L{client.getPage} returns a L{Deferred} which is called back with + the body of the response if the default method B{GET} is used. + """ + d = client.getPage(self.getURL("file")) + d.addCallback(self.assertEquals, "0123456789") + return d + + + def test_getPageHEAD(self): + """ + L{client.getPage} returns a L{Deferred} which is called back with + the empty string if the method is I{HEAD} and there is a successful + response code. + """ + d = client.getPage(self.getURL("file"), method="HEAD") + d.addCallback(self.assertEquals, "") + return d + + + + def test_getPageNotQuiteHEAD(self): + """ + If the request method is a different casing of I{HEAD} (ie, not all + capitalized) then it is not a I{HEAD} request and the response body + is returned. + """ + d = client.getPage(self.getURL("miscased-head"), method='Head') + d.addCallback(self.assertEquals, "miscased-head content") + return d + + + def test_timeoutNotTriggering(self): + """ + When a non-zero timeout is passed to L{getPage} and the page is + retrieved before the timeout period elapses, the L{Deferred} is + called back with the contents of the page. + """ + d = client.getPage(self.getURL("host"), timeout=100) + d.addCallback(self.assertEquals, "127.0.0.1") + return d + + + def test_timeoutTriggering(self): + """ + When a non-zero timeout is passed to L{getPage} and that many + seconds elapse before the server responds to the request. the + L{Deferred} is errbacked with a L{error.TimeoutError}. + """ + # This will probably leave some connections around. 
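# The client calls these tests revolve around, in their simplest form.  The URL
# and filename are assumptions for illustration, and a running reactor is
# required for the Deferreds to fire.
from twisted.web import client

d = client.getPage("http://127.0.0.1:8080/file", timeout=60)
d.addCallback(lambda body: body)   # fires with the response body as a string
d2 = client.downloadPage("http://127.0.0.1:8080/file", "local-copy")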
+ self.cleanupServerConnections = 1 + return self.assertFailure( + client.getPage(self.getURL("wait"), timeout=0.000001), + defer.TimeoutError) + + + def testDownloadPage(self): + downloads = [] + downloadData = [("file", self.mktemp(), "0123456789"), + ("nolength", self.mktemp(), "nolength")] + + for (url, name, data) in downloadData: + d = client.downloadPage(self.getURL(url), name) + d.addCallback(self._cbDownloadPageTest, data, name) + downloads.append(d) + return defer.gatherResults(downloads) + + def _cbDownloadPageTest(self, ignored, data, name): + bytes = file(name, "rb").read() + self.assertEquals(bytes, data) + + def testDownloadPageError1(self): + class errorfile: + def write(self, data): + raise IOError, "badness happened during write" + def close(self): + pass + ef = errorfile() + return self.assertFailure( + client.downloadPage(self.getURL("file"), ef), + IOError) + + def testDownloadPageError2(self): + class errorfile: + def write(self, data): + pass + def close(self): + raise IOError, "badness happened during close" + ef = errorfile() + return self.assertFailure( + client.downloadPage(self.getURL("file"), ef), + IOError) + + def testDownloadPageError3(self): + # make sure failures in open() are caught too. This is tricky. + # Might only work on posix. + tmpfile = open("unwritable", "wb") + tmpfile.close() + os.chmod("unwritable", 0) # make it unwritable (to us) + d = self.assertFailure( + client.downloadPage(self.getURL("file"), "unwritable"), + IOError) + d.addBoth(self._cleanupDownloadPageError3) + return d + + def _cleanupDownloadPageError3(self, ignored): + os.chmod("unwritable", 0700) + os.unlink("unwritable") + return ignored + + def _downloadTest(self, method): + dl = [] + for (url, code) in [("nosuchfile", "404"), ("error", "401"), + ("error?showlength=1", "401")]: + d = method(url) + d = self.assertFailure(d, error.Error) + d.addCallback(lambda exc, code=code: self.assertEquals(exc.args[0], code)) + dl.append(d) + return defer.DeferredList(dl, fireOnOneErrback=True) + + def testServerError(self): + return self._downloadTest(lambda url: client.getPage(self.getURL(url))) + + def testDownloadServerError(self): + return self._downloadTest(lambda url: client.downloadPage(self.getURL(url), url.split('?')[0])) + + def testFactoryInfo(self): + url = self.getURL('file') + scheme, host, port, path = client._parse(url) + factory = client.HTTPClientFactory(url) + reactor.connectTCP(host, port, factory) + return factory.deferred.addCallback(self._cbFactoryInfo, factory) + + def _cbFactoryInfo(self, ignoredResult, factory): + self.assertEquals(factory.status, '200') + self.assert_(factory.version.startswith('HTTP/')) + self.assertEquals(factory.message, 'OK') + self.assertEquals(factory.response_headers['content-length'][0], '10') + + + def testRedirect(self): + return client.getPage(self.getURL("redirect")).addCallback(self._cbRedirect) + + def _cbRedirect(self, pageData): + self.assertEquals(pageData, "0123456789") + d = self.assertFailure( + client.getPage(self.getURL("redirect"), followRedirect=0), + error.PageRedirect) + d.addCallback(self._cbCheckLocation) + return d + + def _cbCheckLocation(self, exc): + self.assertEquals(exc.location, "/file") + + + def test_infiniteRedirection(self): + """ + When more than C{redirectLimit} HTTP redirects are encountered, the + page request fails with L{InfiniteRedirection}. 
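# Redirect handling as pinned down by the redirect tests around this point:
# getPage follows redirects by default and errbacks with error.PageRedirect
# when told not to.  The URL is an assumption for illustration.
d1 = client.getPage("http://127.0.0.1:8080/redirect")                    # follows to the target
d2 = client.getPage("http://127.0.0.1:8080/redirect", followRedirect=0)  # fails with PageRedirect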
+ """ + def checkRedirectCount(*a): + self.assertEquals(f._redirectCount, 13) + self.assertEquals(self.infiniteRedirectResource.count, 13) + + f = client._makeGetterFactory( + self.getURL('infiniteRedirect'), + client.HTTPClientFactory, + redirectLimit=13) + d = self.assertFailure(f.deferred, error.InfiniteRedirection) + d.addCallback(checkRedirectCount) + return d + + + def test_isolatedFollowRedirect(self): + """ + C{client.HTTPPagerGetter} instances each obey the C{followRedirect} + value passed to the L{client.getPage} call which created them. + """ + d1 = client.getPage(self.getURL('redirect'), followRedirect=True) + d2 = client.getPage(self.getURL('redirect'), followRedirect=False) + + d = self.assertFailure(d2, error.PageRedirect + ).addCallback(lambda dummy: d1) + return d + + + def test_afterFoundGet(self): + """ + Enabling unsafe redirection behaviour overwrites the method of + redirected C{POST} requests with C{GET}. + """ + url = self.getURL('extendedRedirect?code=302') + f = client.HTTPClientFactory(url, followRedirect=True, method="POST") + self.assertFalse( + f.afterFoundGet, + "By default, afterFoundGet must be disabled") + + def gotPage(page): + self.assertEquals( + self.extendedRedirect.lastMethod, + "GET", + "With afterFoundGet, the HTTP method must change to GET") + + d = client.getPage( + url, followRedirect=True, afterFoundGet=True, method="POST") + d.addCallback(gotPage) + return d + + + def testPartial(self): + name = self.mktemp() + f = open(name, "wb") + f.write("abcd") + f.close() + + partialDownload = [(True, "abcd456789"), + (True, "abcd456789"), + (False, "0123456789")] + + d = defer.succeed(None) + for (partial, expectedData) in partialDownload: + d.addCallback(self._cbRunPartial, name, partial) + d.addCallback(self._cbPartialTest, expectedData, name) + + return d + + testPartial.skip = "Cannot test until webserver can serve partial data properly" + + def _cbRunPartial(self, ignored, name, partial): + return client.downloadPage(self.getURL("file"), name, supportPartial=partial) + + def _cbPartialTest(self, ignored, expectedData, filename): + bytes = file(filename, "rb").read() + self.assertEquals(bytes, expectedData) + + + def test_downloadTimeout(self): + """ + If the timeout indicated by the C{timeout} parameter to + L{client.HTTPDownloader.__init__} elapses without the complete response + being received, the L{defer.Deferred} returned by + L{client.downloadPage} fires with a L{Failure} wrapping a + L{defer.TimeoutError}. + """ + self.cleanupServerConnections = 2 + # Verify the behavior if no bytes are ever written. + first = client.downloadPage( + self.getURL("wait"), + self.mktemp(), timeout=0.01) + + # Verify the behavior if some bytes are written but then the request + # never completes. + second = client.downloadPage( + self.getURL("write-then-wait"), + self.mktemp(), timeout=0.01) + + return defer.gatherResults([ + self.assertFailure(first, defer.TimeoutError), + self.assertFailure(second, defer.TimeoutError)]) + + + def test_downloadHeaders(self): + """ + After L{client.HTTPDownloader.deferred} fires, the + L{client.HTTPDownloader} instance's C{status} and C{response_headers} + attributes are populated with the values from the response. 
+ """ + def checkHeaders(factory): + self.assertEquals(factory.status, '200') + self.assertEquals(factory.response_headers['content-type'][0], 'text/html') + self.assertEquals(factory.response_headers['content-length'][0], '10') + os.unlink(factory.fileName) + factory = client._makeGetterFactory( + self.getURL('file'), + client.HTTPDownloader, + fileOrName=self.mktemp()) + return factory.deferred.addCallback(lambda _: checkHeaders(factory)) + + + def test_downloadCookies(self): + """ + The C{cookies} dict passed to the L{client.HTTPDownloader} + initializer is used to populate the I{Cookie} header included in the + request sent to the server. + """ + output = self.mktemp() + factory = client._makeGetterFactory( + self.getURL('cookiemirror'), + client.HTTPDownloader, + fileOrName=output, + cookies={'foo': 'bar'}) + def cbFinished(ignored): + self.assertEqual( + FilePath(output).getContent(), + "[('foo', 'bar')]") + factory.deferred.addCallback(cbFinished) + return factory.deferred + + + def test_downloadRedirectLimit(self): + """ + When more than C{redirectLimit} HTTP redirects are encountered, the + page request fails with L{InfiniteRedirection}. + """ + def checkRedirectCount(*a): + self.assertEquals(f._redirectCount, 7) + self.assertEquals(self.infiniteRedirectResource.count, 7) + + f = client._makeGetterFactory( + self.getURL('infiniteRedirect'), + client.HTTPDownloader, + fileOrName=self.mktemp(), + redirectLimit=7) + d = self.assertFailure(f.deferred, error.InfiniteRedirection) + d.addCallback(checkRedirectCount) + return d + + + +class WebClientSSLTestCase(WebClientTestCase): + def _listen(self, site): + from twisted import test + return reactor.listenSSL(0, site, + contextFactory=ssl.DefaultOpenSSLContextFactory( + FilePath(test.__file__).sibling('server.pem').path, + FilePath(test.__file__).sibling('server.pem').path, + ), + interface="127.0.0.1") + + def getURL(self, path): + return "https://127.0.0.1:%d/%s" % (self.portno, path) + + def testFactoryInfo(self): + url = self.getURL('file') + scheme, host, port, path = client._parse(url) + factory = client.HTTPClientFactory(url) + reactor.connectSSL(host, port, factory, ssl.ClientContextFactory()) + # The base class defines _cbFactoryInfo correctly for this + return factory.deferred.addCallback(self._cbFactoryInfo, factory) + +class WebClientRedirectBetweenSSLandPlainText(unittest.TestCase): + def getHTTPS(self, path): + return "https://127.0.0.1:%d/%s" % (self.tlsPortno, path) + + def getHTTP(self, path): + return "http://127.0.0.1:%d/%s" % (self.plainPortno, path) + + def setUp(self): + plainRoot = static.Data('not me', 'text/plain') + tlsRoot = static.Data('me neither', 'text/plain') + + plainSite = server.Site(plainRoot, timeout=None) + tlsSite = server.Site(tlsRoot, timeout=None) + + from twisted import test + self.tlsPort = reactor.listenSSL(0, tlsSite, + contextFactory=ssl.DefaultOpenSSLContextFactory( + FilePath(test.__file__).sibling('server.pem').path, + FilePath(test.__file__).sibling('server.pem').path, + ), + interface="127.0.0.1") + self.plainPort = reactor.listenTCP(0, plainSite, interface="127.0.0.1") + + self.plainPortno = self.plainPort.getHost().port + self.tlsPortno = self.tlsPort.getHost().port + + plainRoot.putChild('one', util.Redirect(self.getHTTPS('two'))) + tlsRoot.putChild('two', util.Redirect(self.getHTTP('three'))) + plainRoot.putChild('three', util.Redirect(self.getHTTPS('four'))) + tlsRoot.putChild('four', static.Data('FOUND IT!', 'text/plain')) + + def tearDown(self): + ds = map(defer.maybeDeferred, + 
[self.plainPort.stopListening, self.tlsPort.stopListening]) + return defer.gatherResults(ds) + + def testHoppingAround(self): + return client.getPage(self.getHTTP("one") + ).addCallback(self.assertEquals, "FOUND IT!" + ) + +class FakeTransport: + disconnecting = False + def __init__(self): + self.data = [] + def write(self, stuff): + self.data.append(stuff) + +class CookieTestCase(unittest.TestCase): + def _listen(self, site): + return reactor.listenTCP(0, site, interface="127.0.0.1") + + def setUp(self): + root = static.Data('El toro!', 'text/plain') + root.putChild("cookiemirror", CookieMirrorResource()) + root.putChild("rawcookiemirror", RawCookieMirrorResource()) + site = server.Site(root, timeout=None) + self.port = self._listen(site) + self.portno = self.port.getHost().port + + def tearDown(self): + return self.port.stopListening() + + def getHTTP(self, path): + return "http://127.0.0.1:%d/%s" % (self.portno, path) + + def testNoCookies(self): + return client.getPage(self.getHTTP("cookiemirror") + ).addCallback(self.assertEquals, "[]" + ) + + def testSomeCookies(self): + cookies = {'foo': 'bar', 'baz': 'quux'} + return client.getPage(self.getHTTP("cookiemirror"), cookies=cookies + ).addCallback(self.assertEquals, "[('baz', 'quux'), ('foo', 'bar')]" + ) + + def testRawNoCookies(self): + return client.getPage(self.getHTTP("rawcookiemirror") + ).addCallback(self.assertEquals, "None" + ) + + def testRawSomeCookies(self): + cookies = {'foo': 'bar', 'baz': 'quux'} + return client.getPage(self.getHTTP("rawcookiemirror"), cookies=cookies + ).addCallback(self.assertEquals, "'foo=bar; baz=quux'" + ) + + def testCookieHeaderParsing(self): + factory = client.HTTPClientFactory('http://foo.example.com/') + proto = factory.buildProtocol('127.42.42.42') + proto.transport = FakeTransport() + proto.connectionMade() + for line in [ + '200 Ok', + 'Squash: yes', + 'Hands: stolen', + 'Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT', + 'Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/', + 'Set-Cookie: SHIPPING=FEDEX; path=/foo', + '', + 'body', + 'more body', + ]: + proto.dataReceived(line + '\r\n') + self.assertEquals(proto.transport.data, + ['GET / HTTP/1.0\r\n', + 'Host: foo.example.com\r\n', + 'User-Agent: Twisted PageGetter\r\n', + '\r\n']) + self.assertEquals(factory.cookies, + { + 'CUSTOMER': 'WILE_E_COYOTE', + 'PART_NUMBER': 'ROCKET_LAUNCHER_0001', + 'SHIPPING': 'FEDEX', + }) + + + +class StubHTTPProtocol(Protocol): + """ + A protocol like L{HTTP11ClientProtocol} but which does not actually know + HTTP/1.1 and only collects requests in a list. + + @ivar requests: A C{list} of two-tuples. Each time a request is made, a + tuple consisting of the request and the L{Deferred} returned from the + request method is appended to this list. + """ + def __init__(self): + self.requests = [] + + + def request(self, request): + """ + Capture the given request for later inspection. + + @return: A L{Deferred} which this code will never fire. + """ + result = Deferred() + self.requests.append((request, result)) + return result + + + +class AgentTests(unittest.TestCase): + """ + Tests for the new HTTP client API provided by L{Agent}. + """ + def setUp(self): + """ + Create an L{Agent} wrapped around a fake reactor. 
+ """ + class Reactor(MemoryReactor, Clock): + def __init__(self): + MemoryReactor.__init__(self) + Clock.__init__(self) + + self.reactor = Reactor() + self.agent = client.Agent(self.reactor) + + + def completeConnection(self): + """ + Do whitebox stuff to finish any outstanding connection attempts the + agent may have initiated. + + This spins the fake reactor clock just enough to get L{ClientCreator}, + which agent is implemented in terms of, to fire its Deferreds. + """ + self.reactor.advance(0) + + + def _verifyAndCompleteConnectionTo(self, host, port): + """ + Assert that the destination of the oldest unverified TCP connection + attempt is the given host and port. Then pop it, create a protocol, + connect it to a L{StringTransport}, and return the protocol. + """ + # Grab the connection attempt, make sure it goes to the right place, + # and cause it to succeed. + host, port, factory = self.reactor.tcpClients.pop()[:3] + self.assertEquals(host, host) + self.assertEquals(port, port) + + protocol = factory.buildProtocol(IPv4Address('TCP', '10.0.0.3', 1234)) + transport = StringTransport() + protocol.makeConnection(transport) + self.completeConnection() + return protocol + + + def test_unsupportedScheme(self): + """ + L{Agent.request} returns a L{Deferred} which fails with + L{SchemeNotSupported} if the scheme of the URI passed to it is not + C{'http'}. + """ + return self.assertFailure( + self.agent.request('GET', 'mailto:alice@example.com'), + SchemeNotSupported) + + + def test_connectionFailed(self): + """ + The L{Deferred} returned by L{Agent.request} fires with a L{Failure} if + the TCP connection attempt fails. + """ + result = self.agent.request('GET', 'http://foo/') + + # Cause the connection to be refused + host, port, factory = self.reactor.tcpClients.pop()[:3] + factory.clientConnectionFailed(None, ConnectionRefusedError()) + self.completeConnection() + + return self.assertFailure(result, ConnectionRefusedError) + + + def test_request(self): + """ + L{Agent.request} establishes a new connection to the host indicated by + the host part of the URI passed to it and issues a request using the + method, the path portion of the URI, the headers, and the body producer + passed to it. It returns a L{Deferred} which fires with a L{Response} + from the server. + """ + self.agent._protocol = StubHTTPProtocol + + headers = http_headers.Headers({'foo': ['bar']}) + # Just going to check the body for identity, so it doesn't need to be + # real. + body = object() + self.agent.request( + 'GET', 'http://example.com:1234/foo?bar', headers, body) + + protocol = self._verifyAndCompleteConnectionTo('example.com', 1234) + + # The request should be issued. + self.assertEquals(len(protocol.requests), 1) + req, res = protocol.requests.pop() + self.assertTrue(isinstance(req, Request)) + self.assertEquals(req.method, 'GET') + self.assertEquals(req.uri, '/foo?bar') + self.assertEquals( + req.headers, + http_headers.Headers({'foo': ['bar'], + 'host': ['example.com:1234']})) + self.assertIdentical(req.bodyProducer, body) + + + def test_hostProvided(self): + """ + If C{None} is passed to L{Agent.request} for the C{headers} + parameter, a L{Headers} instance is created for the request and a + I{Host} header added to it. + """ + self.agent._protocol = StubHTTPProtocol + + self.agent.request('GET', 'http://example.com/foo') + + protocol = self._verifyAndCompleteConnectionTo('example.com', 80) + + # The request should have been issued with a host header based on + # the request URL. 
+ self.assertEquals(len(protocol.requests), 1) + req, res = protocol.requests.pop() + self.assertEquals(req.headers.getRawHeaders('host'), ['example.com']) + + + def test_hostOverride(self): + """ + If the headers passed to L{Agent.request} includes a value for the + I{Host} header, that value takes precedence over the one which would + otherwise be automatically provided. + """ + self.agent._protocol = StubHTTPProtocol + + headers = http_headers.Headers({'foo': ['bar'], 'host': ['quux']}) + body = object() + self.agent.request( + 'GET', 'http://example.com/baz', headers, body) + + protocol = self._verifyAndCompleteConnectionTo('example.com', 80) + + # The request should have been issued with the host header specified + # above, not one based on the request URI. + self.assertEquals(len(protocol.requests), 1) + req, res = protocol.requests.pop() + self.assertEquals(req.headers.getRawHeaders('host'), ['quux']) + + + def test_headersUnmodified(self): + """ + If a I{Host} header must be added to the request, the L{Headers} + instance passed to L{Agent.request} is not modified. + """ + self.agent._protocol = StubHTTPProtocol + + headers = http_headers.Headers() + body = object() + self.agent.request( + 'GET', 'http://example.com/foo', headers, body) + + protocol = self._verifyAndCompleteConnectionTo('example.com', 80) + + # The request should have been issued. + self.assertEquals(len(protocol.requests), 1) + # And the headers object passed in should not have changed. + self.assertEquals(headers, http_headers.Headers()) + + + def test_hostValue(self): + """ + L{Agent._computeHostValue} returns just the hostname it is passed if + the port number it is passed is the default for the scheme it is + passed, otherwise it returns a string containing both the host and port + separated by C{":"}. + """ + self.assertEquals( + self.agent._computeHostValue('http', 'example.com', 80), + 'example.com') + + self.assertEquals( + self.agent._computeHostValue('http', 'example.com', 54321), + 'example.com:54321') + + + +if ssl is None or not hasattr(ssl, 'DefaultOpenSSLContextFactory'): + for case in [WebClientSSLTestCase, WebClientRedirectBetweenSSLandPlainText]: + case.skip = "OpenSSL not present" + +if not interfaces.IReactorSSL(reactor, None): + for case in [WebClientSSLTestCase, WebClientRedirectBetweenSSLandPlainText]: + case.skip = "Reactor doesn't support SSL" diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_wsgi.py b/vendor/Twisted-10.0.0/twisted/web/test/test_wsgi.py new file mode 100644 index 000000000000..f3bc25d3adc2 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_wsgi.py @@ -0,0 +1,1572 @@ +# Copyright (c) 2008-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.web.wsgi}. 
+""" + +__metaclass__ = type + +from sys import exc_info +from urllib import quote +from thread import get_ident +import StringIO, cStringIO, tempfile + +from zope.interface.verify import verifyObject + +from twisted.python.compat import set +from twisted.python.log import addObserver, removeObserver, err +from twisted.python.failure import Failure +from twisted.python.threadpool import ThreadPool +from twisted.internet.defer import Deferred, gatherResults +from twisted.internet import reactor +from twisted.internet.error import ConnectionLost +from twisted.trial.unittest import TestCase +from twisted.web import http +from twisted.web.resource import IResource, Resource +from twisted.web.server import Request, Site, version +from twisted.web.wsgi import WSGIResource +from twisted.web.test.test_web import DummyChannel + + +class SynchronousThreadPool: + """ + A single-threaded implementation of part of the L{ThreadPool} interface. + This implementation calls functions synchronously rather than running + them in a thread pool. It is used to make the tests which are not + directly for thread-related behavior deterministic. + """ + def callInThread(self, f, *a, **kw): + """ + Call C{f(*a, **kw)} in this thread rather than scheduling it to be + called in a thread. + """ + try: + f(*a, **kw) + except: + # callInThread doesn't let exceptions propagate to the caller. + # None is always returned and any exception raised gets logged + # later on. + err(None, "Callable passed to SynchronousThreadPool.callInThread failed") + + + +class SynchronousReactorThreads: + """ + A single-threaded implementation of part of the L{IReactorThreads} + interface. This implementation assumes that it will only be invoked + from the reactor thread, so it calls functions synchronously rather than + trying to schedule them to run in the reactor thread. It is used in + conjunction with L{SynchronousThreadPool} to make the tests which are + not directly for thread-related behavior deterministic. + """ + def callFromThread(self, f, *a, **kw): + """ + Call C{f(*a, **kw)} in this thread which should also be the reactor + thread. + """ + f(*a, **kw) + + + +class WSGIResourceTests(TestCase): + def setUp(self): + """ + Create a L{WSGIResource} with synchronous threading objects and a no-op + application object. This is useful for testing certain things about + the resource implementation which are unrelated to WSGI. + """ + self.resource = WSGIResource( + SynchronousReactorThreads(), SynchronousThreadPool(), + lambda environ, startResponse: None) + + + def test_interfaces(self): + """ + L{WSGIResource} implements L{IResource} and stops resource traversal. + """ + verifyObject(IResource, self.resource) + self.assertTrue(self.resource.isLeaf) + + + def test_unsupported(self): + """ + A L{WSGIResource} cannot have L{IResource} children. Its + C{getChildWithDefault} and C{putChild} methods raise L{RuntimeError}. + """ + self.assertRaises( + RuntimeError, + self.resource.getChildWithDefault, + "foo", Request(DummyChannel(), False)) + self.assertRaises( + RuntimeError, + self.resource.putChild, + "foo", Resource()) + + +class WSGITestsMixin: + """ + @ivar channelFactory: A no-argument callable which will be invoked to + create a new HTTP channel to associate with request objects. 
+ """ + channelFactory = DummyChannel + + def setUp(self): + self.threadpool = SynchronousThreadPool() + self.reactor = SynchronousReactorThreads() + + + def lowLevelRender( + self, requestFactory, applicationFactory, channelFactory, method, + version, resourceSegments, requestSegments, query=None, headers=[], + body=None, safe=''): + """ + @param method: A C{str} giving the request method to use. + + @param version: A C{str} like C{'1.1'} giving the request version. + + @param resourceSegments: A C{list} of unencoded path segments which + specifies the location in the resource hierarchy at which the + L{WSGIResource} will be placed, eg C{['']} for I{/}, C{['foo', + 'bar', '']} for I{/foo/bar/}, etc. + + @param requestSegments: A C{list} of unencoded path segments giving the + request URI. + + @param query: A C{list} of two-tuples of C{str} giving unencoded query + argument keys and values. + + @param headers: A C{list} of two-tuples of C{str} giving request header + names and corresponding values. + + @param safe: A C{str} giving the bytes which are to be considered + I{safe} for inclusion in the request URI and not quoted. + + @return: A L{Deferred} which will be called back with a two-tuple of + the arguments passed which would be passed to the WSGI application + object for this configuration and request (ie, the environment and + start_response callable). + """ + root = WSGIResource( + self.reactor, self.threadpool, applicationFactory()) + resourceSegments.reverse() + for seg in resourceSegments: + tmp = Resource() + tmp.putChild(seg, root) + root = tmp + + channel = channelFactory() + channel.site = Site(root) + request = requestFactory(channel, False) + for k, v in headers: + request.requestHeaders.addRawHeader(k, v) + request.gotLength(0) + if body: + request.content.write(body) + request.content.seek(0) + uri = '/' + '/'.join([quote(seg, safe) for seg in requestSegments]) + if query is not None: + uri += '?' + '&'.join(['='.join([quote(k, safe), quote(v, safe)]) + for (k, v) in query]) + request.requestReceived(method, uri, 'HTTP/' + version) + return request + + + def render(self, *a, **kw): + result = Deferred() + def applicationFactory(): + def application(*args): + environ, startResponse = args + result.callback(args) + startResponse('200 OK', []) + return iter(()) + return application + self.lowLevelRender( + Request, applicationFactory, self.channelFactory, *a, **kw) + return result + + + def requestFactoryFactory(self, requestClass=Request): + d = Deferred() + def requestFactory(*a, **kw): + request = requestClass(*a, **kw) + # If notifyFinish is called after lowLevelRender returns, it won't + # do the right thing, because the request will have already + # finished. One might argue that this is a bug in + # Request.notifyFinish. + request.notifyFinish().chainDeferred(d) + return request + return d, requestFactory + + + def getContentFromResponse(self, response): + return response.split('\r\n\r\n', 1)[1] + + + +class EnvironTests(WSGITestsMixin, TestCase): + """ + Tests for the values in the C{environ} C{dict} passed to the application + object by L{twisted.web.wsgi.WSGIResource}. + """ + def environKeyEqual(self, key, value): + def assertEnvironKeyEqual((environ, startResponse)): + self.assertEqual(environ[key], value) + return assertEnvironKeyEqual + + + def test_environIsDict(self): + """ + L{WSGIResource} calls the application object with an C{environ} + parameter which is exactly of type C{dict}. 
+ """ + d = self.render('GET', '1.1', [], ['']) + def cbRendered((environ, startResponse)): + self.assertIdentical(type(environ), dict) + d.addCallback(cbRendered) + return d + + + def test_requestMethod(self): + """ + The C{'REQUEST_METHOD'} key of the C{environ} C{dict} passed to the + application contains the HTTP method in the request (RFC 3875, section + 4.1.12). + """ + get = self.render('GET', '1.1', [], ['']) + get.addCallback(self.environKeyEqual('REQUEST_METHOD', 'GET')) + + # Also make sure a different request method shows up as a different + # value in the environ dict. + post = self.render('POST', '1.1', [], ['']) + post.addCallback(self.environKeyEqual('REQUEST_METHOD', 'POST')) + + return gatherResults([get, post]) + + + def test_scriptName(self): + """ + The C{'SCRIPT_NAME'} key of the C{environ} C{dict} passed to the + application contains the I{abs_path} (RFC 2396, section 3) to this + resource (RFC 3875, section 4.1.13). + """ + root = self.render('GET', '1.1', [], ['']) + root.addCallback(self.environKeyEqual('SCRIPT_NAME', '')) + + emptyChild = self.render('GET', '1.1', [''], ['']) + emptyChild.addCallback(self.environKeyEqual('SCRIPT_NAME', '/')) + + leaf = self.render('GET', '1.1', ['foo'], ['foo']) + leaf.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo')) + + container = self.render('GET', '1.1', ['foo', ''], ['foo', '']) + container.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo/')) + + internal = self.render('GET', '1.1', ['foo'], ['foo', 'bar']) + internal.addCallback(self.environKeyEqual('SCRIPT_NAME', '/foo')) + + unencoded = self.render( + 'GET', '1.1', ['foo', '/', 'bar\xff'], ['foo', '/', 'bar\xff']) + # The RFC says "(not URL-encoded)", even though that makes + # interpretation of SCRIPT_NAME ambiguous. + unencoded.addCallback( + self.environKeyEqual('SCRIPT_NAME', '/foo///bar\xff')) + + return gatherResults([ + root, emptyChild, leaf, container, internal, unencoded]) + + + def test_pathInfo(self): + """ + The C{'PATH_INFO'} key of the C{environ} C{dict} passed to the + application contains the suffix of the request URI path which is not + included in the value for the C{'SCRIPT_NAME'} key (RFC 3875, section + 4.1.5). + """ + assertKeyEmpty = self.environKeyEqual('PATH_INFO', '') + + root = self.render('GET', '1.1', [], ['']) + root.addCallback(self.environKeyEqual('PATH_INFO', '/')) + + emptyChild = self.render('GET', '1.1', [''], ['']) + emptyChild.addCallback(assertKeyEmpty) + + leaf = self.render('GET', '1.1', ['foo'], ['foo']) + leaf.addCallback(assertKeyEmpty) + + container = self.render('GET', '1.1', ['foo', ''], ['foo', '']) + container.addCallback(assertKeyEmpty) + + internalLeaf = self.render('GET', '1.1', ['foo'], ['foo', 'bar']) + internalLeaf.addCallback(self.environKeyEqual('PATH_INFO', '/bar')) + + internalContainer = self.render('GET', '1.1', ['foo'], ['foo', '']) + internalContainer.addCallback(self.environKeyEqual('PATH_INFO', '/')) + + unencoded = self.render('GET', '1.1', [], ['foo', '/', 'bar\xff']) + unencoded.addCallback( + self.environKeyEqual('PATH_INFO', '/foo///bar\xff')) + + return gatherResults([ + root, leaf, container, internalLeaf, + internalContainer, unencoded]) + + + def test_queryString(self): + """ + The C{'QUERY_STRING'} key of the C{environ} C{dict} passed to the + application contains the portion of the request URI after the first + I{?} (RFC 3875, section 4.1.7). 
+ """ + missing = self.render('GET', '1.1', [], [''], None) + missing.addCallback(self.environKeyEqual('QUERY_STRING', '')) + + empty = self.render('GET', '1.1', [], [''], []) + empty.addCallback(self.environKeyEqual('QUERY_STRING', '')) + + present = self.render('GET', '1.1', [], [''], [('foo', 'bar')]) + present.addCallback(self.environKeyEqual('QUERY_STRING', 'foo=bar')) + + unencoded = self.render('GET', '1.1', [], [''], [('/', '/')]) + unencoded.addCallback(self.environKeyEqual('QUERY_STRING', '%2F=%2F')) + + # "?" is reserved in the portion of a URL. However, it + # seems to be a common mistake of clients to forget to quote it. So, + # make sure we handle that invalid case. + doubleQuestion = self.render( + 'GET', '1.1', [], [''], [('foo', '?bar')], safe='?') + doubleQuestion.addCallback( + self.environKeyEqual('QUERY_STRING', 'foo=?bar')) + + return gatherResults([ + missing, empty, present, unencoded, doubleQuestion]) + + + def test_contentType(self): + """ + The C{'CONTENT_TYPE'} key of the C{environ} C{dict} passed to the + application contains the value of the I{Content-Type} request header + (RFC 3875, section 4.1.3). + """ + missing = self.render('GET', '1.1', [], ['']) + missing.addCallback(self.environKeyEqual('CONTENT_TYPE', '')) + + present = self.render( + 'GET', '1.1', [], [''], None, [('content-type', 'x-foo/bar')]) + present.addCallback(self.environKeyEqual('CONTENT_TYPE', 'x-foo/bar')) + + return gatherResults([missing, present]) + + + def test_contentLength(self): + """ + The C{'CONTENT_LENGTH'} key of the C{environ} C{dict} passed to the + application contains the value of the I{Content-Length} request header + (RFC 3875, section 4.1.2). + """ + missing = self.render('GET', '1.1', [], ['']) + missing.addCallback(self.environKeyEqual('CONTENT_LENGTH', '')) + + present = self.render( + 'GET', '1.1', [], [''], None, [('content-length', '1234')]) + present.addCallback(self.environKeyEqual('CONTENT_LENGTH', '1234')) + + return gatherResults([missing, present]) + + + def test_serverName(self): + """ + The C{'SERVER_NAME'} key of the C{environ} C{dict} passed to the + application contains the best determination of the server hostname + possible, using either the value of the I{Host} header in the request + or the address the server is listening on if that header is not + present (RFC 3875, section 4.1.14). + """ + missing = self.render('GET', '1.1', [], ['']) + # 10.0.0.1 value comes from a bit far away - + # twisted.test.test_web.DummyChannel.transport.getHost().host + missing.addCallback(self.environKeyEqual('SERVER_NAME', '10.0.0.1')) + + present = self.render( + 'GET', '1.1', [], [''], None, [('host', 'example.org')]) + present.addCallback(self.environKeyEqual('SERVER_NAME', 'example.org')) + + return gatherResults([missing, present]) + + + def test_serverPort(self): + """ + The C{'SERVER_PORT'} key of the C{environ} C{dict} passed to the + application contains the port number of the server which received the + request (RFC 3875, section 4.1.15). 
+ """ + portNumber = 12354 + def makeChannel(): + channel = DummyChannel() + channel.transport = DummyChannel.TCP() + channel.transport.port = portNumber + return channel + self.channelFactory = makeChannel + + d = self.render('GET', '1.1', [], ['']) + d.addCallback(self.environKeyEqual('SERVER_PORT', str(portNumber))) + return d + + + def test_serverProtocol(self): + """ + The C{'SERVER_PROTOCOL'} key of the C{environ} C{dict} passed to the + application contains the HTTP version number received in the request + (RFC 3875, section 4.1.16). + """ + old = self.render('GET', '1.0', [], ['']) + old.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.0')) + + new = self.render('GET', '1.1', [], ['']) + new.addCallback(self.environKeyEqual('SERVER_PROTOCOL', 'HTTP/1.1')) + + return gatherResults([old, new]) + + + def test_remoteAddr(self): + """ + The C{'REMOTE_ADDR'} key of the C{environ} C{dict} passed to the + application contains the address of the client making the request. + """ + d = self.render('GET', '1.1', [], ['']) + d.addCallback(self.environKeyEqual('REMOTE_ADDR', '192.168.1.1')) + + return d + + def test_headers(self): + """ + HTTP request headers are copied into the C{environ} C{dict} passed to + the application with a C{HTTP_} prefix added to their names. + """ + singleValue = self.render( + 'GET', '1.1', [], [''], None, [('foo', 'bar'), ('baz', 'quux')]) + def cbRendered((environ, startResponse)): + self.assertEqual(environ['HTTP_FOO'], 'bar') + self.assertEqual(environ['HTTP_BAZ'], 'quux') + singleValue.addCallback(cbRendered) + + multiValue = self.render( + 'GET', '1.1', [], [''], None, [('foo', 'bar'), ('foo', 'baz')]) + multiValue.addCallback(self.environKeyEqual('HTTP_FOO', 'bar,baz')) + + withHyphen = self.render( + 'GET', '1.1', [], [''], None, [('foo-bar', 'baz')]) + withHyphen.addCallback(self.environKeyEqual('HTTP_FOO_BAR', 'baz')) + + multiLine = self.render( + 'GET', '1.1', [], [''], None, [('foo', 'bar\n\tbaz')]) + multiLine.addCallback(self.environKeyEqual('HTTP_FOO', 'bar \tbaz')) + + return gatherResults([singleValue, multiValue, withHyphen, multiLine]) + + + def test_wsgiVersion(self): + """ + The C{'wsgi.version'} key of the C{environ} C{dict} passed to the + application has the value C{(1, 0)} indicating that this is a WSGI 1.0 + container. + """ + versionDeferred = self.render('GET', '1.1', [], ['']) + versionDeferred.addCallback(self.environKeyEqual('wsgi.version', (1, 0))) + return versionDeferred + + + def test_wsgiRunOnce(self): + """ + The C{'wsgi.run_once'} key of the C{environ} C{dict} passed to the + application is set to C{False}. + """ + once = self.render('GET', '1.1', [], ['']) + once.addCallback(self.environKeyEqual('wsgi.run_once', False)) + return once + + + def test_wsgiMultithread(self): + """ + The C{'wsgi.multithread'} key of the C{environ} C{dict} passed to the + application is set to C{True}. + """ + thread = self.render('GET', '1.1', [], ['']) + thread.addCallback(self.environKeyEqual('wsgi.multithread', True)) + return thread + + + def test_wsgiMultiprocess(self): + """ + The C{'wsgi.multiprocess'} key of the C{environ} C{dict} passed to the + application is set to C{False}. + """ + process = self.render('GET', '1.1', [], ['']) + process.addCallback(self.environKeyEqual('wsgi.multiprocess', False)) + return process + + + def test_wsgiURLScheme(self): + """ + The C{'wsgi.url_scheme'} key of the C{environ} C{dict} passed to the + application has the request URL scheme. 
+ """ + # XXX Does this need to be different if the request is for an absolute + # URL? + def channelFactory(): + channel = DummyChannel() + channel.transport = DummyChannel.SSL() + return channel + + self.channelFactory = DummyChannel + httpDeferred = self.render('GET', '1.1', [], ['']) + httpDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'http')) + + self.channelFactory = channelFactory + httpsDeferred = self.render('GET', '1.1', [], ['']) + httpsDeferred.addCallback(self.environKeyEqual('wsgi.url_scheme', 'https')) + + return gatherResults([httpDeferred, httpsDeferred]) + + + def test_wsgiErrors(self): + """ + The C{'wsgi.errors'} key of the C{environ} C{dict} passed to the + application is a file-like object (as defined in the U{Input and Errors + Streams} + section of PEP 333) which converts bytes written to it into events for + the logging system. + """ + events = [] + addObserver(events.append) + self.addCleanup(removeObserver, events.append) + + errors = self.render('GET', '1.1', [], ['']) + def cbErrors((environ, startApplication)): + errors = environ['wsgi.errors'] + errors.write('some message\n') + errors.writelines(['another\nmessage\n']) + errors.flush() + self.assertEqual(events[0]['message'], ('some message\n',)) + self.assertEqual(events[0]['system'], 'wsgi') + self.assertTrue(events[0]['isError']) + self.assertEqual(events[1]['message'], ('another\nmessage\n',)) + self.assertEqual(events[1]['system'], 'wsgi') + self.assertTrue(events[1]['isError']) + self.assertEqual(len(events), 2) + errors.addCallback(cbErrors) + return errors + + +class InputStreamTestMixin(WSGITestsMixin): + """ + A mixin for L{TestCase} subclasses which defines a number of tests against + L{_InputStream}. The subclass is expected to create a file-like object to + be wrapped by an L{_InputStream} under test. + """ + def getFileType(self): + raise NotImplementedError( + "%s.getFile must be implemented" % (self.__class__.__name__,)) + + + def _renderAndReturnReaderResult(self, reader, content): + contentType = self.getFileType() + class CustomizedRequest(Request): + def gotLength(self, length): + # Always allocate a file of the specified type, instead of + # using the base behavior of selecting one depending on the + # length. + self.content = contentType() + + def appFactoryFactory(reader): + result = Deferred() + def applicationFactory(): + def application(*args): + environ, startResponse = args + result.callback(reader(environ['wsgi.input'])) + startResponse('200 OK', []) + return iter(()) + return application + return result, applicationFactory + d, appFactory = appFactoryFactory(reader) + self.lowLevelRender( + CustomizedRequest, appFactory, DummyChannel, + 'PUT', '1.1', [], [''], None, [], + content) + return d + + + def test_readAll(self): + """ + Calling L{_InputStream.read} with no arguments returns the entire input + stream. + """ + bytes = "some bytes are here" + d = self._renderAndReturnReaderResult(lambda input: input.read(), bytes) + d.addCallback(self.assertEquals, bytes) + return d + + + def test_readSome(self): + """ + Calling L{_InputStream.read} with an integer returns that many bytes + from the input stream, as long as it is less than or equal to the total + number of bytes available. + """ + bytes = "hello, world." 
+ d = self._renderAndReturnReaderResult(lambda input: input.read(3), bytes) + d.addCallback(self.assertEquals, "hel") + return d + + + def test_readMoreThan(self): + """ + Calling L{_InputStream.read} with an integer that is greater than the + total number of bytes in the input stream returns all bytes in the + input stream. + """ + bytes = "some bytes are here" + d = self._renderAndReturnReaderResult( + lambda input: input.read(len(bytes) + 3), bytes) + d.addCallback(self.assertEquals, bytes) + return d + + + def test_readTwice(self): + """ + Calling L{_InputStream.read} a second time returns bytes starting from + the position after the last byte returned by the previous read. + """ + bytes = "some bytes, hello" + def read(input): + input.read(3) + return input.read() + d = self._renderAndReturnReaderResult(read, bytes) + d.addCallback(self.assertEquals, bytes[3:]) + return d + + + def test_readNone(self): + """ + Calling L{_InputStream.read} with C{None} as an argument returns all + bytes in the input stream. + """ + bytes = "the entire stream" + d = self._renderAndReturnReaderResult( + lambda input: input.read(None), bytes) + d.addCallback(self.assertEquals, bytes) + return d + + + def test_readNegative(self): + """ + Calling L{_InputStream.read} with a negative integer as an argument + returns all bytes in the input stream. + """ + bytes = "all of the input" + d = self._renderAndReturnReaderResult( + lambda input: input.read(-1), bytes) + d.addCallback(self.assertEquals, bytes) + return d + + + def test_readline(self): + """ + Calling L{_InputStream.readline} with no argument returns one line from + the input stream. + """ + bytes = "hello\nworld" + d = self._renderAndReturnReaderResult( + lambda input: input.readline(), bytes) + d.addCallback(self.assertEquals, "hello\n") + return d + + + def test_readlineSome(self): + """ + Calling L{_InputStream.readline} with an integer returns at most that + many bytes, even if it is not enough to make up a complete line. + + COMPATIBILITY NOTE: the size argument is excluded from the WSGI + specification, but is provided here anyhow, because useful libraries + such as python stdlib's cgi.py assume their input file-like-object + supports readline with a size argument. If you use it, be aware your + application may not be portable to other conformant WSGI servers. + """ + bytes = "goodbye\nworld" + d = self._renderAndReturnReaderResult( + lambda input: input.readline(3), bytes) + d.addCallback(self.assertEquals, "goo") + return d + + + def test_readlineMoreThan(self): + """ + Calling L{_InputStream.readline} with an integer which is greater than + the number of bytes in the next line returns only the next line. + """ + bytes = "some lines\nof text" + d = self._renderAndReturnReaderResult( + lambda input: input.readline(20), bytes) + d.addCallback(self.assertEquals, "some lines\n") + return d + + + def test_readlineTwice(self): + """ + Calling L{_InputStream.readline} a second time returns the line + following the line returned by the first call. + """ + bytes = "first line\nsecond line\nlast line" + def readline(input): + input.readline() + return input.readline() + d = self._renderAndReturnReaderResult(readline, bytes) + d.addCallback(self.assertEquals, "second line\n") + return d + + + def test_readlineNone(self): + """ + Calling L{_InputStream.readline} with C{None} as an argument returns + one line from the input stream. 
+ """ + bytes = "this is one line\nthis is another line" + d = self._renderAndReturnReaderResult( + lambda input: input.readline(None), bytes) + d.addCallback(self.assertEquals, "this is one line\n") + return d + + + def test_readlineNegative(self): + """ + Calling L{_InputStream.readline} with a negative integer as an argument + returns one line from the input stream. + """ + bytes = "input stream line one\nline two" + d = self._renderAndReturnReaderResult( + lambda input: input.readline(-1), bytes) + d.addCallback(self.assertEquals, "input stream line one\n") + return d + + + def test_readlines(self): + """ + Calling L{_InputStream.readlines} with no arguments returns a list of + all lines from the input stream. + """ + bytes = "alice\nbob\ncarol" + d = self._renderAndReturnReaderResult( + lambda input: input.readlines(), bytes) + d.addCallback(self.assertEquals, ["alice\n", "bob\n", "carol"]) + return d + + + def test_readlinesSome(self): + """ + Calling L{_InputStream.readlines} with an integer as an argument + returns a list of lines from the input stream with the argument serving + as an approximate bound on the total number of bytes to read. + """ + bytes = "123\n456\n789\n0" + d = self._renderAndReturnReaderResult( + lambda input: input.readlines(5), bytes) + def cbLines(lines): + # Make sure we got enough lines to make 5 bytes. Anything beyond + # that is fine too. + self.assertEquals(lines[:2], ["123\n", "456\n"]) + d.addCallback(cbLines) + return d + + + def test_readlinesMoreThan(self): + """ + Calling L{_InputStream.readlines} with an integer which is greater than + the total number of bytes in the input stream returns a list of all + lines from the input. + """ + bytes = "one potato\ntwo potato\nthree potato" + d = self._renderAndReturnReaderResult( + lambda input: input.readlines(100), bytes) + d.addCallback( + self.assertEquals, + ["one potato\n", "two potato\n", "three potato"]) + return d + + + def test_readlinesAfterRead(self): + """ + Calling L{_InputStream.readlines} after a call to L{_InputStream.read} + returns lines starting at the byte after the last byte returned by the + C{read} call. + """ + bytes = "hello\nworld\nfoo" + def readlines(input): + input.read(7) + return input.readlines() + d = self._renderAndReturnReaderResult(readlines, bytes) + d.addCallback(self.assertEquals, ["orld\n", "foo"]) + return d + + + def test_readlinesNone(self): + """ + Calling L{_InputStream.readlines} with C{None} as an argument returns + all lines from the input. + """ + bytes = "one fish\ntwo fish\n" + d = self._renderAndReturnReaderResult( + lambda input: input.readlines(None), bytes) + d.addCallback(self.assertEquals, ["one fish\n", "two fish\n"]) + return d + + + def test_readlinesNegative(self): + """ + Calling L{_InputStream.readlines} with a negative integer as an + argument returns a list of all lines from the input. + """ + bytes = "red fish\nblue fish\n" + d = self._renderAndReturnReaderResult( + lambda input: input.readlines(-1), bytes) + d.addCallback(self.assertEquals, ["red fish\n", "blue fish\n"]) + return d + + + def test_iterable(self): + """ + Iterating over L{_InputStream} produces lines from the input stream. 
+ """ + bytes = "green eggs\nand ham\n" + d = self._renderAndReturnReaderResult(lambda input: list(input), bytes) + d.addCallback(self.assertEquals, ["green eggs\n", "and ham\n"]) + return d + + + def test_iterableAfterRead(self): + """ + Iterating over L{_InputStream} after calling L{_InputStream.read} + produces lines from the input stream starting from the first byte after + the last byte returned by the C{read} call. + """ + bytes = "green eggs\nand ham\n" + def iterate(input): + input.read(3) + return list(input) + d = self._renderAndReturnReaderResult(iterate, bytes) + d.addCallback(self.assertEquals, ["en eggs\n", "and ham\n"]) + return d + + + +class InputStreamStringIOTests(InputStreamTestMixin, TestCase): + """ + Tests for L{_InputStream} when it is wrapped around a L{StringIO.StringIO}. + """ + def getFileType(self): + return StringIO.StringIO + + + +class InputStreamCStringIOTests(InputStreamTestMixin, TestCase): + """ + Tests for L{_InputStream} when it is wrapped around a + L{cStringIO.StringIO}. + """ + def getFileType(self): + return cStringIO.StringIO + + + +class InputStreamTemporaryFileTests(InputStreamTestMixin, TestCase): + """ + Tests for L{_InputStream} when it is wrapped around a L{tempfile.TemporaryFile}. + """ + def getFileType(self): + return tempfile.TemporaryFile + + + +class StartResponseTests(WSGITestsMixin, TestCase): + """ + Tests for the I{start_response} parameter passed to the application object + by L{WSGIResource}. + """ + def test_status(self): + """ + The response status passed to the I{start_response} callable is written + as the status of the response to the request. + """ + channel = DummyChannel() + + def applicationFactory(): + def application(environ, startResponse): + startResponse('107 Strange message', []) + return iter(()) + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertTrue( + channel.transport.written.getvalue().startswith( + 'HTTP/1.1 107 Strange message')) + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + def _headersTest(self, appHeaders, expectedHeaders): + """ + Verify that if the response headers given by C{appHeaders} are passed + to the I{start_response} callable, then the response header lines given + by C{expectedHeaders} plus I{Server} and I{Date} header lines are + included in the response. 
+ """ + # Make the Date header value deterministic + self.patch(http, 'datetimeToString', lambda: 'Tuesday') + + channel = DummyChannel() + + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', appHeaders) + return iter(()) + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + response = channel.transport.written.getvalue() + headers, rest = response.split('\r\n\r\n', 1) + headerLines = headers.split('\r\n')[1:] + headerLines.sort() + allExpectedHeaders = expectedHeaders + [ + 'Date: Tuesday', + 'Server: ' + version, + 'Transfer-Encoding: chunked'] + allExpectedHeaders.sort() + self.assertEqual(headerLines, allExpectedHeaders) + + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + return d + + + def test_headers(self): + """ + The headers passed to the I{start_response} callable are included in + the response as are the required I{Date} and I{Server} headers and the + necessary connection (hop to hop) header I{Transfer-Encoding}. + """ + return self._headersTest( + [('foo', 'bar'), ('baz', 'quux')], + ['Baz: quux', 'Foo: bar']) + + + def test_applicationProvidedContentType(self): + """ + If I{Content-Type} is included in the headers passed to the + I{start_response} callable, one I{Content-Type} header is included in + the response. + """ + return self._headersTest( + [('content-type', 'monkeys are great')], + ['Content-Type: monkeys are great']) + + + def test_applicationProvidedServerAndDate(self): + """ + If either I{Server} or I{Date} is included in the headers passed to the + I{start_response} callable, they are disregarded. + """ + return self._headersTest( + [('server', 'foo'), ('Server', 'foo'), + ('date', 'bar'), ('dATE', 'bar')], + []) + + + def test_delayedUntilReturn(self): + """ + Nothing is written in response to a request when the I{start_response} + callable is invoked. If the iterator returned by the application + object produces only empty strings, the response is written after the + last element is produced. + """ + channel = DummyChannel() + + intermediateValues = [] + def record(): + intermediateValues.append(channel.transport.written.getvalue()) + + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', [('foo', 'bar'), ('baz', 'quux')]) + yield '' + record() + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertEqual(intermediateValues, ['']) + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + def test_delayedUntilContent(self): + """ + Nothing is written in response to a request when the I{start_response} + callable is invoked. Once a non-empty string has been produced by the + iterator returned by the application object, the response status and + headers are written. 
+ """ + channel = DummyChannel() + + intermediateValues = [] + def record(): + intermediateValues.append(channel.transport.written.getvalue()) + + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', [('foo', 'bar')]) + yield '' + record() + yield 'foo' + record() + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertFalse(intermediateValues[0]) + self.assertTrue(intermediateValues[1]) + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + def test_content(self): + """ + Content produced by the iterator returned by the application object is + written to the request as it is produced. + """ + channel = DummyChannel() + + intermediateValues = [] + def record(): + intermediateValues.append(channel.transport.written.getvalue()) + + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', [('content-length', '6')]) + yield 'foo' + record() + yield 'bar' + record() + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertEqual( + self.getContentFromResponse(intermediateValues[0]), + 'foo') + self.assertEqual( + self.getContentFromResponse(intermediateValues[1]), + 'foobar') + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + def test_multipleStartResponse(self): + """ + If the I{start_response} callable is invoked multiple times before a + data for the response body is produced, the values from the last call + are used. + """ + channel = DummyChannel() + + def applicationFactory(): + def application(environ, startResponse): + startResponse('100 Foo', []) + startResponse('200 Bar', []) + return iter(()) + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertTrue( + channel.transport.written.getvalue().startswith( + 'HTTP/1.1 200 Bar\r\n')) + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + def test_startResponseWithException(self): + """ + If the I{start_response} callable is invoked with a third positional + argument before the status and headers have been written to the + response, the status and headers become the newly supplied values. + """ + channel = DummyChannel() + + def applicationFactory(): + def application(environ, startResponse): + startResponse('100 Foo', [], (Exception, Exception("foo"), None)) + return iter(()) + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertTrue( + channel.transport.written.getvalue().startswith( + 'HTTP/1.1 100 Foo\r\n')) + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + def test_startResponseWithExceptionTooLate(self): + """ + If the I{start_response} callable is invoked with a third positional + argument after the status and headers have been written to the + response, the supplied I{exc_info} values are re-raised to the + application. 
+ """ + channel = DummyChannel() + + class SomeException(Exception): + pass + + try: + raise SomeException() + except: + excInfo = exc_info() + + reraised = [] + + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', []) + yield 'foo' + try: + startResponse('500 ERR', [], excInfo) + except: + reraised.append(exc_info()) + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertTrue( + channel.transport.written.getvalue().startswith( + 'HTTP/1.1 200 OK\r\n')) + self.assertEqual(reraised[0][0], excInfo[0]) + self.assertEqual(reraised[0][1], excInfo[1]) + self.assertEqual(reraised[0][2].tb_next, excInfo[2]) + + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + def test_write(self): + """ + I{start_response} returns the I{write} callable which can be used to + write bytes to the response body without buffering. + """ + channel = DummyChannel() + + intermediateValues = [] + def record(): + intermediateValues.append(channel.transport.written.getvalue()) + + def applicationFactory(): + def application(environ, startResponse): + write = startResponse('100 Foo', [('content-length', '6')]) + write('foo') + record() + write('bar') + record() + return iter(()) + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertEqual( + self.getContentFromResponse(intermediateValues[0]), + 'foo') + self.assertEqual( + self.getContentFromResponse(intermediateValues[1]), + 'foobar') + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + +class ApplicationTests(WSGITestsMixin, TestCase): + """ + Tests for things which are done to the application object and the iterator + it returns. + """ + def enableThreads(self): + self.reactor = reactor + self.threadpool = ThreadPool() + self.threadpool.start() + self.addCleanup(self.threadpool.stop) + + + def test_close(self): + """ + If the application object returns an iterator which also has a I{close} + method, that method is called after iteration is complete. + """ + channel = DummyChannel() + + class Result: + def __init__(self): + self.open = True + + def __iter__(self): + for i in range(3): + if self.open: + yield str(i) + + def close(self): + self.open = False + + result = Result() + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', [('content-length', '3')]) + return result + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertEqual( + self.getContentFromResponse( + channel.transport.written.getvalue()), + '012') + self.assertFalse(result.open) + d.addCallback(cbRendered) + + self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], ['']) + + return d + + + def test_applicationCalledInThread(self): + """ + The application object is invoked and iterated in a thread which is not + the reactor thread. 
+ """ + self.enableThreads() + invoked = [] + + def applicationFactory(): + def application(environ, startResponse): + def result(): + for i in range(3): + invoked.append(get_ident()) + yield str(i) + invoked.append(get_ident()) + startResponse('200 OK', [('content-length', '3')]) + return result() + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + self.assertNotIn(get_ident(), invoked) + self.assertEqual(len(set(invoked)), 1) + d.addCallback(cbRendered) + + self.lowLevelRender( + requestFactory, applicationFactory, + DummyChannel, 'GET', '1.1', [], ['']) + + return d + + + def test_writeCalledFromThread(self): + """ + The I{write} callable returned by I{start_response} calls the request's + C{write} method in the reactor thread. + """ + self.enableThreads() + invoked = [] + + class ThreadVerifier(Request): + def write(self, bytes): + invoked.append(get_ident()) + return Request.write(self, bytes) + + def applicationFactory(): + def application(environ, startResponse): + write = startResponse('200 OK', []) + write('foo') + return iter(()) + return application + + d, requestFactory = self.requestFactoryFactory(ThreadVerifier) + def cbRendered(ignored): + self.assertEqual(set(invoked), set([get_ident()])) + d.addCallback(cbRendered) + + self.lowLevelRender( + requestFactory, applicationFactory, DummyChannel, + 'GET', '1.1', [], ['']) + + return d + + + def test_iteratedValuesWrittenFromThread(self): + """ + Strings produced by the iterator returned by the application object are + written to the request in the reactor thread. + """ + self.enableThreads() + invoked = [] + + class ThreadVerifier(Request): + def write(self, bytes): + invoked.append(get_ident()) + return Request.write(self, bytes) + + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', []) + yield 'foo' + return application + + d, requestFactory = self.requestFactoryFactory(ThreadVerifier) + def cbRendered(ignored): + self.assertEqual(set(invoked), set([get_ident()])) + d.addCallback(cbRendered) + + self.lowLevelRender( + requestFactory, applicationFactory, DummyChannel, + 'GET', '1.1', [], ['']) + + return d + + + def test_statusWrittenFromThread(self): + """ + The response status is set on the request object in the reactor thread. + """ + self.enableThreads() + invoked = [] + + class ThreadVerifier(Request): + def setResponseCode(self, code, message): + invoked.append(get_ident()) + return Request.setResponseCode(self, code, message) + + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', []) + return iter(()) + return application + + d, requestFactory = self.requestFactoryFactory(ThreadVerifier) + def cbRendered(ignored): + self.assertEqual(set(invoked), set([get_ident()])) + d.addCallback(cbRendered) + + self.lowLevelRender( + requestFactory, applicationFactory, DummyChannel, + 'GET', '1.1', [], ['']) + + return d + + + def test_connectionClosedDuringIteration(self): + """ + If the request connection is lost while the application object is being + iterated, iteration is stopped. + """ + class UnreliableConnection(Request): + """ + This is a request which pretends its connection is lost immediately + after the first write is done to it. 
+ """ + def write(self, bytes): + self.connectionLost(Failure(ConnectionLost("No more connection"))) + + self.badIter = False + def appIter(): + yield "foo" + self.badIter = True + raise Exception("Should not have gotten here") + + def applicationFactory(): + def application(environ, startResponse): + startResponse('200 OK', []) + return appIter() + return application + + d, requestFactory = self.requestFactoryFactory(UnreliableConnection) + def cbRendered(ignored): + self.assertFalse(self.badIter, "Should not have resumed iteration") + d.addCallback(cbRendered) + + self.lowLevelRender( + requestFactory, applicationFactory, DummyChannel, + 'GET', '1.1', [], ['']) + + return self.assertFailure(d, ConnectionLost) + + + def _internalServerErrorTest(self, application): + channel = DummyChannel() + + def applicationFactory(): + return application + + d, requestFactory = self.requestFactoryFactory() + def cbRendered(ignored): + errors = self.flushLoggedErrors(RuntimeError) + self.assertEquals(len(errors), 1) + + self.assertTrue( + channel.transport.written.getvalue().startswith( + 'HTTP/1.1 500 Internal Server Error')) + d.addCallback(cbRendered) + + request = self.lowLevelRender( + requestFactory, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + return d + + + def test_applicationExceptionBeforeStartResponse(self): + """ + If the application raises an exception before calling I{start_response} + then the response status is I{500} and the exception is logged. + """ + def application(environ, startResponse): + raise RuntimeError("This application had some error.") + return self._internalServerErrorTest(application) + + + def test_applicationExceptionAfterStartResponse(self): + """ + If the application calls I{start_response} but then raises an exception + before any data is written to the response then the response status is + I{500} and the exception is logged. + """ + def application(environ, startResponse): + startResponse('200 OK', []) + raise RuntimeError("This application had some error.") + return self._internalServerErrorTest(application) + + + def _connectionClosedTest(self, application, responseContent): + channel = DummyChannel() + + def applicationFactory(): + return application + + d, requestFactory = self.requestFactoryFactory() + + # Capture the request so we can disconnect it later on. + requests = [] + def requestFactoryWrapper(*a, **kw): + requests.append(requestFactory(*a, **kw)) + return requests[-1] + + def ebRendered(ignored): + errors = self.flushLoggedErrors(RuntimeError) + self.assertEquals(len(errors), 1) + + response = channel.transport.written.getvalue() + self.assertTrue(response.startswith('HTTP/1.1 200 OK')) + # Chunked transfer-encoding makes this a little messy. + self.assertIn(responseContent, response) + d.addErrback(ebRendered) + + request = self.lowLevelRender( + requestFactoryWrapper, applicationFactory, + lambda: channel, 'GET', '1.1', [], [''], None, []) + + # By now the connection should be closed. + self.assertTrue(channel.transport.disconnected) + # Give it a little push to go the rest of the way. + requests[0].connectionLost(Failure(ConnectionLost("All gone"))) + + return d + + + def test_applicationExceptionAfterWrite(self): + """ + If the application raises an exception after the response status has + already been sent then the connection is closed and the exception is + logged. 
+ """ + responseContent = ( + 'Some bytes, triggering the server to start sending the response') + + def application(environ, startResponse): + startResponse('200 OK', []) + yield responseContent + raise RuntimeError("This application had some error.") + return self._connectionClosedTest(application, responseContent) + + + def test_applicationCloseException(self): + """ + If the application returns a closeable iterator and the C{close} method + raises an exception when called then the connection is still closed and + the exception is logged. + """ + responseContent = 'foo' + + class Application(object): + def __init__(self, environ, startResponse): + startResponse('200 OK', []) + + def __iter__(self): + yield responseContent + + def close(self): + raise RuntimeError("This application had some error.") + + return self._connectionClosedTest(Application, responseContent) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_xml.py b/vendor/Twisted-10.0.0/twisted/web/test/test_xml.py new file mode 100644 index 000000000000..3a0067b3bef3 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_xml.py @@ -0,0 +1,1105 @@ +# -*- test-case-name: twisted.web.test.test_xml -*- +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Some fairly inadequate testcases for Twisted XML support. +""" + +from twisted.trial.unittest import TestCase +from twisted.web import sux +from twisted.web import microdom +from twisted.web import domhelpers + + +class Sux0r(sux.XMLParser): + def __init__(self): + self.tokens = [] + + def getTagStarts(self): + return [token for token in self.tokens if token[0] == 'start'] + + def gotTagStart(self, name, attrs): + self.tokens.append(("start", name, attrs)) + + def gotText(self, text): + self.tokens.append(("text", text)) + +class SUXTest(TestCase): + + def testBork(self): + s = "" + ms = Sux0r() + ms.connectionMade() + ms.dataReceived(s) + self.failUnlessEqual(len(ms.getTagStarts()),3) + + +class MicroDOMTest(TestCase): + + def test_leadingTextDropping(self): + """ + Make sure that if there's no top-level node lenient-mode won't + drop leading text that's outside of any elements. + """ + s = "Hi orders!
                              Well.
                              " + d = microdom.parseString(s, beExtremelyLenient=True) + self.assertEquals(d.firstChild().toxml(), + 'Hi orders!
                              Well.
') + + def test_trailingTextDropping(self): + """ + Ensure that *trailing* text in a mal-formed + no-top-level-element document is not dropped. + """ + s = "
                              Hi orders!" + d = microdom.parseString(s, beExtremelyLenient=True) + self.assertEquals(d.firstChild().toxml(), + '
Hi orders!') + + + def test_noTags(self): + """ + A string with nothing that looks like a tag at all should just + be parsed as body text. + """ + s = "Hi orders!" + d = microdom.parseString(s, beExtremelyLenient=True) + self.assertEquals(d.firstChild().toxml(), + "Hi orders!") + + + def test_surroundingCrap(self): + """ + If a document is surrounded by non-xml text, the text should + remain in the XML. + """ + s = "Hi
                              orders!" + d = microdom.parseString(s, beExtremelyLenient=True) + self.assertEquals(d.firstChild().toxml(), + "Hi
                              orders!") + + + def testCaseSensitiveSoonCloser(self): + s = """ + +

                              + +

                              + +

                              + This is an insane set of text nodes that should NOT be gathered under + the A tag above. +

                              + + """ + d = microdom.parseString(s, beExtremelyLenient=1) + l = domhelpers.findNodesNamed(d.documentElement, 'a') + n = domhelpers.gatherTextNodes(l[0],1).replace(' ',' ') + self.assertEquals(n.find('insane'), -1) + + + def test_lenientParenting(self): + """ + Test that C{parentNode} attributes are set to meaningful values when + we are parsing HTML that lacks a root node. + """ + # Spare the rod, ruin the child. + s = "

                              " + d = microdom.parseString(s, beExtremelyLenient=1) + self.assertIdentical(d.documentElement, + d.documentElement.firstChild().parentNode) + + + def test_lenientParentSingle(self): + """ + Test that the C{parentNode} attribute is set to a meaningful value + when we parse an HTML document that has a non-Element root node. + """ + s = "Hello" + d = microdom.parseString(s, beExtremelyLenient=1) + self.assertIdentical(d.documentElement, + d.documentElement.firstChild().parentNode) + + + def testUnEntities(self): + s = """ + + This HTML goes between Stupid <=CrAzY!=> Dumb. + + """ + d = microdom.parseString(s, beExtremelyLenient=1) + n = domhelpers.gatherTextNodes(d) + self.assertNotEquals(n.find('>'), -1) + + def testEmptyError(self): + self.assertRaises(sux.ParseError, microdom.parseString, "") + + def testTameDocument(self): + s = """ + + + + + test + + + + + """ + d = microdom.parseString(s) + self.assertEquals( + domhelpers.gatherTextNodes(d.documentElement).strip() ,'test') + + def testAwfulTagSoup(self): + s = """ + + I send you this message to have your advice!!!!</titl e + </headd> + + <body bgcolor alink hlink vlink> + + <h1><BLINK>SALE</blINK> TWENTY MILLION EMAILS & FUR COAT NOW + FREE WITH `ENLARGER'</h1> + + YES THIS WONDERFUL AWFER IS NOW HERER!!! + + <script LANGUAGE="javascript"> +function give_answers() { +if (score < 70) { +alert("I hate you"); +}} + </script><a href=/foo.com/lalal name=foo>lalal</a> + </body> + </HTML> + """ + d = microdom.parseString(s, beExtremelyLenient=1) + l = domhelpers.findNodesNamed(d.documentElement, 'blink') + self.assertEquals(len(l), 1) + + def testScriptLeniency(self): + s = """ + <script>(foo < bar) and (bar > foo)</script> + <script language="javascript">foo </scrip bar </script> + <script src="foo"> + <script src="foo">baz</script> + <script /><script></script> + """ + d = microdom.parseString(s, beExtremelyLenient=1) + self.assertEquals(d.firstChild().firstChild().firstChild().data, + "(foo < bar) and (bar > foo)") + self.assertEquals( + d.firstChild().getElementsByTagName("script")[1].firstChild().data, + "foo </scrip bar ") + + def testScriptLeniencyIntelligence(self): + # if there is comment or CDATA in script, the autoquoting in bEL mode + # should not happen + s = """<script><!-- lalal --></script>""" + self.assertEquals( + microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s) + s = """<script><![CDATA[lalal]]></script>""" + self.assertEquals( + microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s) + s = """<script> // <![CDATA[ + lalal + //]]></script>""" + self.assertEquals( + microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s) + + def testPreserveCase(self): + s = '<eNcApSuLaTe><sUxor></sUxor><bOrk><w00T>TeXt</W00t></BoRk></EnCaPsUlAtE>' + s2 = s.lower().replace('text', 'TeXt') + # these are the only two option permutations that *can* parse the above + d = microdom.parseString(s, caseInsensitive=1, preserveCase=1) + d2 = microdom.parseString(s, caseInsensitive=1, preserveCase=0) + # caseInsensitive=0 preserveCase=0 is not valid, it's converted to + # caseInsensitive=0 preserveCase=1 + d3 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1) + d4 = microdom.parseString(s2, caseInsensitive=1, preserveCase=0) + d5 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1) + # this is slightly contrived, toxml() doesn't need to be identical + # for the documents to be equivalent (i.e. 
<b></b> to <b/>), + # however this assertion tests preserving case for start and + # end tags while still matching stuff like <bOrk></BoRk> + self.assertEquals(d.documentElement.toxml(), s) + self.assert_(d.isEqualToDocument(d2), "%r != %r" % (d.toxml(), d2.toxml())) + self.assert_(d2.isEqualToDocument(d3), "%r != %r" % (d2.toxml(), d3.toxml())) + # caseInsensitive=0 on the left, NOT perserveCase=1 on the right + ## XXX THIS TEST IS TURNED OFF UNTIL SOMEONE WHO CARES ABOUT FIXING IT DOES + #self.failIf(d3.isEqualToDocument(d2), "%r == %r" % (d3.toxml(), d2.toxml())) + self.assert_(d3.isEqualToDocument(d4), "%r != %r" % (d3.toxml(), d4.toxml())) + self.assert_(d4.isEqualToDocument(d5), "%r != %r" % (d4.toxml(), d5.toxml())) + + def testDifferentQuotes(self): + s = '<test a="a" b=\'b\' />' + d = microdom.parseString(s) + e = d.documentElement + self.assertEquals(e.getAttribute('a'), 'a') + self.assertEquals(e.getAttribute('b'), 'b') + + def testLinebreaks(self): + s = '<test \na="a"\n\tb="#b" />' + d = microdom.parseString(s) + e = d.documentElement + self.assertEquals(e.getAttribute('a'), 'a') + self.assertEquals(e.getAttribute('b'), '#b') + + def testMismatchedTags(self): + for s in '<test>', '<test> </tset>', '</test>': + self.assertRaises(microdom.MismatchedTags, microdom.parseString, s) + + def testComment(self): + s = "<bar><!--<foo />--></bar>" + d = microdom.parseString(s) + e = d.documentElement + self.assertEquals(e.nodeName, "bar") + c = e.childNodes[0] + self.assert_(isinstance(c, microdom.Comment)) + self.assertEquals(c.value, "<foo />") + c2 = c.cloneNode() + self.assert_(c is not c2) + self.assertEquals(c2.toxml(), "<!--<foo />-->") + + def testText(self): + d = microdom.parseString("<bar>xxxx</bar>").documentElement + text = d.childNodes[0] + self.assert_(isinstance(text, microdom.Text)) + self.assertEquals(text.value, "xxxx") + clone = text.cloneNode() + self.assert_(clone is not text) + self.assertEquals(clone.toxml(), "xxxx") + + def testEntities(self): + nodes = microdom.parseString("<b>& AB;</b>").documentElement.childNodes + self.assertEquals(len(nodes), 2) + self.assertEquals(nodes[0].data, "&") + self.assertEquals(nodes[1].data, " AB;") + self.assertEquals(nodes[0].cloneNode().toxml(), "&") + for n in nodes: + self.assert_(isinstance(n, microdom.EntityReference)) + + def testCData(self): + s = '<x><![CDATA[</x>\r\n & foo]]></x>' + cdata = microdom.parseString(s).documentElement.childNodes[0] + self.assert_(isinstance(cdata, microdom.CDATASection)) + self.assertEquals(cdata.data, "</x>\r\n & foo") + self.assertEquals(cdata.cloneNode().toxml(), "<![CDATA[</x>\r\n & foo]]>") + + def testSingletons(self): + s = "<foo><b/><b /><b\n/></foo>" + s2 = "<foo><b/><b/><b/></foo>" + nodes = microdom.parseString(s).documentElement.childNodes + nodes2 = microdom.parseString(s2).documentElement.childNodes + self.assertEquals(len(nodes), 3) + for (n, n2) in zip(nodes, nodes2): + self.assert_(isinstance(n, microdom.Element)) + self.assertEquals(n.nodeName, "b") + self.assert_(n.isEqualToNode(n2)) + + def testAttributes(self): + s = '<foo a="b" />' + node = microdom.parseString(s).documentElement + + self.assertEquals(node.getAttribute("a"), "b") + self.assertEquals(node.getAttribute("c"), None) + self.assert_(node.hasAttribute("a")) + self.assert_(not node.hasAttribute("c")) + a = node.getAttributeNode("a") + self.assertEquals(a.value, "b") + + node.setAttribute("foo", "bar") + self.assertEquals(node.getAttribute("foo"), "bar") + + def testChildren(self): + s = "<foo><bar /><baz 
/><bax>foo</bax></foo>" + d = microdom.parseString(s).documentElement + self.assertEquals([n.nodeName for n in d.childNodes], ["bar", "baz", "bax"]) + self.assertEquals(d.lastChild().nodeName, "bax") + self.assertEquals(d.firstChild().nodeName, "bar") + self.assert_(d.hasChildNodes()) + self.assert_(not d.firstChild().hasChildNodes()) + + def testMutate(self): + s = "<foo />" + s1 = '<foo a="b"><bar/><foo/></foo>' + s2 = '<foo a="b">foo</foo>' + d = microdom.parseString(s).documentElement + d1 = microdom.parseString(s1).documentElement + d2 = microdom.parseString(s2).documentElement + + d.appendChild(d.cloneNode()) + d.setAttribute("a", "b") + child = d.childNodes[0] + self.assertEquals(child.getAttribute("a"), None) + self.assertEquals(child.nodeName, "foo") + + d.insertBefore(microdom.Element("bar"), child) + self.assertEquals(d.childNodes[0].nodeName, "bar") + self.assertEquals(d.childNodes[1], child) + for n in d.childNodes: + self.assertEquals(n.parentNode, d) + self.assert_(d.isEqualToNode(d1)) + + d.removeChild(child) + self.assertEquals(len(d.childNodes), 1) + self.assertEquals(d.childNodes[0].nodeName, "bar") + + t = microdom.Text("foo") + d.replaceChild(t, d.firstChild()) + self.assertEquals(d.firstChild(), t) + self.assert_(d.isEqualToNode(d2)) + + + def test_replaceNonChild(self): + """ + L{Node.replaceChild} raises L{ValueError} if the node given to be + replaced is not a child of the node C{replaceChild} is called on. + """ + parent = microdom.parseString('<foo />') + orphan = microdom.parseString('<bar />') + replacement = microdom.parseString('<baz />') + + self.assertRaises( + ValueError, parent.replaceChild, replacement, orphan) + + + def testSearch(self): + s = "<foo><bar id='me' /><baz><foo /></baz></foo>" + s2 = "<fOo><bAr id='me' /><bAz><fOO /></bAz></fOo>" + d = microdom.parseString(s) + d2 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1) + d3 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1) + + root = d.documentElement + self.assertEquals(root.firstChild(), d.getElementById('me')) + self.assertEquals(d.getElementsByTagName("foo"), + [root, root.lastChild().firstChild()]) + + root = d2.documentElement + self.assertEquals(root.firstChild(), d2.getElementById('me')) + self.assertEquals(d2.getElementsByTagName('fOo'), [root]) + self.assertEquals(d2.getElementsByTagName('fOO'), + [root.lastChild().firstChild()]) + self.assertEquals(d2.getElementsByTagName('foo'), []) + + root = d3.documentElement + self.assertEquals(root.firstChild(), d3.getElementById('me')) + self.assertEquals(d3.getElementsByTagName('FOO'), + [root, root.lastChild().firstChild()]) + self.assertEquals(d3.getElementsByTagName('fOo'), + [root, root.lastChild().firstChild()]) + + def testDoctype(self): + s = ('<?xml version="1.0"?>' + '<!DOCTYPE foo PUBLIC "baz" "http://www.example.com/example.dtd">' + '<foo></foo>') + s2 = '<foo/>' + d = microdom.parseString(s) + d2 = microdom.parseString(s2) + self.assertEquals(d.doctype, + 'foo PUBLIC "baz" "http://www.example.com/example.dtd"') + self.assertEquals(d.toxml(), s) + self.failIf(d.isEqualToDocument(d2)) + self.failUnless(d.documentElement.isEqualToNode(d2.documentElement)) + + samples = [("<img/>", "<img />"), + ("<foo A='b'>x</foo>", '<foo A="b">x</foo>'), + ("<foo><BAR /></foo>", "<foo><BAR></BAR></foo>"), + ("<foo>hello there & yoyoy</foo>", + "<foo>hello there & yoyoy</foo>"), + ] + + def testOutput(self): + for s, out in self.samples: + d = microdom.parseString(s, caseInsensitive=0) + d2 = microdom.parseString(out, 
caseInsensitive=0) + testOut = d.documentElement.toxml() + self.assertEquals(out, testOut) + self.assert_(d.isEqualToDocument(d2)) + + def testErrors(self): + for s in ["<foo>&am</foo>", "<foo", "<f>&</f>", "<() />"]: + self.assertRaises(Exception, microdom.parseString, s) + + def testCaseInsensitive(self): + s = "<foo a='b'><BAx>x</bax></FOO>" + s2 = '<foo a="b"><bax>x</bax></foo>' + s3 = "<FOO a='b'><BAx>x</BAx></FOO>" + s4 = "<foo A='b'>x</foo>" + d = microdom.parseString(s) + d2 = microdom.parseString(s2) + d3 = microdom.parseString(s3, caseInsensitive=1) + d4 = microdom.parseString(s4, caseInsensitive=1, preserveCase=1) + d5 = microdom.parseString(s4, caseInsensitive=1, preserveCase=0) + d6 = microdom.parseString(s4, caseInsensitive=0, preserveCase=0) + out = microdom.parseString(s).documentElement.toxml() + self.assertRaises(microdom.MismatchedTags, microdom.parseString, + s, caseInsensitive=0) + self.assertEquals(out, s2) + self.failUnless(d.isEqualToDocument(d2)) + self.failUnless(d.isEqualToDocument(d3)) + self.failUnless(d4.documentElement.hasAttribute('a')) + self.failIf(d6.documentElement.hasAttribute('a')) + self.assertEquals(d4.documentElement.toxml(), '<foo A="b">x</foo>') + self.assertEquals(d5.documentElement.toxml(), '<foo a="b">x</foo>') + def testEatingWhitespace(self): + s = """<hello> + </hello>""" + d = microdom.parseString(s) + self.failUnless(not d.documentElement.hasChildNodes(), + d.documentElement.childNodes) + self.failUnless(d.isEqualToDocument(microdom.parseString('<hello></hello>'))) + + def testLenientAmpersand(self): + prefix = "<?xml version='1.0'?>" + # we use <pre> so space will be preserved + for i, o in [("&", "&"), + ("& ", "& "), + ("&", "&"), + ("&hello monkey", "&hello monkey")]: + d = microdom.parseString("%s<pre>%s</pre>" + % (prefix, i), beExtremelyLenient=1) + self.assertEquals(d.documentElement.toxml(), "<pre>%s</pre>" % o) + # non-space preserving + d = microdom.parseString("<t>hello & there</t>", beExtremelyLenient=1) + self.assertEquals(d.documentElement.toxml(), "<t>hello & there</t>") + + def testInsensitiveLenient(self): + # testing issue #537 + d = microdom.parseString( + "<?xml version='1.0'?><bar><xA><y>c</Xa> <foo></bar>", + beExtremelyLenient=1) + self.assertEquals(d.documentElement.firstChild().toxml(), "<xa><y>c</y></xa>") + + def testLaterCloserSimple(self): + s = "<ul><li>foo<li>bar<li>baz</ul>" + d = microdom.parseString(s, beExtremelyLenient=1) + expected = "<ul><li>foo</li><li>bar</li><li>baz</li></ul>" + actual = d.documentElement.toxml() + self.assertEquals(expected, actual) + + def testLaterCloserCaseInsensitive(self): + s = "<DL><p><DT>foo<DD>bar</DL>" + d = microdom.parseString(s, beExtremelyLenient=1) + expected = "<dl><p></p><dt>foo</dt><dd>bar</dd></dl>" + actual = d.documentElement.toxml() + self.assertEquals(expected, actual) + + def testLaterCloserTable(self): + s = ("<table>" + "<tr><th>name<th>value<th>comment" + "<tr><th>this<td>tag<td>soup" + "<tr><th>must<td>be<td>handled" + "</table>") + expected = ("<table>" + "<tr><th>name</th><th>value</th><th>comment</th></tr>" + "<tr><th>this</th><td>tag</td><td>soup</td></tr>" + "<tr><th>must</th><td>be</td><td>handled</td></tr>" + "</table>") + d = microdom.parseString(s, beExtremelyLenient=1) + actual = d.documentElement.toxml() + self.assertEquals(expected, actual) + testLaterCloserTable.todo = "Table parsing needs to be fixed." 
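The lenient-parsing tests above (testLaterCloserSimple, testLaterCloserCaseInsensitive, and the skipped table case) all exercise how microdom's beExtremelyLenient mode supplies missing end tags for tag soup. As a rough standalone sketch, not part of the patch, the snippet below assumes the vendored twisted.web.microdom is importable under the Python 2 interpreter this tree targets:

# Sketch only: feed microdom unclosed list items and let the
# extremely-lenient parser insert the missing </li> end tags.
from twisted.web import microdom

soup = "<ul><li>foo<li>bar<li>baz</ul>"
document = microdom.parseString(soup, beExtremelyLenient=1)

# Per testLaterCloserSimple, serialization should yield:
#   <ul><li>foo</li><li>bar</li><li>baz</li></ul>
print document.documentElement.toxml()

Without the lenient flag, input with unclosed or mismatched tags raises microdom.MismatchedTags instead, as testMismatchedTags above demonstrates.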
+ + def testLaterCloserDL(self): + s = ("<dl>" + "<dt>word<dd>definition" + "<dt>word<dt>word<dd>definition<dd>definition" + "</dl>") + expected = ("<dl>" + "<dt>word</dt><dd>definition</dd>" + "<dt>word</dt><dt>word</dt><dd>definition</dd><dd>definition</dd>" + "</dl>") + d = microdom.parseString(s, beExtremelyLenient=1) + actual = d.documentElement.toxml() + self.assertEquals(expected, actual) + + def testLaterCloserDL2(self): + s = ("<dl>" + "<dt>word<dd>definition<p>more definition" + "<dt>word" + "</dl>") + expected = ("<dl>" + "<dt>word</dt><dd>definition<p>more definition</p></dd>" + "<dt>word</dt>" + "</dl>") + d = microdom.parseString(s, beExtremelyLenient=1) + actual = d.documentElement.toxml() + self.assertEquals(expected, actual) + + testLaterCloserDL2.todo = "unclosed <p> messes it up." + + def testUnicodeTolerance(self): + import struct + s = '<foo><bar><baz /></bar></foo>' + j =(u'<?xml version="1.0" encoding="UCS-2" ?>\r\n<JAPANESE>\r\n' + u'<TITLE>\u5c02\u9580\u5bb6\u30ea\u30b9\u30c8 ') + j2=('\xff\xfe<\x00?\x00x\x00m\x00l\x00 \x00v\x00e\x00r\x00s\x00i\x00o' + '\x00n\x00=\x00"\x001\x00.\x000\x00"\x00 \x00e\x00n\x00c\x00o\x00d' + '\x00i\x00n\x00g\x00=\x00"\x00U\x00C\x00S\x00-\x002\x00"\x00 \x00?' + '\x00>\x00\r\x00\n\x00<\x00J\x00A\x00P\x00A\x00N\x00E\x00S\x00E' + '\x00>\x00\r\x00\n\x00<\x00T\x00I\x00T\x00L\x00E\x00>\x00\x02\\' + '\x80\x95\xb6[\xea0\xb90\xc80 \x00<\x00/\x00T\x00I\x00T\x00L\x00E' + '\x00>\x00<\x00/\x00J\x00A\x00P\x00A\x00N\x00E\x00S\x00E\x00>\x00') + def reverseBytes(s): + fmt = str(len(s) / 2) + 'H' + return struct.pack('<' + fmt, *struct.unpack('>' + fmt, s)) + urd = microdom.parseString(reverseBytes(s.encode('UTF-16'))) + ud = microdom.parseString(s.encode('UTF-16')) + sd = microdom.parseString(s) + self.assert_(ud.isEqualToDocument(sd)) + self.assert_(ud.isEqualToDocument(urd)) + ud = microdom.parseString(j) + urd = microdom.parseString(reverseBytes(j2)) + sd = microdom.parseString(j2) + self.assert_(ud.isEqualToDocument(sd)) + self.assert_(ud.isEqualToDocument(urd)) + + # test that raw text still gets encoded + # test that comments get encoded + j3=microdom.parseString(u'') + hdr='' + div=microdom.lmx().text(u'\u221a', raw=1).node + de=j3.documentElement + de.appendChild(div) + de.appendChild(j3.createComment(u'\u221a')) + self.assertEquals(j3.toxml(), hdr+ + u'
                              \u221a
                              '.encode('utf8')) + + def testNamedChildren(self): + tests = {"asdfadsf" + "" : 3, + 'asdf' : 0, + '' : 1, + } + for t in tests.keys(): + node = microdom.parseString(t).documentElement + result = domhelpers.namedChildren(node, 'bar') + self.assertEquals(len(result), tests[t]) + if result: + self.assert_(hasattr(result[0], 'tagName')) + + def testCloneNode(self): + s = 'x' + node = microdom.parseString(s).documentElement + clone = node.cloneNode(deep=1) + self.failIfEquals(node, clone) + self.assertEquals(len(node.childNodes), len(clone.childNodes)) + c1, c2 = node.firstChild(), clone.firstChild() + self.failIfEquals(c1, c2) + self.assertEquals(len(c1.childNodes), len(c2.childNodes)) + self.failIfEquals(c1.firstChild(), c2.firstChild()) + self.assertEquals(s, clone.toxml()) + self.assertEquals(node.namespace, clone.namespace) + + def testCloneDocument(self): + s = ('' + '') + + node = microdom.parseString(s) + clone = node.cloneNode(deep=1) + self.failIfEquals(node, clone) + self.assertEquals(len(node.childNodes), len(clone.childNodes)) + self.assertEquals(s, clone.toxml()) + + self.failUnless(clone.isEqualToDocument(node)) + self.failUnless(node.isEqualToDocument(clone)) + + + def testLMX(self): + n = microdom.Element("p") + lmx = microdom.lmx(n) + lmx.text("foo") + b = lmx.b(a="c") + b.foo()["z"] = "foo" + b.foo() + b.add("bar", c="y") + + s = '

                              foo

                              ' + self.assertEquals(s, n.toxml()) + + def testDict(self): + n = microdom.Element("p") + d = {n : 1} # will fail if Element is unhashable + + def testEscaping(self): + # issue 590 + raw = "&'some \"stuff\"', " + cooked = "&'some "stuff"', <what up?>" + esc1 = microdom.escape(raw) + self.assertEquals(esc1, cooked) + self.assertEquals(microdom.unescape(esc1), raw) + + def testNamespaces(self): + s = ''' + + + + here is some space + + + + ''' + d = microdom.parseString(s) + # at least make sure it doesn't traceback + s2 = d.toprettyxml() + self.assertEquals(d.documentElement.namespace, + "base") + self.assertEquals(d.documentElement.getElementsByTagName("y")[0].namespace, + "base") + self.assertEquals( + d.documentElement.getElementsByTagName("y")[1].getAttributeNS('base','q'), + '1') + + d2 = microdom.parseString(s2) + self.assertEquals(d2.documentElement.namespace, + "base") + self.assertEquals(d2.documentElement.getElementsByTagName("y")[0].namespace, + "base") + self.assertEquals( + d2.documentElement.getElementsByTagName("y")[1].getAttributeNS('base','q'), + '1') + + def testNamespaceDelete(self): + """ + Test that C{toxml} can support xml structures that remove namespaces. + """ + s1 = ('' + '') + s2 = microdom.parseString(s1).toxml() + self.assertEquals(s1, s2) + + def testNamespaceInheritance(self): + """ + Check that unspecified namespace is a thing separate from undefined + namespace. This test added after discovering some weirdness in Lore. + """ + # will only work if childNodes is mutated. not sure why. + child = microdom.Element('ol') + parent = microdom.Element('div', namespace='http://www.w3.org/1999/xhtml') + parent.childNodes = [child] + self.assertEquals(parent.toxml(), + '
                                ') + + def test_prefixedTags(self): + """ + XML elements with a prefixed name as per upper level tag definition + have a start-tag of C{""} and an end-tag of + C{""}. + + Refer to U{http://www.w3.org/TR/xml-names/#ns-using} for details. + """ + outerNamespace = "http://example.com/outer" + innerNamespace = "http://example.com/inner" + + document = microdom.Document() + # Create the root in one namespace. Microdom will probably make this + # the default namespace. + root = document.createElement("root", namespace=outerNamespace) + + # Give the root some prefixes to use. + root.addPrefixes({innerNamespace: "inner"}) + + # Append a child to the root from the namespace that prefix is bound + # to. + tag = document.createElement("tag", namespace=innerNamespace) + + # Give that tag a child too. This way we test rendering of tags with + # children and without children. + child = document.createElement("child", namespace=innerNamespace) + + tag.appendChild(child) + root.appendChild(tag) + document.appendChild(root) + + # ok, the xml should appear like this + xmlOk = ( + '' + '' + '' + '') + + xmlOut = document.toxml() + self.assertEquals(xmlOut, xmlOk) + + + def test_prefixPropagation(self): + """ + Children of prefixed tags respect the default namespace at the point + where they are rendered. Specifically, they are not influenced by the + prefix of their parent as that prefix has no bearing on them. + + See U{http://www.w3.org/TR/xml-names/#scoping} for details. + + To further clarify the matter, the following:: + + + + + + + + + + Should become this after all the namespace declarations have been + I{moved up}:: + + + + + + + + + """ + outerNamespace = "http://example.com/outer" + innerNamespace = "http://example.com/inner" + + document = microdom.Document() + # creates a root element + root = document.createElement("root", namespace=outerNamespace) + document.appendChild(root) + + # Create a child with a specific namespace with a prefix bound to it. + root.addPrefixes({innerNamespace: "inner"}) + mytag = document.createElement("mytag",namespace=innerNamespace) + root.appendChild(mytag) + + # Create a child of that which has the outer namespace. + mysubtag = document.createElement("mysubtag", namespace=outerNamespace) + mytag.appendChild(mysubtag) + + xmlOk = ( + '' + '' + '' + '' + '' + '' + ) + xmlOut = document.toxml() + self.assertEquals(xmlOut, xmlOk) + + + +class TestBrokenHTML(TestCase): + """ + Tests for when microdom encounters very bad HTML and C{beExtremelyLenient} + is enabled. These tests are inspired by some HTML generated in by a mailer, + which breaks up very long lines by splitting them with '!\n '. The expected + behaviour is loosely modelled on the way Firefox treats very bad HTML. + """ + + def checkParsed(self, input, expected, beExtremelyLenient=1): + """ + Check that C{input}, when parsed, produces a DOM where the XML + of the document element is equal to C{expected}. + """ + output = microdom.parseString(input, + beExtremelyLenient=beExtremelyLenient) + self.assertEquals(output.documentElement.toxml(), expected) + + + def test_brokenAttributeName(self): + """ + Check that microdom does its best to handle broken attribute names. + The important thing is that it doesn't raise an exception. + """ + input = '

                                Foo

                                ' + expected = ('

                                ' + 'Foo

                                ') + self.checkParsed(input, expected) + + + def test_brokenAttributeValue(self): + """ + Check that microdom encompasses broken attribute values. + """ + input = '

                                Foo

                                ' + expected = '

                                Foo

                                ' + self.checkParsed(input, expected) + + + def test_brokenOpeningTag(self): + """ + Check that microdom does its best to handle broken opening tags. + The important thing is that it doesn't raise an exception. + """ + input = '

                                Hello World!

                                ' + expected = '

                                Hello World!

                                ' + self.checkParsed(input, expected) + + + def test_brokenSelfClosingTag(self): + """ + Check that microdom does its best to handle broken self-closing tags + The important thing is that it doesn't raise an exception. + """ + self.checkParsed('', + '') + self.checkParsed('', '') + + + def test_brokenClosingTag(self): + """ + Check that microdom does its best to handle broken closing tags. + The important thing is that it doesn't raise an exception. + """ + input = '

                                Hello World!

                                ' + expected = '

                                Hello World!

                                ' + self.checkParsed(input, expected) + input = '

                                Hello World!

                                ' + self.checkParsed(input, expected) + input = '

                                Hello World!

                                ' + self.checkParsed(input, expected) + input = '

                                Hello World!

                                ' + expected = '

                                Hello World!

                                ' + self.checkParsed(input, expected) + + + + +class NodeTests(TestCase): + """ + Tests for L{Node}. + """ + def test_isNodeEqualTo(self): + """ + L{Node.isEqualToNode} returns C{True} if and only if passed a L{Node} + with the same children. + """ + # A node is equal to itself + node = microdom.Node(object()) + self.assertTrue(node.isEqualToNode(node)) + another = microdom.Node(object()) + # Two nodes with no children are equal + self.assertTrue(node.isEqualToNode(another)) + node.appendChild(microdom.Node(object())) + # A node with no children is not equal to a node with a child + self.assertFalse(node.isEqualToNode(another)) + another.appendChild(microdom.Node(object())) + # A node with a child and no grandchildren is equal to another node + # with a child and no grandchildren. + self.assertTrue(node.isEqualToNode(another)) + # A node with a child and a grandchild is not equal to another node + # with a child and no grandchildren. + node.firstChild().appendChild(microdom.Node(object())) + self.assertFalse(node.isEqualToNode(another)) + # A node with a child and a grandchild is equal to another node with a + # child and a grandchild. + another.firstChild().appendChild(microdom.Node(object())) + self.assertTrue(node.isEqualToNode(another)) + + def test_validChildInstance(self): + """ + Children of L{Node} instances must also be L{Node} instances. + """ + node = microdom.Node() + child = microdom.Node() + # Node.appendChild() only accepts Node instances. + node.appendChild(child) + self.assertRaises(TypeError, node.appendChild, None) + # Node.insertBefore() only accepts Node instances. + self.assertRaises(TypeError, node.insertBefore, child, None) + self.assertRaises(TypeError, node.insertBefore, None, child) + self.assertRaises(TypeError, node.insertBefore, None, None) + # Node.removeChild() only accepts Node instances. + node.removeChild(child) + self.assertRaises(TypeError, node.removeChild, None) + # Node.replaceChild() only accepts Node instances. + self.assertRaises(TypeError, node.replaceChild, child, None) + self.assertRaises(TypeError, node.replaceChild, None, child) + self.assertRaises(TypeError, node.replaceChild, None, None) + + +class DocumentTests(TestCase): + """ + Tests for L{Document}. + """ + doctype = 'foo PUBLIC "baz" "http://www.example.com/example.dtd"' + + def test_isEqualToNode(self): + """ + L{Document.isEqualToNode} returns C{True} if and only if passed a + L{Document} with the same C{doctype} and C{documentElement}. + """ + # A document is equal to itself + document = microdom.Document() + self.assertTrue(document.isEqualToNode(document)) + # A document without a doctype or documentElement is equal to another + # document without a doctype or documentElement. + another = microdom.Document() + self.assertTrue(document.isEqualToNode(another)) + # A document with a doctype is not equal to a document without a + # doctype. + document.doctype = self.doctype + self.assertFalse(document.isEqualToNode(another)) + # Two documents with the same doctype are equal + another.doctype = self.doctype + self.assertTrue(document.isEqualToNode(another)) + # A document with a documentElement is not equal to a document without + # a documentElement + document.appendChild(microdom.Node(object())) + self.assertFalse(document.isEqualToNode(another)) + # Two documents with equal documentElements are equal. 
+ another.appendChild(microdom.Node(object())) + self.assertTrue(document.isEqualToNode(another)) + # Two documents with documentElements which are not equal are not + # equal. + document.documentElement.appendChild(microdom.Node(object())) + self.assertFalse(document.isEqualToNode(another)) + + + def test_childRestriction(self): + """ + L{Document.appendChild} raises L{ValueError} if the document already + has a child. + """ + document = microdom.Document() + child = microdom.Node() + another = microdom.Node() + document.appendChild(child) + self.assertRaises(ValueError, document.appendChild, another) + + + +class EntityReferenceTests(TestCase): + """ + Tests for L{EntityReference}. + """ + def test_isEqualToNode(self): + """ + L{EntityReference.isEqualToNode} returns C{True} if and only if passed + a L{EntityReference} with the same C{eref}. + """ + self.assertTrue( + microdom.EntityReference('quot').isEqualToNode( + microdom.EntityReference('quot'))) + self.assertFalse( + microdom.EntityReference('quot').isEqualToNode( + microdom.EntityReference('apos'))) + + + +class CharacterDataTests(TestCase): + """ + Tests for L{CharacterData}. + """ + def test_isEqualToNode(self): + """ + L{CharacterData.isEqualToNode} returns C{True} if and only if passed a + L{CharacterData} with the same value. + """ + self.assertTrue( + microdom.CharacterData('foo').isEqualToNode( + microdom.CharacterData('foo'))) + self.assertFalse( + microdom.CharacterData('foo').isEqualToNode( + microdom.CharacterData('bar'))) + + + +class CommentTests(TestCase): + """ + Tests for L{Comment}. + """ + def test_isEqualToNode(self): + """ + L{Comment.isEqualToNode} returns C{True} if and only if passed a + L{Comment} with the same value. + """ + self.assertTrue( + microdom.Comment('foo').isEqualToNode( + microdom.Comment('foo'))) + self.assertFalse( + microdom.Comment('foo').isEqualToNode( + microdom.Comment('bar'))) + + + +class TextTests(TestCase): + """ + Tests for L{Text}. + """ + def test_isEqualToNode(self): + """ + L{Text.isEqualToNode} returns C{True} if and only if passed a L{Text} + which represents the same data. + """ + self.assertTrue( + microdom.Text('foo', raw=True).isEqualToNode( + microdom.Text('foo', raw=True))) + self.assertFalse( + microdom.Text('foo', raw=True).isEqualToNode( + microdom.Text('foo', raw=False))) + self.assertFalse( + microdom.Text('foo', raw=True).isEqualToNode( + microdom.Text('bar', raw=True))) + + + +class CDATASectionTests(TestCase): + """ + Tests for L{CDATASection}. + """ + def test_isEqualToNode(self): + """ + L{CDATASection.isEqualToNode} returns C{True} if and only if passed a + L{CDATASection} which represents the same data. + """ + self.assertTrue( + microdom.CDATASection('foo').isEqualToNode( + microdom.CDATASection('foo'))) + self.assertFalse( + microdom.CDATASection('foo').isEqualToNode( + microdom.CDATASection('bar'))) + + + +class ElementTests(TestCase): + """ + Tests for L{Element}. + """ + def test_isEqualToNode(self): + """ + L{Element.isEqualToNode} returns C{True} if and only if passed a + L{Element} with the same C{nodeName}, C{namespace}, C{childNodes}, and + C{attributes}. + """ + self.assertTrue( + microdom.Element( + 'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode( + microdom.Element( + 'foo', {'a': 'b'}, object(), namespace='bar'))) + + # Elements with different nodeName values do not compare equal. 
+ self.assertFalse( + microdom.Element( + 'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode( + microdom.Element( + 'bar', {'a': 'b'}, object(), namespace='bar'))) + + # Elements with different namespaces do not compare equal. + self.assertFalse( + microdom.Element( + 'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode( + microdom.Element( + 'foo', {'a': 'b'}, object(), namespace='baz'))) + + # Elements with different childNodes do not compare equal. + one = microdom.Element('foo', {'a': 'b'}, object(), namespace='bar') + two = microdom.Element('foo', {'a': 'b'}, object(), namespace='bar') + two.appendChild(microdom.Node(object())) + self.assertFalse(one.isEqualToNode(two)) + + # Elements with different attributes do not compare equal. + self.assertFalse( + microdom.Element( + 'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode( + microdom.Element( + 'foo', {'a': 'c'}, object(), namespace='bar'))) diff --git a/vendor/Twisted-10.0.0/twisted/web/test/test_xmlrpc.py b/vendor/Twisted-10.0.0/twisted/web/test/test_xmlrpc.py new file mode 100644 index 000000000000..0d1b3618e3d9 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/test/test_xmlrpc.py @@ -0,0 +1,510 @@ +# -*- test-case-name: twisted.web.test.test_xmlrpc -*- +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for XML-RPC support in L{twisted.web.xmlrpc}. +""" + +import xmlrpclib + +from twisted.trial import unittest +from twisted.web import xmlrpc +from twisted.web.xmlrpc import XMLRPC, addIntrospection, _QueryFactory +from twisted.web import server, static, client, error, http +from twisted.internet import reactor, defer +from twisted.internet.error import ConnectionDone +from twisted.python import failure + + +class TestRuntimeError(RuntimeError): + pass + +class TestValueError(ValueError): + pass + + + +class Test(XMLRPC): + + # If you add xmlrpc_ methods to this class, go change test_listMethods + # below. + + FAILURE = 666 + NOT_FOUND = 23 + SESSION_EXPIRED = 42 + + def xmlrpc_echo(self, arg): + return arg + + # the doc string is part of the test + def xmlrpc_add(self, a, b): + """ + This function add two numbers. + """ + return a + b + + xmlrpc_add.signature = [['int', 'int', 'int'], + ['double', 'double', 'double']] + + # the doc string is part of the test + def xmlrpc_pair(self, string, num): + """ + This function puts the two arguments in an array. + """ + return [string, num] + + xmlrpc_pair.signature = [['array', 'string', 'int']] + + # the doc string is part of the test + def xmlrpc_defer(self, x): + """Help for defer.""" + return defer.succeed(x) + + def xmlrpc_deferFail(self): + return defer.fail(TestValueError()) + + # don't add a doc string, it's part of the test + def xmlrpc_fail(self): + raise TestRuntimeError + + def xmlrpc_fault(self): + return xmlrpc.Fault(12, "hello") + + def xmlrpc_deferFault(self): + return defer.fail(xmlrpc.Fault(17, "hi")) + + def xmlrpc_complex(self): + return {"a": ["b", "c", 12, []], "D": "foo"} + + def xmlrpc_dict(self, map, key): + return map[key] + xmlrpc_dict.help = 'Help for dict.' + + def _getFunction(self, functionPath): + try: + return XMLRPC._getFunction(self, functionPath) + except xmlrpc.NoSuchFunction: + if functionPath.startswith("SESSION"): + raise xmlrpc.Fault(self.SESSION_EXPIRED, + "Session non-existant/expired.") + else: + raise + + +class TestAuthHeader(Test): + """ + This is used to get the header info so that we can test + authentication. 
+ """ + def __init__(self): + Test.__init__(self) + self.request = None + + def render(self, request): + self.request = request + return Test.render(self, request) + + def xmlrpc_authinfo(self): + return self.request.getUser(), self.request.getPassword() + + +class TestQueryProtocol(xmlrpc.QueryProtocol): + """ + QueryProtocol for tests that saves headers received inside the factory. + """ + def handleHeader(self, key, val): + self.factory.headers[key.lower()] = val + + +class TestQueryFactory(xmlrpc._QueryFactory): + """ + QueryFactory using L{TestQueryProtocol} for saving headers. + """ + protocol = TestQueryProtocol + + def __init__(self, *args, **kwargs): + self.headers = {} + xmlrpc._QueryFactory.__init__(self, *args, **kwargs) + + +class XMLRPCTestCase(unittest.TestCase): + + def setUp(self): + self.p = reactor.listenTCP(0, server.Site(Test()), + interface="127.0.0.1") + self.port = self.p.getHost().port + self.factories = [] + + def tearDown(self): + self.factories = [] + return self.p.stopListening() + + def queryFactory(self, *args, **kwargs): + """ + Specific queryFactory for proxy that uses our custom + L{TestQueryFactory}, and save factories. + """ + factory = TestQueryFactory(*args, **kwargs) + self.factories.append(factory) + return factory + + def proxy(self): + p = xmlrpc.Proxy("http://127.0.0.1:%d/" % self.port) + p.queryFactory = self.queryFactory + return p + + def test_results(self): + inputOutput = [ + ("add", (2, 3), 5), + ("defer", ("a",), "a"), + ("dict", ({"a": 1}, "a"), 1), + ("pair", ("a", 1), ["a", 1]), + ("complex", (), {"a": ["b", "c", 12, []], "D": "foo"})] + + dl = [] + for meth, args, outp in inputOutput: + d = self.proxy().callRemote(meth, *args) + d.addCallback(self.assertEquals, outp) + dl.append(d) + return defer.DeferredList(dl, fireOnOneErrback=True) + + def test_errors(self): + """ + Verify that for each way a method exposed via XML-RPC can fail, the + correct 'Content-type' header is set in the response and that the + client-side Deferred is errbacked with an appropriate C{Fault} + instance. + """ + dl = [] + for code, methodName in [(666, "fail"), (666, "deferFail"), + (12, "fault"), (23, "noSuchMethod"), + (17, "deferFault"), (42, "SESSION_TEST")]: + d = self.proxy().callRemote(methodName) + d = self.assertFailure(d, xmlrpc.Fault) + d.addCallback(lambda exc, code=code: + self.assertEquals(exc.faultCode, code)) + dl.append(d) + d = defer.DeferredList(dl, fireOnOneErrback=True) + def cb(ign): + for factory in self.factories: + self.assertEquals(factory.headers['content-type'], + 'text/xml') + self.flushLoggedErrors(TestRuntimeError, TestValueError) + d.addCallback(cb) + return d + + def test_errorGet(self): + """ + A classic GET on the xml server should return a NOT_ALLOWED. + """ + d = client.getPage("http://127.0.0.1:%d/" % (self.port,)) + d = self.assertFailure(d, error.Error) + d.addCallback( + lambda exc: self.assertEquals(int(exc.args[0]), http.NOT_ALLOWED)) + return d + + def test_errorXMLContent(self): + """ + Test that an invalid XML input returns an L{xmlrpc.Fault}. + """ + d = client.getPage("http://127.0.0.1:%d/" % (self.port,), + method="POST", postdata="foo") + def cb(result): + self.assertRaises(xmlrpc.Fault, xmlrpclib.loads, result) + d.addCallback(cb) + return d + + + def test_datetimeRoundtrip(self): + """ + If an L{xmlrpclib.DateTime} is passed as an argument to an XML-RPC + call and then returned by the server unmodified, the result should + be equal to the original object. 
+ """ + when = xmlrpclib.DateTime() + d = self.proxy().callRemote("echo", when) + d.addCallback(self.assertEqual, when) + return d + + + def test_doubleEncodingError(self): + """ + If it is not possible to encode a response to the request (for example, + because L{xmlrpclib.dumps} raises an exception when encoding a + L{Fault}) the exception which prevents the response from being + generated is logged and the request object is finished anyway. + """ + d = self.proxy().callRemote("echo", "") + + # *Now* break xmlrpclib.dumps. Hopefully the client already used it. + def fakeDumps(*args, **kwargs): + raise RuntimeError("Cannot encode anything at all!") + self.patch(xmlrpclib, 'dumps', fakeDumps) + + # It doesn't matter how it fails, so long as it does. Also, it happens + # to fail with an implementation detail exception right now, not + # something suitable as part of a public interface. + d = self.assertFailure(d, Exception) + + def cbFailed(ignored): + # The fakeDumps exception should have been logged. + self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1) + d.addCallback(cbFailed) + return d + + + +class XMLRPCTestCase2(XMLRPCTestCase): + """ + Test with proxy that doesn't add a slash. + """ + + def proxy(self): + p = xmlrpc.Proxy("http://127.0.0.1:%d" % self.port) + p.queryFactory = self.queryFactory + return p + + + +class XMLRPCAllowNoneTestCase(unittest.TestCase): + """ + Test with allowNone set to True. + + These are not meant to be exhaustive serialization tests, since + L{xmlrpclib} does all of the actual serialization work. They are just + meant to exercise a few codepaths to make sure we are calling into + xmlrpclib correctly. + """ + + def setUp(self): + self.p = reactor.listenTCP( + 0, server.Site(Test(allowNone=True)), interface="127.0.0.1") + self.port = self.p.getHost().port + + + def tearDown(self): + return self.p.stopListening() + + + def proxy(self): + return xmlrpc.Proxy("http://127.0.0.1:%d" % (self.port,), + allowNone=True) + + + def test_deferredNone(self): + """ + Test that passing a C{None} as an argument to a remote method and + returning a L{Deferred} which fires with C{None} properly passes + over the network if allowNone is set to True. + """ + d = self.proxy().callRemote('defer', None) + d.addCallback(self.assertEquals, None) + return d + + + def test_dictWithNoneValue(self): + """ + Test that return a C{dict} with C{None} as a value works properly. + """ + d = self.proxy().callRemote('defer', {'a': None}) + d.addCallback(self.assertEquals, {'a': None}) + return d + + + +class XMLRPCTestAuthenticated(XMLRPCTestCase): + """ + Test with authenticated proxy. We run this with the same inout/ouput as + above. 
+ """ + user = "username" + password = "asecret" + + def setUp(self): + self.p = reactor.listenTCP(0, server.Site(TestAuthHeader()), + interface="127.0.0.1") + self.port = self.p.getHost().port + self.factories = [] + + + def test_authInfoInURL(self): + p = xmlrpc.Proxy("http://%s:%s@127.0.0.1:%d/" % ( + self.user, self.password, self.port)) + d = p.callRemote("authinfo") + d.addCallback(self.assertEquals, [self.user, self.password]) + return d + + + def test_explicitAuthInfo(self): + p = xmlrpc.Proxy("http://127.0.0.1:%d/" % ( + self.port,), self.user, self.password) + d = p.callRemote("authinfo") + d.addCallback(self.assertEquals, [self.user, self.password]) + return d + + + def test_explicitAuthInfoOverride(self): + p = xmlrpc.Proxy("http://wrong:info@127.0.0.1:%d/" % ( + self.port,), self.user, self.password) + d = p.callRemote("authinfo") + d.addCallback(self.assertEquals, [self.user, self.password]) + return d + + +class XMLRPCTestIntrospection(XMLRPCTestCase): + + def setUp(self): + xmlrpc = Test() + addIntrospection(xmlrpc) + self.p = reactor.listenTCP(0, server.Site(xmlrpc),interface="127.0.0.1") + self.port = self.p.getHost().port + self.factories = [] + + def test_listMethods(self): + + def cbMethods(meths): + meths.sort() + self.assertEqual( + meths, + ['add', 'complex', 'defer', 'deferFail', + 'deferFault', 'dict', 'echo', 'fail', 'fault', + 'pair', 'system.listMethods', + 'system.methodHelp', + 'system.methodSignature']) + + d = self.proxy().callRemote("system.listMethods") + d.addCallback(cbMethods) + return d + + def test_methodHelp(self): + inputOutputs = [ + ("defer", "Help for defer."), + ("fail", ""), + ("dict", "Help for dict.")] + + dl = [] + for meth, expected in inputOutputs: + d = self.proxy().callRemote("system.methodHelp", meth) + d.addCallback(self.assertEquals, expected) + dl.append(d) + return defer.DeferredList(dl, fireOnOneErrback=True) + + def test_methodSignature(self): + inputOutputs = [ + ("defer", ""), + ("add", [['int', 'int', 'int'], + ['double', 'double', 'double']]), + ("pair", [['array', 'string', 'int']])] + + dl = [] + for meth, expected in inputOutputs: + d = self.proxy().callRemote("system.methodSignature", meth) + d.addCallback(self.assertEquals, expected) + dl.append(d) + return defer.DeferredList(dl, fireOnOneErrback=True) + + +class XMLRPCClientErrorHandling(unittest.TestCase): + """ + Test error handling on the xmlrpc client. + """ + def setUp(self): + self.resource = static.Data( + "This text is not a valid XML-RPC response.", + "text/plain") + self.resource.isLeaf = True + self.port = reactor.listenTCP(0, server.Site(self.resource), + interface='127.0.0.1') + + def tearDown(self): + return self.port.stopListening() + + def test_erroneousResponse(self): + """ + Test that calling the xmlrpc client on a static http server raises + an exception. + """ + proxy = xmlrpc.Proxy("http://127.0.0.1:%d/" % + (self.port.getHost().port,)) + return self.assertFailure(proxy.callRemote("someMethod"), Exception) + + + +class TestQueryFactoryParseResponse(unittest.TestCase): + """ + Test the behaviour of L{_QueryFactory.parseResponse}. + """ + + def setUp(self): + # The _QueryFactory that we are testing. We don't care about any + # of the constructor parameters. + self.queryFactory = _QueryFactory( + path=None, host=None, method='POST', user=None, password=None, + allowNone=False, args=()) + # An XML-RPC response that will parse without raising an error. + self.goodContents = xmlrpclib.dumps(('',)) + # An 'XML-RPC response' that will raise a parsing error. 
+ self.badContents = 'invalid xml' + # A dummy 'reason' to pass to clientConnectionLost. We don't care + # what it is. + self.reason = failure.Failure(ConnectionDone()) + + + def test_parseResponseCallbackSafety(self): + """ + We can safely call L{_QueryFactory.clientConnectionLost} as a callback + of L{_QueryFactory.parseResponse}. + """ + d = self.queryFactory.deferred + # The failure mode is that this callback raises an AlreadyCalled + # error. We have to add it now so that it gets called synchronously + # and triggers the race condition. + d.addCallback(self.queryFactory.clientConnectionLost, self.reason) + self.queryFactory.parseResponse(self.goodContents) + return d + + + def test_parseResponseErrbackSafety(self): + """ + We can safely call L{_QueryFactory.clientConnectionLost} as an errback + of L{_QueryFactory.parseResponse}. + """ + d = self.queryFactory.deferred + # The failure mode is that this callback raises an AlreadyCalled + # error. We have to add it now so that it gets called synchronously + # and triggers the race condition. + d.addErrback(self.queryFactory.clientConnectionLost, self.reason) + self.queryFactory.parseResponse(self.badContents) + return d + + + def test_badStatusErrbackSafety(self): + """ + We can safely call L{_QueryFactory.clientConnectionLost} as an errback + of L{_QueryFactory.badStatus}. + """ + d = self.queryFactory.deferred + # The failure mode is that this callback raises an AlreadyCalled + # error. We have to add it now so that it gets called synchronously + # and triggers the race condition. + d.addErrback(self.queryFactory.clientConnectionLost, self.reason) + self.queryFactory.badStatus('status', 'message') + return d + + def test_parseResponseWithoutData(self): + """ + Some server can send a response without any data: + L{_QueryFactory.parseResponse} should catch the error and call the + result errback. + """ + content = """ + + + + + +""" + d = self.queryFactory.deferred + self.queryFactory.parseResponse(content) + return self.assertFailure(d, IndexError) diff --git a/vendor/Twisted-10.0.0/twisted/web/topfiles/NEWS b/vendor/Twisted-10.0.0/twisted/web/topfiles/NEWS new file mode 100644 index 000000000000..e0cd0dc5dc09 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/topfiles/NEWS @@ -0,0 +1,309 @@ +Ticket numbers in this file can be looked up by visiting +http://twistedmatrix.com/trac/ticket/ + +Twisted Web 10.0.0 (2010-03-01) +=============================== + +Features +-------- + - Twisted Web in 60 Seconds, a series of short tutorials with self- + contained examples on a range of common web topics, is now a part + of the Twisted Web howto documentation. (#4192) + +Bugfixes +-------- + - Data and File from twisted.web.static and + twisted.web.distrib.UserDirectory will now only generate a 200 + response for GET or HEAD requests. + twisted.web.client.HTTPPageGetter will no longer ignore the case of + a request method when considering whether to apply special HEAD + processing to a response. (#446) + + - twisted.web.http.HTTPClient now supports multi-line headers. + (#2062) + + - Resources served via twisted.web.distrib will no longer encounter a + Banana error when writing more than 640kB at once to the request + object. (#3212) + + - The Error, PageRedirect, and InfiniteRedirection exception in + twisted.web now initialize an empty message parameter by mapping + the HTTP status code parameter to a descriptive string. Previously + the lookup would always fail, leaving message empty. 
(#3806) + + - The 'wsgi.input' WSGI environment object now supports -1 and None + as arguments to the read and readlines methods. (#4114) + + - twisted.web.wsgi doesn't unquote QUERY_STRING anymore, thus + complying with the WSGI reference implementation. (#4143) + + - The HTTP proxy will no longer pass on keep-alive request headers + from the client, preventing pages from loading then "hanging" + (leaving the connection open with no hope of termination). (#4179) + +Deprecations and Removals +------------------------- + - Remove '--static' option from twistd web, that served as an alias + for the '--path' option. (#3907) + +Other +----- + - #3784, #4216, #4242 + + +Twisted Web 9.0.0 (2009-11-24) +============================== + +Features +-------- + - There is now an iweb.IRequest interface which specifies the interface that + request objects provide (#3416) + - downloadPage now supports the same cookie, redirect, and timeout features + that getPage supports (#2971) + - A chapter about WSGI has been added to the twisted.web documentation (#3510) + - The HTTP auth support in the web server now allows anonymous sessions by + logging in with ANONYMOUS credentials when no Authorization header is + provided in a request (#3924, #3936) + - HTTPClientFactory now accepts a parameter to enable a common deviation from + the HTTP 1.1 standard by responding to redirects in a POSTed request with a + GET instead of another POST (#3624) + - A new basic HTTP/1.1 client API is included in twisted.web.client.Agent + (#886, #3987) + +Fixes +----- + - Requests for "insecure" children of a static.File (such as paths containing + encoded directory separators) will now result in a 404 instead of a 500 + (#3549, #3469) + - When specifying a followRedirect argument to the getPage function, the state + of redirect-following for other getPage calls should now be unaffected. It + was previously overwriting a class attribute which would affect outstanding + getPage calls (#3192) + - Downloading an URL of the form "http://example.com:/" will now work, + ignoring the extraneous colon (#2402) + - microdom's appendChild method will no longer issue a spurious warning, and + microdom's methods in general should now issue more meaningful exceptions + when invalid parameters are passed (#3421) + - WSGI applications will no longer have spurious Content-Type headers added to + their responses by the twisted.web server. 
In addition, WSGI applications + will no longer be able to specify the server-restricted headers Server and + Date (#3569) + - http_headers.Headers now normalizes the case of raw headers passed directly + to it in the same way that it normalizes the headers passed to setRawHeaders + (#3557) + - The distrib module no longer relies on the deprecated woven package (#3559) + - twisted.web.domhelpers now works with both microdom and minidom (#3600) + - twisted.web servers will now ignore invalid If-Modified-Since headers instead + of returning a 500 error (#3601) + - Certain request-bound memory and file resources are cleaned up slightly + sooner by the request when the connection is lost (#1621, #3176) + - xmlrpclib.DateTime objects should now correctly round-trip over twisted.web's + XMLRPC support in all supported versions of Python, and errors during error + serialization will no longer hang a twisted.web XMLRPC response (#2446) + - request.content should now always be seeked to the beginning when + request.process is called, so application code should never need to seek + back manually (#3585) + - Fetching a child of static.File with a double-slash in the URL (such as + "example//foo.html") should now return a 404 instead of a traceback and + 500 error (#3631) + - downloadPage will now fire a Failure on its returned Deferred instead of + indicating success when the connection is prematurely lost (#3645) + - static.File will now provide a 404 instead of a 500 error when it was + constructed with a non-existent file (#3634) + - microdom should now serialize namespaces correctly (#3672) + - The HTTP Auth support resource wrapper should no longer corrupt requests and + cause them to skip a segment in the request path (#3679) + - The twisted.web WSGI support should now include leading slashes in PATH_INFO, + and SCRIPT_NAME will be empty if the application is at the root of the + resource tree. This means that WSGI applications should no longer generate + URLs with double-slashes in them even if they naively concatenate the values + (#3721) + - WSGI applications should now receive the requesting client's IP in the + REMOTE_ADDR environment variable (#3730) + - The distrib module should work again. 
It was unfortunately broken with the + refactoring of twisted.web's header support (#3697) + - static.File now supports multiple ranges specified in the Range header + (#3574) + - static.File should now generate a correct Content-Length value when the + requested Range value doesn't fit entirely within the file's contents (#3814) + - Attempting to call request.finish() after the connection has been lost will + now immediately raise a RuntimeError (#4013) + - An HTTP-auth resource should now be able to directly render the wrapped + avatar, whereas before it would only allow retrieval of child resources + (#4014) + - twisted.web's wsgi support should no longer attempt to call request.finish + twice, which would cause errors in certain cases (#4025) + - WSGI applications should now be able to handle requests with large bodies + (#4029) + - Exceptions raised from WSGI applications should now more reliably be turned + into 500 errors on the HTTP level (#4019) + - DeferredResource now correctly passes through exceptions raised from the + wrapped resource, instead of turning them all into 500 errors (#3932) + - Agent.request now generates a Host header when no headers are passed at + (#4131) + +Deprecations and Removals +------------------------- + - The unmaintained and untested twisted.web.monitor module was removed (#2763) + - The twisted.web.woven package has been removed (#1522) + - All of the error resources in twisted.web.error are now in + twisted.web.resource, and accessing them through twisted.web.error is now + deprecated (#3035) + - To facilitate a simplification of the timeout logic in server.Session, + various things have been deprecated (#3457) + - the loopFactory attribute is now ignored + - the checkExpired method now does nothing + - the lifetime parameter to startCheckingExpiration is now ignored + - The twisted.web.trp module is now deprecated (#2030) + +Other +----- + - #2763, #3540, #3575, #3610, #3605, #1176, #3539, #3750, #3761, #3779, #2677, + #3782, #3904, #3919, #3418, #3990, #1404, #4050 + + +Web 8.2.0 (2008-12-16) +====================== + +Features +-------- + - The web server can now deal with multi-value headers in the new attributes of + Request, requestHeaders and responseHeaders (#165) + - There is now a resource-wrapper which implements HTTP Basic and Digest auth + in terms of twisted.cred (#696) + - It's now possible to limit the number of redirects that client.getPage will + follow (#2412) + - The directory-listing code no longer uses Woven (#3257) + - static.File now supports Range headers with a single range (#1493) + - twisted.web now has a rudimentary WSGI container (#2753) + - The web server now supports chunked encoding in requests (#3385) + +Fixes +----- + - The xmlrpc client now raises an error when the server sends an empty + response (#3399) + - HTTPPageGetter no longer duplicates default headers when they're explicitly + overridden in the headers parameter (#1382) + - The server will no longer timeout clients which are still sending request + data (#1903) + - microdom's isEqualToNode now returns False when the nodes aren't equal + (#2542) + +Deprecations and Removals +------------------------- + + - Request.headers and Request.received_headers are not quite deprecated, but + they are discouraged in favor of requestHeaders and responseHeaders (#165) + +Other +----- + - #909, #687, #2938, #1152, #2930, #2025, #2683, #3471 + + +8.1.0 (2008-05-18) +================== + +Fixes +----- + + - Fixed an XMLRPC bug whereby sometimes a callRemote Deferred 
would + accidentally be fired twice when a connection was lost during the handling of + a response (#3152) + - Fixed a bug in the "Using Twisted Web" document which prevented an example + resource from being renderable (#3147) + - The deprecated mktap API is no longer used (#3127) + + +8.0.0 (2008-03-17) +================== + +Features +-------- + - Add support to twisted.web.client.getPage for the HTTP HEAD method. (#2750) + +Fixes +----- + - Set content-type in xmlrpc responses to "text/xml" (#2430) + - Add more error checking in the xmlrpc.XMLRPC render method, and enforce + POST requests. (#2505) + - Reject unicode input to twisted.web.client._parse to reject invalid + unicode URLs early. (#2628) + - Correctly re-quote URL path segments when generating an URL string to + return from Request.prePathURL. (#2934) + - Make twisted.web.proxy.ProxyClientFactory close the connection when + reporting a 501 error. (#1089) + - Fix twisted.web.proxy.ReverseProxyResource to specify the port in the + host header if different from 80. (#1117) + - Change twisted.web.proxy.ReverseProxyResource so that it correctly encodes + the request URI it sends on to the server for which it is a proxy. (#3013) + - Make "twistd web --personal" use PBServerFactory (#2681) + +Misc +---- + - #1996, #2382, #2211, #2633, #2634, #2640, #2752, #238, #2905 + + +0.7.0 (2007-01-02) +================== + +Features +-------- + - Python 2.5 is now supported (#1867) + - twisted.web.xmlrpc now supports the xml-rpc extension type + in both the server and the client (#469) + +Fixes +----- + - Microdom and SUX now manages certain malformed XML more resiliently + (#1984, #2225, #2298) + - twisted.web.client.getPage can now deal with an URL of the form + "http://example.com" (no trailing slash) (#1080) + - The HTTP server now allows (invalid) URLs with multiple question + marks (#1550) + - '=' can now be in the value of a cookie (#1051) + - Microdom now correctly handles xmlns="" (#2184) + +Deprecations and Removals +------------------------- + - websetroot was removed, because it wasn't working anyway (#945) + - woven.guard no longer supports the old twisted.cred API (#1440) + +Other +----- +The following changes are minor or closely related to other changes. + + - #1636, #1637, #1638, #1936, #1883, #447 + + +0.6.0 (2006-05-21) +================== + +Features +-------- + - Basic auth support for the XMLRPC client (#1474). + +Fixes +----- + - More correct datetime parsing. + - Efficiency improvements (#974) + - Handle popular non-RFC compliant formats for If-Modified-Since + headers (#976). + - Improve support for certain buggy CGI scripts. + - CONTENT_LENGTH is now available to CGI scripts. + - Support for even worse HTML in microdom (#1358). + - Trying to view a user's home page when the user doesn't have a + ~/public_html no longer displays a traceback (#551). + - Misc: #543, #1011, #1005, #1287, #1337, #1383, #1079, #1492, #1189, + #737, #872. 
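
For context, the XML-RPC client features listed above are exercised through
twisted.web.xmlrpc.Proxy (added later in this patch). A minimal sketch of a
call, assuming a purely hypothetical endpoint URL, credentials, and an 'echo'
method on the remote server:

    from twisted.internet import reactor
    from twisted.web.xmlrpc import Proxy

    # Hypothetical endpoint; basic-auth credentials may be embedded in the
    # URL or passed as the user/password arguments to Proxy.
    proxy = Proxy('http://user:secret@example.com:7080/RPC2')

    def printResult(value):
        print value            # the deserialized XML-RPC response
        reactor.stop()

    def printError(error):
        print 'error', error   # remote failures arrive as a Fault
        reactor.stop()

    # callRemote returns a Deferred that fires with the method's result.
    proxy.callRemote('echo', 'hello, world').addCallbacks(printResult, printError)
    reactor.run()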
+ + +0.5.0 +===== + - Client properly reports timeouts as error + - "Socially deprecate" woven + - Fix memory leak in _c_urlarg library + - Stop using _c_urlarg library + - Fix 'gzip' and 'bzip2' content-encodings + - Escape log entries so remote user cannot corrupt the log + - Commented out range support because it's broken + - Fix HEAD responses without content-length diff --git a/vendor/Twisted-10.0.0/twisted/web/topfiles/README b/vendor/Twisted-10.0.0/twisted/web/topfiles/README new file mode 100644 index 000000000000..c245997f7225 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/topfiles/README @@ -0,0 +1 @@ +Twisted Web 10.0.0 diff --git a/vendor/Twisted-10.0.0/twisted/web/topfiles/setup.py b/vendor/Twisted-10.0.0/twisted/web/topfiles/setup.py new file mode 100644 index 000000000000..15eefc0ae05e --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/topfiles/setup.py @@ -0,0 +1,30 @@ +# Copyright (c) 2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +import sys + +try: + from twisted.python import dist +except ImportError: + raise SystemExit("twisted.python.dist module not found. Make sure you " + "have installed the Twisted core package before " + "attempting to install any other Twisted projects.") + +if __name__ == '__main__': + dist.setup( + twisted_subproject="web", + scripts=dist.getScripts("web"), + # metadata + name="Twisted Web", + description="Twisted web server, programmable in Python.", + author="Twisted Matrix Laboratories", + author_email="twisted-python@twistedmatrix.com", + maintainer="James Knight", + url="http://twistedmatrix.com/trac/wiki/TwistedWeb", + license="MIT", + long_description="""\ +Twisted Web is a complete web server, aimed at hosting web +applications using Twisted and Python, but fully able to serve static +pages, also. +""", + ) diff --git a/vendor/Twisted-10.0.0/twisted/web/trp.py b/vendor/Twisted-10.0.0/twisted/web/trp.py new file mode 100644 index 000000000000..e9cfcd3fd5ce --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/trp.py @@ -0,0 +1,23 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +I contain ResourceUnpickler, which will unpickle any python object +named with the file extension .trp. +""" + +import warnings +from pickle import Unpickler + +_msg = ("is deprecated as of Twisted 9.0. Resource persistence " + "is beyond the scope of Twisted Web.") + +warnings.warn("twisted.web.trp " + _msg , DeprecationWarning, stacklevel=2) + +def ResourceUnpickler(path, registry = None): + warnings.warn( + "twisted.web.trp.ResourceUnpickler " + _msg , + DeprecationWarning, stacklevel=2) + fl = open(path) + result = Unpickler(fl).load() + return result diff --git a/vendor/Twisted-10.0.0/twisted/web/twcgi.py b/vendor/Twisted-10.0.0/twisted/web/twcgi.py new file mode 100644 index 000000000000..c2868985c859 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/twcgi.py @@ -0,0 +1,253 @@ +# -*- test-case-name: twisted.web.test.test_cgi -*- +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +I hold resource classes and helper classes that deal with CGI scripts. 
+""" + +# System Imports +import string +import os +import sys +import urllib + +# Twisted Imports +from twisted.web import http +from twisted.internet import reactor, protocol +from twisted.spread import pb +from twisted.python import log, filepath +from twisted.web import resource, server, static + + +class CGIDirectory(resource.Resource, filepath.FilePath): + def __init__(self, pathname): + resource.Resource.__init__(self) + filepath.FilePath.__init__(self, pathname) + + def getChild(self, path, request): + fnp = self.child(path) + if not fnp.exists(): + return static.File.childNotFound + elif fnp.isdir(): + return CGIDirectory(fnp.path) + else: + return CGIScript(fnp.path) + return resource.NoResource() + + def render(self, request): + notFound = resource.NoResource( + "CGI directories do not support directory listing.") + return notFound.render(request) + +class CGIScript(resource.Resource): + """I represent a CGI script. + + My implementation is complex due to the fact that it requires asynchronous + IPC with an external process with an unpleasant protocol. + """ + isLeaf = 1 + def __init__(self, filename, registry=None): + """Initialize, with the name of a CGI script file. + """ + self.filename = filename + + def render(self, request): + """Do various things to conform to the CGI specification. + + I will set up the usual slew of environment variables, then spin off a + process. + """ + script_name = "/"+string.join(request.prepath, '/') + python_path = string.join(sys.path, os.pathsep) + serverName = string.split(request.getRequestHostname(), ':')[0] + env = {"SERVER_SOFTWARE": server.version, + "SERVER_NAME": serverName, + "GATEWAY_INTERFACE": "CGI/1.1", + "SERVER_PROTOCOL": request.clientproto, + "SERVER_PORT": str(request.getHost().port), + "REQUEST_METHOD": request.method, + "SCRIPT_NAME": script_name, # XXX + "SCRIPT_FILENAME": self.filename, + "REQUEST_URI": request.uri, + } + + client = request.getClient() + if client is not None: + env['REMOTE_HOST'] = client + ip = request.getClientIP() + if ip is not None: + env['REMOTE_ADDR'] = ip + pp = request.postpath + if pp: + env["PATH_INFO"] = "/"+string.join(pp, '/') + + if hasattr(request, "content"): + # request.content is either a StringIO or a TemporaryFile, and + # the file pointer is sitting at the beginning (seek(0,0)) + request.content.seek(0,2) + length = request.content.tell() + request.content.seek(0,0) + env['CONTENT_LENGTH'] = str(length) + + qindex = string.find(request.uri, '?') + if qindex != -1: + qs = env['QUERY_STRING'] = request.uri[qindex+1:] + if '=' in qs: + qargs = [] + else: + qargs = [urllib.unquote(x) for x in qs.split('+')] + else: + env['QUERY_STRING'] = '' + qargs = [] + + # Propogate HTTP headers + for title, header in request.getAllHeaders().items(): + envname = string.upper(string.replace(title, '-', '_')) + if title not in ('content-type', 'content-length'): + envname = "HTTP_" + envname + env[envname] = header + # Propogate our environment + for key, value in os.environ.items(): + if not env.has_key(key): + env[key] = value + # And they're off! + self.runProcess(env, request, qargs) + return server.NOT_DONE_YET + + def runProcess(self, env, request, qargs=[]): + p = CGIProcessProtocol(request) + reactor.spawnProcess(p, self.filename, [self.filename]+qargs, env, os.path.dirname(self.filename)) + + +class FilteredScript(CGIScript): + """I am a special version of a CGI script, that uses a specific executable. 
+ + This is useful for interfacing with other scripting languages that adhere + to the CGI standard (cf. PHPScript). My 'filter' attribute specifies what + executable to run, and my 'filename' init parameter describes which script + to pass to the first argument of that script. + """ + + filter = '/usr/bin/cat' + + def runProcess(self, env, request, qargs=[]): + p = CGIProcessProtocol(request) + reactor.spawnProcess(p, self.filter, [self.filter, self.filename]+qargs, env, os.path.dirname(self.filename)) + + +class PHP3Script(FilteredScript): + """I am a FilteredScript that uses the default PHP3 command on most systems. + """ + + filter = '/usr/bin/php3' + + +class PHPScript(FilteredScript): + """I am a FilteredScript that uses the PHP command on most systems. + Sometimes, php wants the path to itself as argv[0]. This is that time. + """ + + filter = '/usr/bin/php4' + + +class CGIProcessProtocol(protocol.ProcessProtocol, pb.Viewable): + handling_headers = 1 + headers_written = 0 + headertext = '' + errortext = '' + + # Remotely relay producer interface. + + def view_resumeProducing(self, issuer): + self.resumeProducing() + + def view_pauseProducing(self, issuer): + self.pauseProducing() + + def view_stopProducing(self, issuer): + self.stopProducing() + + def resumeProducing(self): + self.transport.resumeProducing() + + def pauseProducing(self): + self.transport.pauseProducing() + + def stopProducing(self): + self.transport.loseConnection() + + def __init__(self, request): + self.request = request + + def connectionMade(self): + self.request.registerProducer(self, 1) + self.request.content.seek(0, 0) + content = self.request.content.read() + if content: + self.transport.write(content) + self.transport.closeStdin() + + def errReceived(self, error): + self.errortext = self.errortext + error + + def outReceived(self, output): + """ + Handle a chunk of input + """ + # First, make sure that the headers from the script are sorted + # out (we'll want to do some parsing on these later.) + if self.handling_headers: + text = self.headertext + output + headerEnds = [] + for delimiter in '\n\n','\r\n\r\n','\r\r', '\n\r\n': + headerend = string.find(text,delimiter) + if headerend != -1: + headerEnds.append((headerend, delimiter)) + if headerEnds: + headerEnds.sort() + headerend, delimiter = headerEnds[0] + self.headertext = text[:headerend] + # This is a final version of the header text. + linebreak = delimiter[:len(delimiter)/2] + headers = string.split(self.headertext, linebreak) + for header in headers: + br = string.find(header,': ') + if br == -1: + log.msg( 'ignoring malformed CGI header: %s' % header ) + else: + headerName = string.lower(header[:br]) + headerText = header[br+2:] + if headerName == 'location': + self.request.setResponseCode(http.FOUND) + if headerName == 'status': + try: + statusNum = int(headerText[:3]) #"XXX " sometimes happens. 
+ except: + log.msg( "malformed status header" ) + else: + self.request.setResponseCode(statusNum) + else: + self.request.setHeader(headerName,headerText) + output = text[headerend+len(delimiter):] + self.handling_headers = 0 + if self.handling_headers: + self.headertext = text + if not self.handling_headers: + self.request.write(output) + + def processEnded(self, reason): + if reason.value.exitCode != 0: + log.msg("CGI %s exited with exit code %s" % + (self.request.uri, reason.value.exitCode)) + if self.errortext: + log.msg("Errors from CGI %s: %s" % (self.request.uri, self.errortext)) + if self.handling_headers: + log.msg("Premature end of headers in %s: %s" % (self.request.uri, self.headertext)) + self.request.write( + resource.ErrorPage(http.INTERNAL_SERVER_ERROR, + "CGI Script Error", + "Premature end of script headers.").render(self.request)) + self.request.unregisterProducer() + self.request.finish() diff --git a/vendor/Twisted-10.0.0/twisted/web/util.py b/vendor/Twisted-10.0.0/twisted/web/util.py new file mode 100644 index 000000000000..6b4b222748d4 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/util.py @@ -0,0 +1,380 @@ +# -*- test-case-name: twisted.web.test.test_web -*- +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from cStringIO import StringIO +import linecache +import string, re +import types + +from twisted.python import failure + +from twisted.web import html, resource + + +def redirectTo(URL, request): + request.redirect(URL) + return """ + + + + + + click here + + +""" % {'url': URL} + +class Redirect(resource.Resource): + + isLeaf = 1 + + def __init__(self, url): + resource.Resource.__init__(self) + self.url = url + + def render(self, request): + return redirectTo(self.url, request) + + def getChild(self, name, request): + return self + +class ChildRedirector(Redirect): + isLeaf = 0 + def __init__(self, url): + # XXX is this enough? + if ((url.find('://') == -1) + and (not url.startswith('..')) + and (not url.startswith('/'))): + raise ValueError("It seems you've given me a redirect (%s) that is a child of myself! That's not good, it'll cause an infinite redirect." % url) + Redirect.__init__(self, url) + + def getChild(self, name, request): + newUrl = self.url + if not newUrl.endswith('/'): + newUrl += '/' + newUrl += name + return ChildRedirector(newUrl) + + +from twisted.python import urlpath + +class ParentRedirect(resource.Resource): + """ + I redirect to URLPath.here(). + """ + isLeaf = 1 + def render(self, request): + return redirectTo(urlpath.URLPath.fromRequest(request).here(), request) + + def getChild(self, request): + return self + + +class DeferredResource(resource.Resource): + """ + I wrap up a Deferred that will eventually result in a Resource + object. 
+ """ + isLeaf = 1 + + def __init__(self, d): + resource.Resource.__init__(self) + self.d = d + + def getChild(self, name, request): + return self + + def render(self, request): + self.d.addCallback(self._cbChild, request).addErrback( + self._ebChild,request) + from twisted.web.server import NOT_DONE_YET + return NOT_DONE_YET + + def _cbChild(self, child, request): + request.render(resource.getChildForRequest(child, request)) + + def _ebChild(self, reason, request): + request.processingFailed(reason) + return reason + + +stylesheet = """ + +""" + + +def htmlrepr(x): + return htmlReprTypes.get(type(x), htmlUnknown)(x) + +def saferepr(x): + try: + rx = repr(x) + except: + rx = "" % (x.__class__, id(x)) + return rx + +def htmlUnknown(x): + return ''+html.escape(saferepr(x))+'' + +def htmlDict(d): + io = StringIO() + w = io.write + w('
<div class="dict"><span class="heading">Dictionary instance @ %s</span>' % hex(id(d)))
+    w('<table class="dict">')
+    for k, v in d.items():
+
+        if k == '__builtins__':
+            v = 'builtin dictionary'
+        w('<tr><td class="dictKey">%s</td><td class="dictValue">%s</td></tr>' % (htmlrepr(k), htmlrepr(v)))
+    w('</table></div>')
+    return io.getvalue()
+
+def htmlList(l):
+    io = StringIO()
+    w = io.write
+    w('<div class="list"><span class="heading">List instance @ %s</span>' % hex(id(l)))
+    for i in l:
+        w('<div class="listItem">%s</div>' % htmlrepr(i))
+    w('</div>')
+    return io.getvalue()
+
+def htmlInst(i):
+    if hasattr(i, "__html__"):
+        s = i.__html__()
+    else:
+        s = html.escape(saferepr(i))
+    return '''<div class="instance"><span class="instanceName">%s instance @ %s</span>
+              <span class="instanceRepr">%s</span></div>
+              ''' % (i.__class__, hex(id(i)), s)
+
+def htmlString(s):
+    return html.escape(saferepr(s))
+
+def htmlFunc(f):
+    return ('<div class="function">' +
+            html.escape("function %s in file %s at line %s" %
+                        (f.__name__, f.func_code.co_filename,
+                         f.func_code.co_firstlineno))+
+            '</div>')
+
+htmlReprTypes = {types.DictType: htmlDict,
+                 types.ListType: htmlList,
+                 types.InstanceType: htmlInst,
+                 types.StringType: htmlString,
+                 types.FunctionType: htmlFunc}
+
+
+
+def htmlIndent(snippetLine):
+    ret = string.replace(string.replace(html.escape(string.rstrip(snippetLine)),
+                                        '  ', '&nbsp; '),
+                         '\t', '&nbsp; &nbsp; &nbsp; &nbsp; ')
+    return ret
+
+def formatFailure(myFailure):
+
+    exceptionHTML = """
+<p class="error">%s: %s</p>
+"""
+
+    frameHTML = """
+<div class="location">%s, line %s in <span class="function">%s</span></div>
+"""
+
+    snippetLineHTML = """
+<div class="snippetLine"><span class="lineno">%s</span><span class="code">%s</span></div>
+"""
+
+    snippetHighlightLineHTML = """
+<div class="snippetHighlightLine"><span class="lineno">%s</span><span class="code">%s</span></div>
+"""
+
+    variableHTML = """
+<tr class="varRow"><td class="varName">%s</td><td class="varValue">%s</td></tr>
+"""
+
+    if not isinstance(myFailure, failure.Failure):
+        return html.PRE(str(myFailure))
+    io = StringIO()
+    w = io.write
+    w(stylesheet)
+    w('<a href="#tracebackEnd">')
+    w(exceptionHTML % (html.escape(str(myFailure.type)),
+                       html.escape(str(myFailure.value))))
+    w('</a>')
+    w('<div class="stackTrace">')
+    first = 1
+    for method, filename, lineno, localVars, globalVars in myFailure.frames:
+        if filename == '<string>':
+            continue
+        if first:
+            w('<div class="frame firstFrame">')
+            first = 0
+        else:
+            w('<div class="frame">')
+        w(frameHTML % (filename, lineno, method))
+
+        w('<div class="snippet">')
+        textSnippet = ''
+        for snipLineNo in range(lineno-2, lineno+2):
+            snipLine = linecache.getline(filename, snipLineNo)
+            textSnippet += snipLine
+            snipLine = htmlIndent(snipLine)
+            if snipLineNo == lineno:
+                w(snippetHighlightLineHTML % (snipLineNo, snipLine))
+            else:
+                w(snippetLineHTML % (snipLineNo, snipLine))
+        w('</div>')
+
+        # Instance variables
+        for name, var in localVars:
+            if name == 'self' and hasattr(var, '__dict__'):
+                usedVars = [ (key, value) for (key, value) in var.__dict__.items()
+                             if re.search(r'\W'+'self.'+key+r'\W', textSnippet) ]
+                if usedVars:
+                    w('<div class="variables"><b>Self</b>')
+                    w('<table class="variables">')
+                    for key, value in usedVars:
+                        w(variableHTML % (key, htmlrepr(value)))
+                    w('</table></div>')
+                    break
+
+        # Local and global vars
+        for nm, varList in ('Locals', localVars), ('Globals', globalVars):
+            usedVars = [ (name, var) for (name, var) in varList
+                         if re.search(r'\W'+name+r'\W', textSnippet) ]
+            if usedVars:
+                w('<div class="variables"><b>%s</b><table class="variables">' % nm)
+                for name, var in usedVars:
+                    w(variableHTML % (name, htmlrepr(var)))
+                w('</table></div>')
+
+        w('</div>') # frame
+    w('</div>
                                ') # stacktrace + w(' ') + w(exceptionHTML % (html.escape(str(myFailure.type)), + html.escape(str(myFailure.value)))) + + return io.getvalue() diff --git a/vendor/Twisted-10.0.0/twisted/web/vhost.py b/vendor/Twisted-10.0.0/twisted/web/vhost.py new file mode 100644 index 000000000000..6cf83e124f36 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/vhost.py @@ -0,0 +1,135 @@ +# -*- test-case-name: twisted.web. +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +I am a virtual hosts implementation. +""" + +# Twisted Imports +from twisted.python import roots +from twisted.web import resource + + +class VirtualHostCollection(roots.Homogenous): + """Wrapper for virtual hosts collection. + + This exists for configuration purposes. + """ + entityType = resource.Resource + + def __init__(self, nvh): + self.nvh = nvh + + def listStaticEntities(self): + return self.nvh.hosts.items() + + def getStaticEntity(self, name): + return self.nvh.hosts.get(self) + + def reallyPutEntity(self, name, entity): + self.nvh.addHost(name, entity) + + def delEntity(self, name): + self.nvh.removeHost(name) + + +class NameVirtualHost(resource.Resource): + """I am a resource which represents named virtual hosts. + """ + + default = None + + def __init__(self): + """Initialize. + """ + resource.Resource.__init__(self) + self.hosts = {} + + def listStaticEntities(self): + return resource.Resource.listStaticEntities(self) + [("Virtual Hosts", VirtualHostCollection(self))] + + def getStaticEntity(self, name): + if name == "Virtual Hosts": + return VirtualHostCollection(self) + else: + return resource.Resource.getStaticEntity(self, name) + + def addHost(self, name, resrc): + """Add a host to this virtual host. + + This will take a host named `name', and map it to a resource + `resrc'. For example, a setup for our virtual hosts would be:: + + nvh.addHost('divunal.com', divunalDirectory) + nvh.addHost('www.divunal.com', divunalDirectory) + nvh.addHost('twistedmatrix.com', twistedMatrixDirectory) + nvh.addHost('www.twistedmatrix.com', twistedMatrixDirectory) + """ + self.hosts[name] = resrc + + def removeHost(self, name): + """Remove a host.""" + del self.hosts[name] + + def _getResourceForRequest(self, request): + """(Internal) Get the appropriate resource for the given host. + """ + hostHeader = request.getHeader('host') + if hostHeader == None: + return self.default or resource.NoResource() + else: + host = hostHeader.lower().split(':', 1)[0] + return (self.hosts.get(host, self.default) + or resource.NoResource("host %s not in vhost map" % repr(host))) + + def render(self, request): + """Implementation of resource.Resource's render method. + """ + resrc = self._getResourceForRequest(request) + return resrc.render(request) + + def getChild(self, path, request): + """Implementation of resource.Resource's getChild method. 
+ """ + resrc = self._getResourceForRequest(request) + if resrc.isLeaf: + request.postpath.insert(0,request.prepath.pop(-1)) + return resrc + else: + return resrc.getChildWithDefault(path, request) + +class _HostResource(resource.Resource): + + def getChild(self, path, request): + if ':' in path: + host, port = path.split(':', 1) + port = int(port) + else: + host, port = path, 80 + request.setHost(host, port) + prefixLen = 3+request.isSecure()+4+len(path)+len(request.prepath[-3]) + request.path = '/'+'/'.join(request.postpath) + request.uri = request.uri[prefixLen:] + del request.prepath[:3] + return request.site.getResourceFor(request) + + +class VHostMonsterResource(resource.Resource): + + """ + Use this to be able to record the hostname and method (http vs. https) + in the URL without disturbing your web site. If you put this resource + in a URL http://foo.com/bar then requests to + http://foo.com/bar/http/baz.com/something will be equivalent to + http://foo.com/something, except that the hostname the request will + appear to be accessing will be "baz.com". So if "baz.com" is redirecting + all requests for to foo.com, while foo.com is inaccessible from the outside, + then redirect and url generation will work correctly + """ + def getChild(self, path, request): + if path == 'http': + request.isSecure = lambda: 0 + elif path == 'https': + request.isSecure = lambda: 1 + return _HostResource() diff --git a/vendor/Twisted-10.0.0/twisted/web/wsgi.py b/vendor/Twisted-10.0.0/twisted/web/wsgi.py new file mode 100644 index 000000000000..cb18de379b31 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/wsgi.py @@ -0,0 +1,401 @@ +# Copyright (c) 2008-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +An implementation of +U{Web Resource Gateway Interface}. +""" + +__metaclass__ = type + +from sys import exc_info + +from zope.interface import implements + +from twisted.python.log import msg, err +from twisted.python.failure import Failure +from twisted.web.resource import IResource +from twisted.web.server import NOT_DONE_YET +from twisted.web.http import INTERNAL_SERVER_ERROR + + +class _ErrorStream: + """ + File-like object instances of which are used as the value for the + C{'wsgi.errors'} key in the C{environ} dictionary passed to the application + object. + + This simply passes writes on to L{logging} system as + error events from the C{'wsgi'} system. In the future, it may be desirable + to expose more information in the events it logs, such as the application + object which generated the message. + """ + def write(self, bytes): + """ + Generate an event for the logging system with the given bytes as the + message. + + This is called in a WSGI application thread, not the I/O thread. + """ + msg(bytes, system='wsgi', isError=True) + + + def writelines(self, iovec): + """ + Join the given lines and pass them to C{write} to be handled in the + usual way. + + This is called in a WSGI application thread, not the I/O thread. + + @param iovec: A C{list} of C{'\\n'}-terminated C{str} which will be + logged. + """ + self.write(''.join(iovec)) + + + def flush(self): + """ + Nothing is buffered, so flushing does nothing. This method is required + to exist by PEP 333, though. + + This is called in a WSGI application thread, not the I/O thread. + """ + + + +class _InputStream: + """ + File-like object instances of which are used as the value for the + C{'wsgi.input'} key in the C{environ} dictionary passed to the application + object. 
+ + This only exists to make the handling of C{readline(-1)} consistent across + different possible underlying file-like object implementations. The other + supported methods pass through directly to the wrapped object. + """ + def __init__(self, input): + """ + Initialize the instance. + + This is called in the I/O thread, not a WSGI application thread. + """ + self._wrapped = input + + + def read(self, size=None): + """ + Pass through to the underlying C{read}. + + This is called in a WSGI application thread, not the I/O thread. + """ + # Avoid passing None because cStringIO and file don't like it. + if size is None: + return self._wrapped.read() + return self._wrapped.read(size) + + + def readline(self, size=None): + """ + Pass through to the underlying C{readline}, with a size of C{-1} replaced + with a size of C{None}. + + This is called in a WSGI application thread, not the I/O thread. + """ + # Check for -1 because StringIO doesn't handle it correctly. Check for + # None because files and tempfiles don't accept that. + if size == -1 or size is None: + return self._wrapped.readline() + return self._wrapped.readline(size) + + + def readlines(self, size=None): + """ + Pass through to the underlying C{readlines}. + + This is called in a WSGI application thread, not the I/O thread. + """ + # Avoid passing None because cStringIO and file don't like it. + if size is None: + return self._wrapped.readlines() + return self._wrapped.readlines(size) + + + def __iter__(self): + """ + Pass through to the underlying C{__iter__}. + + This is called in a WSGI application thread, not the I/O thread. + """ + return iter(self._wrapped) + + + +class _WSGIResponse: + """ + Helper for L{WSGIResource} which drives the WSGI application using a + threadpool and hooks it up to the L{Request}. + + @ivar started: A C{bool} indicating whether or not the response status and + headers have been written to the request yet. This may only be read or + written in the WSGI application thread. + + @ivar reactor: An L{IReactorThreads} provider which is used to call methods + on the request in the I/O thread. + + @ivar threadpool: A L{ThreadPool} which is used to call the WSGI + application object in a non-I/O thread. + + @ivar application: The WSGI application object. + + @ivar request: The L{Request} upon which the WSGI environment is based and + to which the application's output will be sent. + + @ivar environ: The WSGI environment C{dict}. + + @ivar status: The HTTP response status C{str} supplied to the WSGI + I{start_response} callable by the application. + + @ivar headers: A list of HTTP response headers supplied to the WSGI + I{start_response} callable by the application. + + @ivar _requestFinished: A flag which indicates whether it is possible to + generate more response data or not. This is C{False} until + L{Request.notifyFinish} tells us the request is done, then C{True}. 
+ """ + + _requestFinished = False + + def __init__(self, reactor, threadpool, application, request): + self.started = False + self.reactor = reactor + self.threadpool = threadpool + self.application = application + self.request = request + self.request.notifyFinish().addBoth(self._finished) + + if request.prepath: + scriptName = '/' + '/'.join(request.prepath) + else: + scriptName = '' + + if request.postpath: + pathInfo = '/' + '/'.join(request.postpath) + else: + pathInfo = '' + + parts = request.uri.split('?', 1) + if len(parts) == 1: + queryString = '' + else: + queryString = parts[1] + + self.environ = { + 'REQUEST_METHOD': request.method, + 'REMOTE_ADDR': request.getClientIP(), + 'SCRIPT_NAME': scriptName, + 'PATH_INFO': pathInfo, + 'QUERY_STRING': queryString, + 'CONTENT_TYPE': request.getHeader('content-type') or '', + 'CONTENT_LENGTH': request.getHeader('content-length') or '', + 'SERVER_NAME': request.getRequestHostname(), + 'SERVER_PORT': str(request.getHost().port), + 'SERVER_PROTOCOL': request.clientproto} + + for name, values in request.requestHeaders.getAllRawHeaders(): + name = 'HTTP_' + name.upper().replace('-', '_') + # It might be preferable for http.HTTPChannel to clear out + # newlines. + self.environ[name] = ','.join([ + v.replace('\n', ' ') for v in values]) + + self.environ.update({ + 'wsgi.version': (1, 0), + 'wsgi.url_scheme': request.isSecure() and 'https' or 'http', + 'wsgi.run_once': False, + 'wsgi.multithread': True, + 'wsgi.multiprocess': False, + 'wsgi.errors': _ErrorStream(), + # Attend: request.content was owned by the I/O thread up until + # this point. By wrapping it and putting the result into the + # environment dictionary, it is effectively being given to + # another thread. This means that whatever it is, it has to be + # safe to access it from two different threads. The access + # *should* all be serialized (first the I/O thread writes to + # it, then the WSGI thread reads from it, then the I/O thread + # closes it). However, since the request is made available to + # arbitrary application code during resource traversal, it's + # possible that some other code might decide to use it in the + # I/O thread concurrently with its use in the WSGI thread. + # More likely than not, this will break. This seems like an + # unlikely possibility to me, but if it is to be allowed, + # something here needs to change. -exarkun + 'wsgi.input': _InputStream(request.content)}) + + + def _finished(self, ignored): + """ + Record the end of the response generation for the request being + serviced. + """ + self._requestFinished = True + + + def startResponse(self, status, headers, excInfo=None): + """ + The WSGI I{start_response} callable. The given values are saved until + they are needed to generate the response. + + This will be called in a non-I/O thread. + """ + if self.started and excInfo is not None: + raise excInfo[0], excInfo[1], excInfo[2] + self.status = status + self.headers = headers + return self.write + + + def write(self, bytes): + """ + The WSGI I{write} callable returned by the I{start_response} callable. + The given bytes will be written to the response body, possibly flushing + the status and headers first. + + This will be called in a non-I/O thread. 
+ """ + def wsgiWrite(started): + if not started: + self._sendResponseHeaders() + self.request.write(bytes) + self.reactor.callFromThread(wsgiWrite, self.started) + self.started = True + + + def _sendResponseHeaders(self): + """ + Set the response code and response headers on the request object, but + do not flush them. The caller is responsible for doing a write in + order for anything to actually be written out in response to the + request. + + This must be called in the I/O thread. + """ + code, message = self.status.split(None, 1) + code = int(code) + self.request.setResponseCode(code, message) + + # twisted.web.server.Request.process always addes a content-type + # response header. That's not appropriate for us. + self.request.responseHeaders.removeHeader('content-type') + + for name, value in self.headers: + # Don't allow the application to control these required headers. + if name.lower() not in ('server', 'date'): + self.request.responseHeaders.addRawHeader(name, value) + + + def start(self): + """ + Start the WSGI application in the threadpool. + + This must be called in the I/O thread. + """ + self.threadpool.callInThread(self.run) + + + def run(self): + """ + Call the WSGI application object, iterate it, and handle its output. + + This must be called in a non-I/O thread (ie, a WSGI application + thread). + """ + try: + appIterator = self.application(self.environ, self.startResponse) + for elem in appIterator: + if elem: + self.write(elem) + if self._requestFinished: + break + close = getattr(appIterator, 'close', None) + if close is not None: + close() + except: + def wsgiError(started, type, value, traceback): + err(Failure(value, type, traceback), "WSGI application error") + if started: + self.request.transport.loseConnection() + else: + self.request.setResponseCode(INTERNAL_SERVER_ERROR) + self.request.finish() + self.reactor.callFromThread(wsgiError, self.started, *exc_info()) + else: + def wsgiFinish(started): + if not self._requestFinished: + if not started: + self._sendResponseHeaders() + self.request.finish() + self.reactor.callFromThread(wsgiFinish, self.started) + self.started = True + + + +class WSGIResource: + """ + An L{IResource} implementation which delegates responsibility for all + resources hierarchically inferior to it to a WSGI application. + + @ivar _reactor: An L{IReactorThreads} provider which will be passed on to + L{_WSGIResponse} to schedule calls in the I/O thread. + + @ivar _threadpool: A L{ThreadPool} which will be passed on to + L{_WSGIResponse} to run the WSGI application object. + + @ivar _application: The WSGI application object. + """ + implements(IResource) + + # Further resource segments are left up to the WSGI application object to + # handle. + isLeaf = True + + def __init__(self, reactor, threadpool, application): + self._reactor = reactor + self._threadpool = threadpool + self._application = application + + + def render(self, request): + """ + Turn the request into the appropriate C{environ} C{dict} suitable to be + passed to the WSGI application object and then pass it on. + + The WSGI application object is given almost complete control of the + rendering process. C{NOT_DONE_YET} will always be returned in order + and response completion will be dictated by the application object, as + will the status, headers, and the response body. 
+ """ + response = _WSGIResponse( + self._reactor, self._threadpool, self._application, request) + response.start() + return NOT_DONE_YET + + + def getChildWithDefault(self, name, request): + """ + Reject attempts to retrieve a child resource. All path segments beyond + the one which refers to this resource are handled by the WSGI + application object. + """ + raise RuntimeError("Cannot get IResource children from WSGIResource") + + + def putChild(self, path, child): + """ + Reject attempts to add a child resource to this resource. The WSGI + application object handles all path segments beneath this resource, so + L{IResource} children can never be found. + """ + raise RuntimeError("Cannot put IResource children under WSGIResource") + + +__all__ = ['WSGIResource'] diff --git a/vendor/Twisted-10.0.0/twisted/web/xmlrpc.py b/vendor/Twisted-10.0.0/twisted/web/xmlrpc.py new file mode 100644 index 000000000000..cbe8bf36d200 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/web/xmlrpc.py @@ -0,0 +1,427 @@ +# -*- test-case-name: twisted.web.test.test_xmlrpc -*- +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +A generic resource for publishing objects via XML-RPC. + +Maintainer: Itamar Shtull-Trauring +""" + +# System Imports +import sys, xmlrpclib, urlparse + +# Sibling Imports +from twisted.web import resource, server, http +from twisted.internet import defer, protocol, reactor +from twisted.python import log, reflect, failure + +# These are deprecated, use the class level definitions +NOT_FOUND = 8001 +FAILURE = 8002 + + +# Useful so people don't need to import xmlrpclib directly +Fault = xmlrpclib.Fault +Binary = xmlrpclib.Binary +Boolean = xmlrpclib.Boolean +DateTime = xmlrpclib.DateTime + +# On Python 2.4 and earlier, DateTime.decode returns unicode. +if sys.version_info[:2] < (2, 5): + _decode = DateTime.decode + DateTime.decode = lambda self, value: _decode(self, value.encode('ascii')) + + +class NoSuchFunction(Fault): + """ + There is no function by the given name. + """ + + +class Handler: + """ + Handle a XML-RPC request and store the state for a request in progress. + + Override the run() method and return result using self.result, + a Deferred. + + We require this class since we're not using threads, so we can't + encapsulate state in a running function if we're going to have + to wait for results. + + For example, lets say we want to authenticate against twisted.cred, + run a LDAP query and then pass its result to a database query, all + as a result of a single XML-RPC command. We'd use a Handler instance + to store the state of the running command. + """ + + def __init__(self, resource, *args): + self.resource = resource # the XML-RPC resource we are connected to + self.result = defer.Deferred() + self.run(*args) + + def run(self, *args): + # event driven equivalent of 'raise UnimplementedError' + self.result.errback( + NotImplementedError("Implement run() in subclasses")) + + +class XMLRPC(resource.Resource): + """ + A resource that implements XML-RPC. + + You probably want to connect this to '/RPC2'. + + Methods published can return XML-RPC serializable results, Faults, + Binary, Boolean, DateTime, Deferreds, or Handler instances. + + By default methods beginning with 'xmlrpc_' are published. + + Sub-handlers for prefixed methods (e.g., system.listMethods) + can be added with putSubHandler. By default, prefixes are + separated with a '.'. Override self.separator to change this. 
+ """ + + # Error codes for Twisted, if they conflict with yours then + # modify them at runtime. + NOT_FOUND = 8001 + FAILURE = 8002 + + isLeaf = 1 + separator = '.' + allowedMethods = ('POST',) + + def __init__(self, allowNone=False): + resource.Resource.__init__(self) + self.subHandlers = {} + self.allowNone = allowNone + + def putSubHandler(self, prefix, handler): + self.subHandlers[prefix] = handler + + def getSubHandler(self, prefix): + return self.subHandlers.get(prefix, None) + + def getSubHandlerPrefixes(self): + return self.subHandlers.keys() + + def render_POST(self, request): + request.content.seek(0, 0) + request.setHeader("content-type", "text/xml") + try: + args, functionPath = xmlrpclib.loads(request.content.read()) + except Exception, e: + f = Fault(self.FAILURE, "Can't deserialize input: %s" % (e,)) + self._cbRender(f, request) + else: + try: + function = self._getFunction(functionPath) + except Fault, f: + self._cbRender(f, request) + else: + d = defer.maybeDeferred(function, *args) + d.addErrback(self._ebRender) + d.addCallback(self._cbRender, request) + return server.NOT_DONE_YET + + + def _cbRender(self, result, request): + if isinstance(result, Handler): + result = result.result + if not isinstance(result, Fault): + result = (result,) + try: + try: + content = xmlrpclib.dumps( + result, methodresponse=True, + allow_none=self.allowNone) + except Exception, e: + f = Fault(self.FAILURE, "Can't serialize output: %s" % (e,)) + content = xmlrpclib.dumps(f, methodresponse=True, + allow_none=self.allowNone) + + request.setHeader("content-length", str(len(content))) + request.write(content) + except: + log.err() + request.finish() + + + def _ebRender(self, failure): + if isinstance(failure.value, Fault): + return failure.value + log.err(failure) + return Fault(self.FAILURE, "error") + + def _getFunction(self, functionPath): + """ + Given a string, return a function, or raise NoSuchFunction. + + This returned function will be called, and should return the result + of the call, a Deferred, or a Fault instance. + + Override in subclasses if you want your own policy. The default + policy is that given functionPath 'foo', return the method at + self.xmlrpc_foo, i.e. getattr(self, "xmlrpc_" + functionPath). + If functionPath contains self.separator, the sub-handler for + the initial prefix is used to search for the remaining path. + """ + if functionPath.find(self.separator) != -1: + prefix, functionPath = functionPath.split(self.separator, 1) + handler = self.getSubHandler(prefix) + if handler is None: + raise NoSuchFunction(self.NOT_FOUND, + "no such subHandler %s" % prefix) + return handler._getFunction(functionPath) + + f = getattr(self, "xmlrpc_%s" % functionPath, None) + if not f: + raise NoSuchFunction(self.NOT_FOUND, + "function %s not found" % functionPath) + elif not callable(f): + raise NoSuchFunction(self.NOT_FOUND, + "function %s not callable" % functionPath) + else: + return f + + def _listFunctions(self): + """ + Return a list of the names of all xmlrpc methods. + """ + return reflect.prefixedMethodNames(self.__class__, 'xmlrpc_') + + +class XMLRPCIntrospection(XMLRPC): + """ + Implement the XML-RPC Introspection API. + + By default, the methodHelp method returns the 'help' method attribute, + if it exists, otherwise the __doc__ method attribute, if it exists, + otherwise the empty string. + + To enable the methodSignature method, add a 'signature' method attribute + containing a list of lists. See methodSignature's documentation for the + format. 
Note the type strings should be XML-RPC types, not Python types. + """ + + def __init__(self, parent): + """ + Implement Introspection support for an XMLRPC server. + + @param parent: the XMLRPC server to add Introspection support to. + """ + + XMLRPC.__init__(self) + self._xmlrpc_parent = parent + + def xmlrpc_listMethods(self): + """ + Return a list of the method names implemented by this server. + """ + functions = [] + todo = [(self._xmlrpc_parent, '')] + while todo: + obj, prefix = todo.pop(0) + functions.extend([prefix + name for name in obj._listFunctions()]) + todo.extend([ (obj.getSubHandler(name), + prefix + name + obj.separator) + for name in obj.getSubHandlerPrefixes() ]) + return functions + + xmlrpc_listMethods.signature = [['array']] + + def xmlrpc_methodHelp(self, method): + """ + Return a documentation string describing the use of the given method. + """ + method = self._xmlrpc_parent._getFunction(method) + return (getattr(method, 'help', None) + or getattr(method, '__doc__', None) or '') + + xmlrpc_methodHelp.signature = [['string', 'string']] + + def xmlrpc_methodSignature(self, method): + """ + Return a list of type signatures. + + Each type signature is a list of the form [rtype, type1, type2, ...] + where rtype is the return type and typeN is the type of the Nth + argument. If no signature information is available, the empty + string is returned. + """ + method = self._xmlrpc_parent._getFunction(method) + return getattr(method, 'signature', None) or '' + + xmlrpc_methodSignature.signature = [['array', 'string'], + ['string', 'string']] + + +def addIntrospection(xmlrpc): + """ + Add Introspection support to an XMLRPC server. + + @param xmlrpc: The xmlrpc server to add Introspection support to. + """ + xmlrpc.putSubHandler('system', XMLRPCIntrospection(xmlrpc)) + + +class QueryProtocol(http.HTTPClient): + + def connectionMade(self): + self.sendCommand('POST', self.factory.path) + self.sendHeader('User-Agent', 'Twisted/XMLRPClib') + self.sendHeader('Host', self.factory.host) + self.sendHeader('Content-type', 'text/xml') + self.sendHeader('Content-length', str(len(self.factory.payload))) + if self.factory.user: + auth = '%s:%s' % (self.factory.user, self.factory.password) + auth = auth.encode('base64').strip() + self.sendHeader('Authorization', 'Basic %s' % (auth,)) + self.endHeaders() + self.transport.write(self.factory.payload) + + def handleStatus(self, version, status, message): + if status != '200': + self.factory.badStatus(status, message) + + def handleResponse(self, contents): + self.factory.parseResponse(contents) + + +payloadTemplate = """ + +%s +%s + +""" + + +class _QueryFactory(protocol.ClientFactory): + + deferred = None + protocol = QueryProtocol + + def __init__(self, path, host, method, user=None, password=None, + allowNone=False, args=()): + self.path, self.host = path, host + self.user, self.password = user, password + self.payload = payloadTemplate % (method, + xmlrpclib.dumps(args, allow_none=allowNone)) + self.deferred = defer.Deferred() + + def parseResponse(self, contents): + if not self.deferred: + return + try: + response = xmlrpclib.loads(contents)[0][0] + except: + deferred, self.deferred = self.deferred, None + deferred.errback(failure.Failure()) + else: + deferred, self.deferred = self.deferred, None + deferred.callback(response) + + def clientConnectionLost(self, _, reason): + if self.deferred is not None: + deferred, self.deferred = self.deferred, None + deferred.errback(reason) + + clientConnectionFailed = clientConnectionLost + + def 
badStatus(self, status, message): + deferred, self.deferred = self.deferred, None + deferred.errback(ValueError(status, message)) + + + +class Proxy: + """ + A Proxy for making remote XML-RPC calls. + + Pass the URL of the remote XML-RPC server to the constructor. + + Use proxy.callRemote('foobar', *args) to call remote method + 'foobar' with *args. + + @ivar queryFactory: object returning a factory for XML-RPC protocol. Mainly + useful for tests. + """ + queryFactory = _QueryFactory + + def __init__(self, url, user=None, password=None, allowNone=False): + """ + @type url: C{str} + @param url: The URL to which to post method calls. Calls will be made + over SSL if the scheme is HTTPS. If netloc contains username or + password information, these will be used to authenticate, as long as + the C{user} and C{password} arguments are not specified. + + @type user: C{str} or None + @param user: The username with which to authenticate with the server + when making calls. If specified, overrides any username information + embedded in C{url}. If not specified, a value may be taken from C{url} + if present. + + @type password: C{str} or None + @param password: The password with which to authenticate with the + server when making calls. If specified, overrides any password + information embedded in C{url}. If not specified, a value may be taken + from C{url} if present. + + @type allowNone: C{bool} or None + @param allowNone: allow the use of None values in parameters. It's + passed to the underlying xmlrpclib implementation. Default to False. + """ + scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) + netlocParts = netloc.split('@') + if len(netlocParts) == 2: + userpass = netlocParts.pop(0).split(':') + self.user = userpass.pop(0) + try: + self.password = userpass.pop(0) + except: + self.password = None + else: + self.user = self.password = None + hostport = netlocParts[0].split(':') + self.host = hostport.pop(0) + try: + self.port = int(hostport.pop(0)) + except: + self.port = None + self.path = path + if self.path in ['', None]: + self.path = '/' + self.secure = (scheme == 'https') + if user is not None: + self.user = user + if password is not None: + self.password = password + self.allowNone = allowNone + + def callRemote(self, method, *args): + """ + Call remote XML-RPC C{method} with given arguments. + + @return: a L{defer.Deferred} that will fire with the method response, + or a failure if the method failed. Generally, the failure type will + be L{Fault}, but you can also have an C{IndexError} on some buggy + servers giving empty responses. + """ + factory = self.queryFactory( + self.path, self.host, method, self.user, + self.password, self.allowNone, args) + if self.secure: + from twisted.internet import ssl + reactor.connectSSL(self.host, self.port or 443, + factory, ssl.ClientContextFactory()) + else: + reactor.connectTCP(self.host, self.port or 80, factory) + return factory.deferred + + +__all__ = [ + "XMLRPC", "Handler", "NoSuchFunction", "Proxy", + + "Fault", "Binary", "Boolean", "DateTime"] diff --git a/vendor/Twisted-10.0.0/twisted/words/__init__.py b/vendor/Twisted-10.0.0/twisted/words/__init__.py new file mode 100644 index 000000000000..725af4c93995 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/__init__.py @@ -0,0 +1,10 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. + + +"""Twisted Words: a Twisted Chat service. 
+""" + +from twisted.words._version import version +__version__ = version.short() diff --git a/vendor/Twisted-10.0.0/twisted/words/_version.py b/vendor/Twisted-10.0.0/twisted/words/_version.py new file mode 100644 index 000000000000..ccfa3efdcc6e --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/_version.py @@ -0,0 +1,3 @@ +# This is an auto-generated file. Do not edit it. +from twisted.python import versions +version = versions.Version('twisted.words', 10, 0, 0) diff --git a/vendor/Twisted-10.0.0/twisted/words/ewords.py b/vendor/Twisted-10.0.0/twisted/words/ewords.py new file mode 100644 index 000000000000..5aa93450f2da --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/ewords.py @@ -0,0 +1,34 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. + +"""Exception definitions for Words +""" + +class WordsError(Exception): + def __str__(self): + return self.__class__.__name__ + ': ' + Exception.__str__(self) + +class NoSuchUser(WordsError): + pass + + +class DuplicateUser(WordsError): + pass + + +class NoSuchGroup(WordsError): + pass + + +class DuplicateGroup(WordsError): + pass + + +class AlreadyLoggedIn(WordsError): + pass + +__all__ = [ + 'WordsError', 'NoSuchUser', 'DuplicateUser', + 'NoSuchGroup', 'DuplicateGroup', 'AlreadyLoggedIn', + ] diff --git a/vendor/Twisted-10.0.0/twisted/words/im/__init__.py b/vendor/Twisted-10.0.0/twisted/words/im/__init__.py new file mode 100644 index 000000000000..9f511b7c8d14 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +"""Instance Messenger, Pan-protocol chat client.""" + +import warnings +warnings.warn("twisted.im will be undergoing a rewrite at some point in the future.") diff --git a/vendor/Twisted-10.0.0/twisted/words/im/baseaccount.py b/vendor/Twisted-10.0.0/twisted/words/im/baseaccount.py new file mode 100644 index 000000000000..4cbe669048e2 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/baseaccount.py @@ -0,0 +1,62 @@ +# -*- Python -*- +# +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# + + +class AccountManager: + """I am responsible for managing a user's accounts. + + That is, remembering what accounts are available, their settings, + adding and removal of accounts, etc. + + @ivar accounts: A collection of available accounts. + @type accounts: mapping of strings to L{Account}s. + """ + def __init__(self): + self.accounts = {} + + def getSnapShot(self): + """A snapshot of all the accounts and their status. 
+ + @returns: A list of tuples, each of the form + (string:accountName, boolean:isOnline, + boolean:autoLogin, string:gatewayType) + """ + data = [] + for account in self.accounts.values(): + data.append((account.accountName, account.isOnline(), + account.autoLogin, account.gatewayType)) + return data + + def isEmpty(self): + return len(self.accounts) == 0 + + def getConnectionInfo(self): + connectioninfo = [] + for account in self.accounts.values(): + connectioninfo.append(account.isOnline()) + return connectioninfo + + def addAccount(self, account): + self.accounts[account.accountName] = account + + def delAccount(self, accountName): + del self.accounts[accountName] + + def connect(self, accountName, chatui): + """ + @returntype: Deferred L{interfaces.IClient} + """ + return self.accounts[accountName].logOn(chatui) + + def disconnect(self, accountName): + pass + #self.accounts[accountName].logOff() - not yet implemented + + def quit(self): + pass + #for account in self.accounts.values(): + # account.logOff() - not yet implemented diff --git a/vendor/Twisted-10.0.0/twisted/words/im/basechat.py b/vendor/Twisted-10.0.0/twisted/words/im/basechat.py new file mode 100644 index 000000000000..4555bef2245c --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/basechat.py @@ -0,0 +1,316 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# + +"""Base classes for Instance Messenger clients.""" + +from twisted.words.im.locals import OFFLINE, ONLINE, AWAY + +class ContactsList: + """A GUI object that displays a contacts list""" + def __init__(self, chatui): + """ + @param chatui: ??? + @type chatui: L{ChatUI} + """ + self.chatui = chatui + self.contacts = {} + self.onlineContacts = {} + self.clients = [] + + def setContactStatus(self, person): + """Inform the user that a person's status has changed. + + @type person: L{Person} + """ + if not self.contacts.has_key(person.name): + self.contacts[person.name] = person + if not self.onlineContacts.has_key(person.name) and \ + (person.status == ONLINE or person.status == AWAY): + self.onlineContacts[person.name] = person + if self.onlineContacts.has_key(person.name) and \ + person.status == OFFLINE: + del self.onlineContacts[person.name] + + def registerAccountClient(self, client): + """Notify the user that an account client has been signed on to. + + @type client: L{Client} + """ + if not client in self.clients: + self.clients.append(client) + + def unregisterAccountClient(self, client): + """Notify the user that an account client has been signed off + or disconnected from. 
+ + @type client: L{Client} + """ + if client in self.clients: + self.clients.remove(client) + + def contactChangedNick(self, person, newnick): + oldname = person.name + if self.contacts.has_key(oldname): + del self.contacts[oldname] + person.name = newnick + self.contacts[newnick] = person + if self.onlineContacts.has_key(oldname): + del self.onlineContacts[oldname] + self.onlineContacts[newnick] = person + + +class Conversation: + """A GUI window of a conversation with a specific person""" + def __init__(self, person, chatui): + """ + @type person: L{Person} + @type chatui: L{ChatUI} + """ + self.chatui = chatui + self.person = person + + def show(self): + """Displays the ConversationWindow""" + raise NotImplementedError("Subclasses must implement this method") + + def hide(self): + """Hides the ConversationWindow""" + raise NotImplementedError("Subclasses must implement this method") + + def sendText(self, text): + """Sends text to the person with whom the user is conversing. + + @returntype: L{Deferred} + """ + self.person.sendMessage(text, None) + + def showMessage(self, text, metadata=None): + """Display a message sent from the person with whom she is conversing + + @type text: string + @type metadata: dict + """ + raise NotImplementedError("Subclasses must implement this method") + + def contactChangedNick(self, person, newnick): + """Change a person's name. + + @type person: L{Person} + @type newnick: string + """ + self.person.name = newnick + + +class GroupConversation: + """A conversation with a group of people.""" + def __init__(self, group, chatui): + """ + @type group: L{Group} + @param chatui: ??? + @type chatui: L{ChatUI} + """ + self.chatui = chatui + self.group = group + self.members = [] + + def show(self): + """Displays the GroupConversationWindow.""" + raise NotImplementedError("Subclasses must implement this method") + + def hide(self): + """Hides the GroupConversationWindow.""" + raise NotImplementedError("Subclasses must implement this method") + + def sendText(self, text): + """Sends text to the group. + + @type text: string + @returntype: L{Deferred} + """ + self.group.sendGroupMessage(text, None) + + def showGroupMessage(self, sender, text, metadata=None): + """Displays to the user a message sent to this group from the given sender + @type sender: string (XXX: Not Person?) + @type text: string + @type metadata: dict + """ + raise NotImplementedError("Subclasses must implement this method") + + def setGroupMembers(self, members): + """Sets the list of members in the group and displays it to the user + """ + self.members = members + + def setTopic(self, topic, author): + """Displays the topic (from the server) for the group conversation window + + @type topic: string + @type author: string (XXX: Not Person?) + """ + raise NotImplementedError("Subclasses must implement this method") + + def memberJoined(self, member): + """Adds the given member to the list of members in the group conversation + and displays this to the user + + @type member: string (XXX: Not Person?) 
+ """ + if not member in self.members: + self.members.append(member) + + def memberChangedNick(self, oldnick, newnick): + """Changes the oldnick in the list of members to newnick and displays this + change to the user + + @type oldnick: string + @type newnick: string + """ + if oldnick in self.members: + self.members.remove(oldnick) + self.members.append(newnick) + #self.chatui.contactChangedNick(oldnick, newnick) + + def memberLeft(self, member): + """Deletes the given member from the list of members in the group + conversation and displays the change to the user + + @type member: string + """ + if member in self.members: + self.members.remove(member) + + +class ChatUI: + """A GUI chat client""" + def __init__(self): + self.conversations = {} # cache of all direct windows + self.groupConversations = {} # cache of all group windows + self.persons = {} # keys are (name, client) + self.groups = {} # cache of all groups + self.onlineClients = [] # list of message sources currently online + self.contactsList = ContactsList(self) + + def registerAccountClient(self, client): + """Notifies user that an account has been signed on to. + + @type client: L{Client} + @returns: client, so that I may be used in a callback chain + """ + print "signing onto", client.accountName + self.onlineClients.append(client) + self.contactsList.registerAccountClient(client) + return client + + def unregisterAccountClient(self, client): + """Notifies user that an account has been signed off or disconnected + + @type client: L{Client} + """ + print "signing off from", client.accountName + self.onlineClients.remove(client) + self.contactsList.unregisterAccountClient(client) + + def getContactsList(self): + """ + @returntype: L{ContactsList} + """ + return self.contactsList + + def getConversation(self, person, Class=Conversation, stayHidden=0): + """For the given person object, returns the conversation window + or creates and returns a new conversation window if one does not exist. 
+ + @type person: L{Person} + @type Class: L{Conversation} class + @type stayHidden: boolean + + @returntype: L{Conversation} + """ + conv = self.conversations.get(person) + if not conv: + conv = Class(person, self) + self.conversations[person] = conv + if stayHidden: + conv.hide() + else: + conv.show() + return conv + + def getGroupConversation(self,group,Class=GroupConversation,stayHidden=0): + """For the given group object, returns the group conversation window or + creates and returns a new group conversation window if it doesn't exist + + @type group: L{Group} + @type Class: L{Conversation} class + @type stayHidden: boolean + + @returntype: L{GroupConversation} + """ + conv = self.groupConversations.get(group) + if not conv: + conv = Class(group, self) + self.groupConversations[group] = conv + if stayHidden: + conv.hide() + else: + conv.show() + return conv + + def getPerson(self, name, client): + """For the given name and account client, returns the instance of the + AbstractPerson subclass, or creates and returns a new AbstractPerson + subclass of the type Class + + @type name: string + @type client: L{Client} + + @returntype: L{Person} + """ + account = client.account + p = self.persons.get((name, account)) + if not p: + p = account.getPerson(name) + self.persons[name, account] = p + return p + + def getGroup(self, name, client): + """For the given name and account client, returns the instance of the + AbstractGroup subclass, or creates and returns a new AbstractGroup + subclass of the type Class + + @type name: string + @type client: L{Client} + + @returntype: L{Group} + """ + # I accept 'client' instead of 'account' in my signature for + # backwards compatibility. (Groups changed to be Account-oriented + # in CVS revision 1.8.) + account = client.account + g = self.groups.get((name, account)) + if not g: + g = account.getGroup(name) + self.groups[name, account] = g + return g + + def contactChangedNick(self, oldnick, newnick): + """For the given person, changes the person's name to newnick, and + tells the contact list and any conversation windows with that person + to change as well. + + @type oldnick: string + @type newnick: string + """ + if self.persons.has_key((person.name, person.account)): + conv = self.conversations.get(person) + if conv: + conv.contactChangedNick(person, newnick) + + self.contactsList.contactChangedNick(person, newnick) + + del self.persons[person.name, person.account] + person.name = newnick + self.persons[person.name, person.account] = person diff --git a/vendor/Twisted-10.0.0/twisted/words/im/basesupport.py b/vendor/Twisted-10.0.0/twisted/words/im/basesupport.py new file mode 100644 index 000000000000..1b2a2a946ad5 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/basesupport.py @@ -0,0 +1,270 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# + +"""Instance Messenger base classes for protocol support. + +You will find these useful if you're adding a new protocol to IM. 
+""" + +# Abstract representation of chat "model" classes + +from twisted.words.im.locals import ONLINE, OFFLINE, OfflineError +from twisted.words.im import interfaces + +from twisted.internet.protocol import Protocol + +from twisted.python.reflect import prefixedMethods +from twisted.persisted import styles + +from twisted.internet import error + +class AbstractGroup: + def __init__(self, name, account): + self.name = name + self.account = account + + def getGroupCommands(self): + """finds group commands + + these commands are methods on me that start with imgroup_; they are + called with no arguments + """ + return prefixedMethods(self, "imgroup_") + + def getTargetCommands(self, target): + """finds group commands + + these commands are methods on me that start with imgroup_; they are + called with a user present within this room as an argument + + you may want to override this in your group in order to filter for + appropriate commands on the given user + """ + return prefixedMethods(self, "imtarget_") + + def join(self): + if not self.account.client: + raise OfflineError + self.account.client.joinGroup(self.name) + + def leave(self): + if not self.account.client: + raise OfflineError + self.account.client.leaveGroup(self.name) + + def __repr__(self): + return '<%s %r>' % (self.__class__, self.name) + + def __str__(self): + return '%s@%s' % (self.name, self.account.accountName) + +class AbstractPerson: + def __init__(self, name, baseAccount): + self.name = name + self.account = baseAccount + self.status = OFFLINE + + def getPersonCommands(self): + """finds person commands + + these commands are methods on me that start with imperson_; they are + called with no arguments + """ + return prefixedMethods(self, "imperson_") + + def getIdleTime(self): + """ + Returns a string. + """ + return '--' + + def __repr__(self): + return '<%s %r/%s>' % (self.__class__, self.name, self.status) + + def __str__(self): + return '%s@%s' % (self.name, self.account.accountName) + +class AbstractClientMixin: + """Designed to be mixed in to a Protocol implementing class. + + Inherit from me first. + + @ivar _logonDeferred: Fired when I am done logging in. + """ + def __init__(self, account, chatui, logonDeferred): + for base in self.__class__.__bases__: + if issubclass(base, Protocol): + self.__class__._protoBase = base + break + else: + pass + self.account = account + self.chat = chatui + self._logonDeferred = logonDeferred + + def connectionMade(self): + self._protoBase.connectionMade(self) + + def connectionLost(self, reason): + self.account._clientLost(self, reason) + self.unregisterAsAccountClient() + return self._protoBase.connectionLost(self, reason) + + def unregisterAsAccountClient(self): + """Tell the chat UI that I have `signed off'. + """ + self.chat.unregisterAccountClient(self) + + +class AbstractAccount(styles.Versioned): + """Base class for Accounts. + + I am the start of an implementation of L{IAccount}, I + implement L{isOnline} and most of L{logOn}, though you'll need to implement + L{_startLogOn} in a subclass. + + @cvar _groupFactory: A Callable that will return a L{IGroup} appropriate + for this account type. + @cvar _personFactory: A Callable that will return a L{IPerson} appropriate + for this account type. + + @type _isConnecting: boolean + @ivar _isConnecting: Whether I am in the process of establishing a + connection to the server. + @type _isOnline: boolean + @ivar _isOnline: Whether I am currently on-line with the server. 
+ + @ivar accountName: + @ivar autoLogin: + @ivar username: + @ivar password: + @ivar host: + @ivar port: + """ + + _isOnline = 0 + _isConnecting = 0 + client = None + + _groupFactory = AbstractGroup + _personFactory = AbstractPerson + + persistanceVersion = 2 + + def __init__(self, accountName, autoLogin, username, password, host, port): + self.accountName = accountName + self.autoLogin = autoLogin + self.username = username + self.password = password + self.host = host + self.port = port + + self._groups = {} + self._persons = {} + + def upgrateToVersion2(self): + # Added in CVS revision 1.16. + for k in ('_groups', '_persons'): + if not hasattr(self, k): + setattr(self, k, {}) + + def __getstate__(self): + state = styles.Versioned.__getstate__(self) + for k in ('client', '_isOnline', '_isConnecting'): + try: + del state[k] + except KeyError: + pass + return state + + def isOnline(self): + return self._isOnline + + def logOn(self, chatui): + """Log on to this account. + + Takes care to not start a connection if a connection is + already in progress. You will need to implement + L{_startLogOn} for this to work, and it would be a good idea + to override L{_loginFailed} too. + + @returntype: Deferred L{interfaces.IClient} + """ + if (not self._isConnecting) and (not self._isOnline): + self._isConnecting = 1 + d = self._startLogOn(chatui) + d.addCallback(self._cb_logOn) + # if chatui is not None: + # (I don't particularly like having to pass chatUI to this function, + # but we haven't factored it out yet.) + d.addCallback(chatui.registerAccountClient) + d.addErrback(self._loginFailed) + return d + else: + raise error.ConnectError("Connection in progress") + + def getGroup(self, name): + """Group factory. + + @param name: Name of the group on this account. + @type name: string + """ + group = self._groups.get(name) + if group is None: + group = self._groupFactory(name, self) + self._groups[name] = group + return group + + def getPerson(self, name): + """Person factory. + + @param name: Name of the person on this account. + @type name: string + """ + person = self._persons.get(name) + if person is None: + person = self._personFactory(name, self) + self._persons[name] = person + return person + + def _startLogOn(self, chatui): + """Start the sign on process. + + Factored out of L{logOn}. + + @returntype: Deferred L{interfaces.IClient} + """ + raise NotImplementedError() + + def _cb_logOn(self, client): + self._isConnecting = 0 + self._isOnline = 1 + self.client = client + return client + + def _loginFailed(self, reason): + """Errorback for L{logOn}. + + @type reason: Failure + + @returns: I{reason}, for further processing in the callback chain. 
+ @returntype: Failure + """ + self._isConnecting = 0 + self._isOnline = 0 # just in case + return reason + + def _clientLost(self, client, reason): + self.client = None + self._isConnecting = 0 + self._isOnline = 0 + return reason + + def __repr__(self): + return "<%s: %s (%s@%s:%s)>" % (self.__class__, + self.accountName, + self.username, + self.host, + self.port) diff --git a/vendor/Twisted-10.0.0/twisted/words/im/instancemessenger.glade b/vendor/Twisted-10.0.0/twisted/words/im/instancemessenger.glade new file mode 100644 index 000000000000..33ffaa277980 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/instancemessenger.glade @@ -0,0 +1,3165 @@ + + + + + InstanceMessenger + instancemessenger + + src + pixmaps + C + True + True + True + + + + GtkWindow + UnseenConversationWindow + False + Unseen Conversation Window + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + False + True + False + + + GtkVBox + ConversationWidget + False + 0 + + + GtkVPaned + vpaned1 + 10 + 6 + 0 + + 0 + True + True + + + + GtkScrolledWindow + scrolledwindow10 + GTK_POLICY_NEVER + GTK_POLICY_ALWAYS + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + False + True + + + + GtkText + ConversationOutput + False + + + + + + GtkScrolledWindow + scrolledwindow11 + GTK_POLICY_NEVER + GTK_POLICY_AUTOMATIC + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + True + False + + + + GtkText + ConversationMessageEntry + True + True + + key_press_event + handle_key_press_event + Tue, 29 Jan 2002 12:42:58 GMT + + True + + + + + + + GtkHBox + hbox9 + True + 0 + + 3 + False + True + + + + GtkButton + button42 + True + + GTK_RELIEF_NORMAL + + 3 + True + True + + + + + GtkButton + AddRemoveContact + True + + GTK_RELIEF_NORMAL + + 3 + True + True + + + + + GtkButton + CloseContact + True + + GTK_RELIEF_NORMAL + + 3 + True + True + + + + + + + + GtkWindow + MainIMWindow + + destroy + on_MainIMWindow_destroy + Sun, 21 Jul 2002 08:16:08 GMT + + Instance Messenger + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + True + True + False + + + GtkNotebook + ContactsNotebook + True + + key_press_event + on_ContactsWidget_key_press_event + Tue, 07 May 2002 03:02:33 GMT + + True + True + GTK_POS_TOP + False + 2 + 2 + False + + + GtkVBox + vbox11 + False + 0 + + + GtkLabel + OnlineCount + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + 0 + False + False + + + + + GtkScrolledWindow + scrolledwindow14 + GTK_POLICY_AUTOMATIC + GTK_POLICY_AUTOMATIC + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkCTree + OnlineContactsTree + True + + tree_select_row + on_OnlineContactsTree_tree_select_row + Tue, 07 May 2002 03:06:32 GMT + + + select_row + on_OnlineContactsTree_select_row + Tue, 07 May 2002 04:36:10 GMT + + 4 + 109,35,23,80 + GTK_SELECTION_SINGLE + True + GTK_SHADOW_IN + + + GtkLabel + CTree:title + label77 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CTree:title + label78 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CTree:title + label79 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CTree:title + label80 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + GtkVBox + vbox30 + False + 2 + + 1 + False + True + + + + GtkEntry + ContactNameEntry + True + + activate + on_ContactNameEntry_activate + Tue, 07 May 2002 04:07:25 GMT + + True + True + 0 + + + 0 + False + False + + + + + GtkOptionMenu + AccountsListPopup + True + Nothing +To +Speak +Of + + 1 + + 0 + False + False + + + + + GtkHBox + hbox7 + False + 0 + + 0 + True + True + 
+ + + GtkButton + PlainSendIM + True + + clicked + on_PlainSendIM_clicked + Tue, 29 Jan 2002 03:17:35 GMT + + + GTK_RELIEF_NORMAL + + 0 + True + False + + + + + GtkButton + PlainGetInfo + True + + clicked + on_PlainGetInfo_clicked + Tue, 07 May 2002 04:06:59 GMT + + + GTK_RELIEF_NORMAL + + 0 + True + False + + + + + GtkButton + PlainJoinChat + True + + clicked + on_PlainJoinChat_clicked + Tue, 29 Jan 2002 13:04:49 GMT + + + GTK_RELIEF_NORMAL + + 0 + True + False + + + + + GtkButton + PlainGoAway + True + + clicked + on_PlainGoAway_clicked + Tue, 07 May 2002 04:06:53 GMT + + + GTK_RELIEF_NORMAL + + 0 + True + False + + + + + + GtkHBox + hbox8 + False + 0 + + 0 + True + True + + + + GtkButton + AddContactButton + True + + clicked + on_AddContactButton_clicked + Tue, 07 May 2002 04:06:33 GMT + + + GTK_RELIEF_NORMAL + + 0 + True + False + + + + + GtkButton + RemoveContactButton + True + + clicked + on_RemoveContactButton_clicked + Tue, 07 May 2002 04:06:28 GMT + + + GTK_RELIEF_NORMAL + + 0 + True + False + + + + + + + + GtkLabel + Notebook:tab + label35 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkVBox + vbox14 + False + 0 + + + GtkScrolledWindow + OfflineContactsScroll + GTK_POLICY_AUTOMATIC + GTK_POLICY_ALWAYS + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkCList + OfflineContactsList + True + + select_row + on_OfflineContactsList_select_row + Tue, 07 May 2002 03:00:07 GMT + + 4 + 66,80,80,80 + GTK_SELECTION_SINGLE + True + GTK_SHADOW_IN + + + GtkLabel + CList:title + label41 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label42 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label43 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label44 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + + GtkLabel + Notebook:tab + label36 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkVBox + AccountManWidget + False + 0 + + + GtkScrolledWindow + scrolledwindow12 + GTK_POLICY_AUTOMATIC + GTK_POLICY_ALWAYS + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkCList + accountsList + True + 4 + 80,36,34,80 + GTK_SELECTION_SINGLE + True + GTK_SHADOW_IN + + + GtkLabel + CList:title + label45 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label46 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label47 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label48 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + GtkTable + table5 + 2 + 3 + False + 0 + 0 + + 3 + False + True + + + + GtkButton + NewAccountButton + True + True + + clicked + on_NewAccountButton_clicked + Sun, 27 Jan 2002 10:32:20 GMT + + + GTK_RELIEF_NORMAL + + 0 + 1 + 0 + 1 + 0 + 0 + False + False + False + False + True + False + + + + + GtkButton + button46 + False + True + + GTK_RELIEF_NORMAL + + 1 + 2 + 0 + 1 + 0 + 0 + False + False + False + False + True + False + + + + + GtkButton + LogOnButton + True + True + True + True + + clicked + on_LogOnButton_clicked + Mon, 28 Jan 2002 04:06:23 GMT + + + GTK_RELIEF_NORMAL + + 2 + 3 + 1 + 2 + 0 + 0 + False + False + False + False + True + False + + + + + GtkButton + DeleteAccountButton + True + True + + clicked + on_DeleteAccountButton_clicked + Mon, 28 Jan 2002 00:18:22 GMT + + + GTK_RELIEF_NORMAL + + 2 + 3 + 0 + 1 + 0 + 0 + False + False + False + False + True + False + + + + + 
GtkButton + ConsoleButton + True + True + + clicked + on_ConsoleButton_clicked + Mon, 29 Apr 2002 09:13:32 GMT + + + GTK_RELIEF_NORMAL + + 1 + 2 + 1 + 2 + 0 + 0 + False + False + False + False + True + False + + + + + GtkButton + button75 + True + True + + GTK_RELIEF_NORMAL + + 0 + 1 + 1 + 2 + 0 + 0 + True + True + False + False + True + True + + + + + + + GtkLabel + Notebook:tab + label107 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + GtkWindow + UnseenGroupWindow + False + Unseen Group Window + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + False + True + False + + + GtkVBox + GroupChatBox + False + 0 + + + GtkHBox + hbox5 + False + 0 + + 0 + False + True + + + + GtkEntry + TopicEntry + True + + activate + on_TopicEntry_activate + Sat, 23 Feb 2002 02:57:41 GMT + + + focus_out_event + on_TopicEntry_focus_out_event + Sun, 21 Jul 2002 09:36:54 GMT + + True + True + 0 + <TOPIC NOT RECEIVED> + + 0 + True + True + + + + + GtkLabel + AuthorLabel + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + 0 + False + False + + + + + GtkButton + HideButton + True + + clicked + on_HideButton_clicked + Tue, 29 Jan 2002 14:10:00 GMT + + + GTK_RELIEF_NORMAL + + 0 + False + False + + + + + + GtkVPaned + vpaned2 + 10 + 6 + 0 + + 0 + True + True + + + + GtkHPaned + GroupHPaned + 6 + 6 + + False + True + + + + GtkScrolledWindow + scrolledwindow4 + GTK_POLICY_NEVER + GTK_POLICY_ALWAYS + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + False + True + + + + GtkText + GroupOutput + True + False + + + + + + GtkVBox + actionvbox + 110 + False + 1 + + True + False + + + + GtkScrolledWindow + scrolledwindow5 + GTK_POLICY_NEVER + GTK_POLICY_ALWAYS + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkCList + ParticipantList + True + + select_row + on_ParticipantList_select_row + Sat, 13 Jul 2002 08:11:12 GMT + + + unselect_row + on_ParticipantList_unselect_row + Sat, 13 Jul 2002 08:23:25 GMT + + 1 + 80 + GTK_SELECTION_SINGLE + False + GTK_SHADOW_IN + + + GtkLabel + CList:title + label18 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + GtkFrame + frame10 + + 0 + GTK_SHADOW_ETCHED_IN + + 0 + False + False + + + + GtkVBox + GroupActionsBox + False + 0 + + + Placeholder + + + + Placeholder + + + + Placeholder + + + + + + GtkFrame + PersonFrame + + 0 + GTK_SHADOW_ETCHED_IN + + 0 + False + False + + + + GtkVBox + PersonActionsBox + False + 0 + + + Placeholder + + + + Placeholder + + + + Placeholder + + + + + + + + GtkHBox + hbox6 + False + 0 + + True + False + + + + GtkLabel + NickLabel + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + 4 + False + False + + + + + GtkScrolledWindow + scrolledwindow9 + GTK_POLICY_NEVER + GTK_POLICY_AUTOMATIC + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkText + GroupInput + True + True + + key_press_event + handle_key_press_event + Tue, 29 Jan 2002 12:41:03 GMT + + True + + + + + + + + + + GtkWindow + NewAccountWindow + 3 + False + + destroy + on_NewAccountWindow_destroy + Sun, 27 Jan 2002 10:35:19 GMT + + New Account + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + False + True + True + + + GtkVBox + vbox17 + False + 0 + + + GtkHBox + hbox11 + False + 0 + + 3 + False + True + + + + GtkLabel + label49 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + 0 + False + True + + + + + GtkOptionMenu + GatewayOptionMenu + True + Twisted (Perspective Broker) +Internet Relay Chat +AIM (TOC) +AIM (OSCAR) + + 0 + + 4 + True + True + + + + + + GtkFrame + GatewayFrame + 3 + + 0 + GTK_SHADOW_ETCHED_IN + + 0 
+ True + True + + + + Placeholder + + + + + GtkFrame + frame2 + 3 + + 0 + GTK_SHADOW_ETCHED_IN + + 0 + False + True + + + + GtkTable + table1 + 3 + 2 + 2 + False + 0 + 0 + + + GtkCheckButton + AutoLogin + True + + False + True + + 1 + 2 + 0 + 1 + 0 + 0 + True + True + False + False + True + False + + + + + GtkEntry + accountName + True + True + True + 0 + + + 1 + 2 + 1 + 2 + 0 + 0 + True + True + False + False + True + False + + + + + GtkLabel + label50 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 0 + 1 + 0 + 0 + False + True + False + False + True + True + + + + + GtkLabel + label51 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 1 + 2 + 0 + 0 + False + True + False + False + True + True + + + + + + + GtkHButtonBox + hbuttonbox2 + GTK_BUTTONBOX_SPREAD + 30 + 85 + 27 + 7 + 0 + + 0 + False + True + + + + GtkButton + button50 + True + True + + clicked + createAccount + Sun, 27 Jan 2002 11:25:05 GMT + + + GTK_RELIEF_NORMAL + + + + GtkButton + button51 + True + True + + clicked + destroyMe + Sun, 27 Jan 2002 11:27:12 GMT + + + GTK_RELIEF_NORMAL + + + + + + + GtkWindow + PBAccountWindow + False + PB Account Window + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + False + True + False + + + GtkVBox + PBAccountWidget + 4 + False + 0 + + + GtkTable + table3 + 4 + 2 + False + 0 + 0 + + 0 + False + True + + + + GtkEntry + hostname + True + True + True + 0 + twistedmatrix.com + + 1 + 2 + 2 + 3 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + identity + True + True + + changed + on_identity_changed + Sun, 27 Jan 2002 11:52:17 GMT + + True + True + 0 + + + 1 + 2 + 0 + 1 + 0 + 0 + True + False + False + False + True + False + + + + + GtkLabel + label52 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 2 + 3 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label54 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 0 + 1 + 0 + 0 + False + False + False + False + True + False + + + + + GtkEntry + password + True + True + False + 0 + + + 1 + 2 + 1 + 2 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + portno + True + True + True + 0 + 8787 + + 1 + 2 + 3 + 4 + 0 + 0 + True + False + False + False + True + False + + + + + GtkLabel + label55 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 1 + 2 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label53 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 3 + 4 + 0 + 0 + False + False + False + False + True + False + + + + + + GtkFrame + frame3 + + 0 + GTK_SHADOW_ETCHED_IN + + 0 + True + True + + + + GtkVBox + vbox19 + 3 + False + 0 + + + GtkScrolledWindow + scrolledwindow13 + GTK_POLICY_AUTOMATIC + GTK_POLICY_ALWAYS + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkCList + serviceList + True + + select_row + on_serviceList_select_row + Sun, 27 Jan 2002 12:04:38 GMT + + 3 + 80,80,80 + GTK_SELECTION_SINGLE + True + GTK_SHADOW_IN + + + GtkLabel + CList:title + label60 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label61 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label62 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + GtkTable + table4 + 3 + 2 + False + 0 + 0 + + 0 + False + True + + + + GtkLabel + label63 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 2 + 3 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label59 + + GTK_JUSTIFY_RIGHT + 
False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 0 + 1 + 0 + 0 + False + False + False + False + True + False + + + + + GtkCombo + serviceCombo + False + True + False + True + False + twisted.words +twisted.reality +twisted.manhole + + + 1 + 2 + 0 + 1 + 0 + 0 + True + False + False + False + True + False + + + + GtkEntry + GtkCombo:entry + serviceType + True + + changed + on_serviceType_changed + Sun, 27 Jan 2002 11:49:07 GMT + + True + True + 0 + twisted.words + + + + + GtkLabel + label64 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 1 + 2 + 0 + 0 + False + False + False + False + True + False + + + + + GtkEntry + serviceName + True + True + True + 0 + + + 1 + 2 + 1 + 2 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + perspectiveName + True + True + True + 0 + + + 1 + 2 + 2 + 3 + 0 + 0 + True + False + False + False + True + False + + + + + + GtkHBox + hbox13 + False + 0 + + 0 + False + True + + + + GtkButton + button53 + True + + clicked + addPerspective + Mon, 28 Jan 2002 01:07:15 GMT + + + GTK_RELIEF_NORMAL + + 0 + True + False + + + + + GtkButton + button54 + True + + clicked + removePerspective + Sun, 27 Jan 2002 11:34:36 GMT + + + GTK_RELIEF_NORMAL + + 0 + True + False + + + + + + + + + + GtkWindow + IRCAccountWindow + IRC Account Window + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + False + True + False + + + GtkTable + IRCAccountWidget + 5 + 2 + False + 0 + 0 + + + GtkLabel + label65 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 0 + 1 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label66 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 1 + 2 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label67 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 2 + 3 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label68 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 3 + 4 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label69 + + GTK_JUSTIFY_RIGHT + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 4 + 5 + 0 + 0 + False + False + False + False + True + False + + + + + GtkEntry + ircNick + True + True + True + 0 + + + 1 + 2 + 0 + 1 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + ircServer + True + True + True + 0 + + + 1 + 2 + 1 + 2 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + ircPort + True + True + True + 0 + 6667 + + 1 + 2 + 2 + 3 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + ircChannels + True + True + True + 0 + + + 1 + 2 + 3 + 4 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + ircPassword + True + True + True + 0 + + + 1 + 2 + 4 + 5 + 0 + 0 + True + False + False + False + True + False + + + + + + + GtkWindow + TOCAccountWindow + TOC Account Window + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + False + True + False + + + GtkTable + TOCAccountWidget + 4 + 2 + False + 0 + 0 + + + GtkLabel + label70 + + GTK_JUSTIFY_CENTER + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 0 + 1 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label71 + + GTK_JUSTIFY_CENTER + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 1 + 2 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label72 + + GTK_JUSTIFY_CENTER + False + 0 + 0.5 + 0 + 0 + + 0 + 1 + 2 + 3 + 0 + 0 + False + False + False + False + True + False + + + + + GtkLabel + label73 + + GTK_JUSTIFY_CENTER + False + 0 + 0.5 + 0 + 0 + + 0 + 1 
+ 3 + 4 + 0 + 0 + False + False + False + False + True + False + + + + + GtkEntry + TOCName + True + True + True + 0 + + + 1 + 2 + 0 + 1 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + TOCPass + True + True + False + 0 + + + 1 + 2 + 1 + 2 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + TOCHost + True + True + True + 0 + toc.oscar.aol.com + + 1 + 2 + 2 + 3 + 0 + 0 + True + False + False + False + True + False + + + + + GtkEntry + TOCPort + True + True + True + 0 + 9898 + + 1 + 2 + 3 + 4 + 0 + 0 + True + False + False + False + True + False + + + + + + + GtkWindow + JoinGroupWindow + 5 + False + Group to Join + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + False + True + False + + + GtkVBox + vbox20 + False + 0 + + + GtkOptionMenu + AccountSelector + True + None +In +Particular + + 0 + + 0 + False + False + + + + + GtkHBox + hbox15 + False + 5 + + 0 + True + True + + + + GtkEntry + GroupNameEntry + True + True + + activate + on_GroupJoinButton_clicked + Tue, 29 Jan 2002 13:27:18 GMT + + True + True + 0 + + + 0 + True + True + + + + + GtkButton + GroupJoinButton + True + True + True + + clicked + on_GroupJoinButton_clicked + Tue, 29 Jan 2002 13:16:50 GMT + + + GTK_RELIEF_NORMAL + + 0 + False + False + + + + + + + + GtkWindow + UnifiedWindow + Twisted Instance Messenger + GTK_WINDOW_TOPLEVEL + GTK_WIN_POS_NONE + False + False + True + False + + + GtkVBox + vbox25 + False + 0 + + + GtkHBox + hbox28 + False + 0 + + 0 + False + True + + + + GtkButton + button74 + True + + GTK_RELIEF_NORMAL + + 0 + False + False + + + + + GtkEntry + entry3 + True + True + True + 0 + + + 0 + True + True + + + + + GtkOptionMenu + optionmenu3 + List +Of +Online +Accounts + + 0 + + 0 + False + False + + + + + GtkOptionMenu + optionmenu4 + True + Contact +Person +Group +Account + + 0 + + 0 + False + False + + + + + + GtkHPaned + hpaned1 + 10 + 6 + 0 + + 0 + True + True + + + + GtkVBox + vbox26 + False + 0 + + True + False + + + + GtkFrame + frame7 + 2 + + 0 + GTK_SHADOW_ETCHED_IN + + 0 + True + True + + + + GtkVBox + vbox27 + False + 0 + + + GtkScrolledWindow + scrolledwindow18 + GTK_POLICY_AUTOMATIC + GTK_POLICY_AUTOMATIC + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkCList + clist4 + 4 + 18,25,25,80 + GTK_SELECTION_SINGLE + False + GTK_SHADOW_IN + + + GtkLabel + CList:title + label95 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label96 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label97 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label98 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + GtkHBox + hbox23 + True + 2 + + 0 + True + True + + + + GtkButton + button65 + + GTK_RELIEF_NORMAL + + 0 + True + True + + + + + GtkButton + button66 + + GTK_RELIEF_NORMAL + + 0 + True + True + + + + + GtkButton + button67 + + GTK_RELIEF_NORMAL + + 0 + True + True + + + + + + + + GtkFrame + frame8 + 2 + + 0 + GTK_SHADOW_ETCHED_IN + + 0 + True + True + + + + GtkVBox + vbox28 + False + 0 + + + GtkScrolledWindow + scrolledwindow19 + GTK_POLICY_AUTOMATIC + GTK_POLICY_AUTOMATIC + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkCList + clist5 + 3 + 18,17,80 + GTK_SELECTION_SINGLE + False + GTK_SHADOW_IN + + + GtkLabel + CList:title + label99 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label100 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + 
GtkLabel + CList:title + label101 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + GtkHBox + hbox24 + True + 2 + + 0 + False + True + + + + GtkButton + button68 + True + + GTK_RELIEF_NORMAL + + 0 + True + True + + + + + GtkButton + button69 + True + + GTK_RELIEF_NORMAL + + 0 + True + True + + + + + GtkButton + button70 + True + + GTK_RELIEF_NORMAL + + 0 + True + True + + + + + GtkButton + button71 + True + + GTK_RELIEF_NORMAL + + 0 + False + False + + + + + + + + GtkFrame + frame9 + 2 + + 0 + GTK_SHADOW_ETCHED_IN + + 0 + True + True + + + + GtkVBox + vbox29 + False + 0 + + + GtkScrolledWindow + scrolledwindow20 + GTK_POLICY_AUTOMATIC + GTK_POLICY_AUTOMATIC + GTK_UPDATE_CONTINUOUS + GTK_UPDATE_CONTINUOUS + + 0 + True + True + + + + GtkCList + clist6 + 3 + 21,75,80 + GTK_SELECTION_SINGLE + False + GTK_SHADOW_IN + + + GtkLabel + CList:title + label102 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label103 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + GtkLabel + CList:title + label104 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + + + + + GtkHBox + hbox27 + True + 2 + + 0 + False + True + + + + GtkButton + button72 + + GTK_RELIEF_NORMAL + + 0 + True + True + + + + + GtkButton + button73 + + GTK_RELIEF_NORMAL + + 0 + True + True + + + + + + + + GtkHSeparator + hseparator2 + + 0 + True + True + + + + + GtkLabel + label105 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 3 + + 0 + False + False + + + + + + GtkLabel + label106 + + GTK_JUSTIFY_CENTER + False + 0.5 + 0.5 + 0 + 0 + + True + True + + + + + + + diff --git a/vendor/Twisted-10.0.0/twisted/words/im/interfaces.py b/vendor/Twisted-10.0.0/twisted/words/im/interfaces.py new file mode 100644 index 000000000000..b616674a5a5e --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/interfaces.py @@ -0,0 +1,364 @@ +# -*- Python -*- +# Copyright (c) 2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Pan-protocol chat client. +""" + +from zope.interface import Interface, Attribute + +from twisted.words.im import locals + +# (Random musings, may not reflect on current state of code:) +# +# Accounts have Protocol components (clients) +# Persons have Conversation components +# Groups have GroupConversation components +# Persons and Groups are associated with specific Accounts +# At run-time, Clients/Accounts are slaved to a User Interface +# (Note: User may be a bot, so don't assume all UIs are built on gui toolkits) + + +class IAccount(Interface): + """ + I represent a user's account with a chat service. + """ + + client = Attribute('The L{IClient} currently connecting to this account, if any.') + gatewayType = Attribute('A C{str} that identifies the protocol used by this account.') + + def __init__(accountName, autoLogin, username, password, host, port): + """ + @type accountName: string + @param accountName: A name to refer to the account by locally. + @type autoLogin: boolean + @type username: string + @type password: string + @type host: string + @type port: integer + """ + + def isOnline(): + """ + Am I online? + + @rtype: boolean + """ + + def logOn(chatui): + """ + Go on-line. + + @type chatui: Implementor of C{IChatUI} + + @rtype: L{Deferred} L{Client} + """ + + def logOff(): + """ + Sign off. 
+ """ + + def getGroup(groupName): + """ + @rtype: L{Group} + """ + + def getPerson(personName): + """ + @rtype: L{Person} + """ + +class IClient(Interface): + + account = Attribute('The L{IAccount} I am a Client for') + + def __init__(account, chatui, logonDeferred): + """ + @type account: L{IAccount} + @type chatui: L{IChatUI} + @param logonDeferred: Will be called back once I am logged on. + @type logonDeferred: L{Deferred} + """ + + def joinGroup(groupName): + """ + @param groupName: The name of the group to join. + @type groupName: string + """ + + def leaveGroup(groupName): + """ + @param groupName: The name of the group to leave. + @type groupName: string + """ + + def getGroupConversation(name, hide=0): + pass + + def getPerson(name): + pass + + +class IPerson(Interface): + + def __init__(name, account): + """ + Initialize me. + + @param name: My name, as the server knows me. + @type name: string + @param account: The account I am accessed through. + @type account: I{Account} + """ + + def isOnline(): + """ + Am I online right now? + + @rtype: boolean + """ + + def getStatus(): + """ + What is my on-line status? + + @return: L{locals.StatusEnum} + """ + + def getIdleTime(): + """ + @rtype: string (XXX: How about a scalar?) + """ + + def sendMessage(text, metadata=None): + """ + Send a message to this person. + + @type text: string + @type metadata: dict + """ + + +class IGroup(Interface): + """ + A group which you may have a conversation with. + + Groups generally have a loosely-defined set of members, who may + leave and join at any time. + """ + + name = Attribute('My C{str} name, as the server knows me.') + account = Attribute('The L{Account} I am accessed through.') + + def __init__(name, account): + """ + Initialize me. + + @param name: My name, as the server knows me. + @type name: str + @param account: The account I am accessed through. + @type account: L{Account} + """ + + def setTopic(text): + """ + Set this Groups topic on the server. + + @type text: string + """ + + def sendGroupMessage(text, metadata=None): + """ + Send a message to this group. + + @type text: str + + @type metadata: dict + @param metadata: Valid keys for this dictionary include: + + - C{'style'}: associated with one of: + - C{'emote'}: indicates this is an action + """ + + def join(): + """ + Join this group. + """ + + def leave(): + """ + Depart this group. + """ + + +class IConversation(Interface): + """ + A conversation with a specific person. + """ + + def __init__(person, chatui): + """ + @type person: L{IPerson} + """ + + def show(): + """ + doesn't seem like it belongs in this interface. + """ + + def hide(): + """ + nor this neither. + """ + + def sendText(text, metadata): + pass + + def showMessage(text, metadata): + pass + + def changedNick(person, newnick): + """ + @param person: XXX Shouldn't this always be Conversation.person? + """ + +class IGroupConversation(Interface): + + def show(): + """ + doesn't seem like it belongs in this interface. + """ + + def hide(): + """ + nor this neither. + """ + + def sendText(text, metadata): + pass + + def showGroupMessage(sender, text, metadata): + pass + + def setGroupMembers(members): + """ + Sets the list of members in the group and displays it to the user. + """ + + def setTopic(topic, author): + """ + Displays the topic (from the server) for the group conversation window. + + @type topic: string + @type author: string (XXX: Not Person?) 
+ """ + + def memberJoined(member): + """ + Adds the given member to the list of members in the group conversation + and displays this to the user, + + @type member: string (XXX: Not Person?) + """ + + def memberChangedNick(oldnick, newnick): + """ + Changes the oldnick in the list of members to C{newnick} and displays this + change to the user, + + @type oldnick: string (XXX: Not Person?) + @type newnick: string + """ + + def memberLeft(member): + """ + Deletes the given member from the list of members in the group + conversation and displays the change to the user. + + @type member: string (XXX: Not Person?) + """ + + +class IChatUI(Interface): + + def registerAccountClient(client): + """ + Notifies user that an account has been signed on to. + + @type client: L{Client} + """ + + def unregisterAccountClient(client): + """ + Notifies user that an account has been signed off or disconnected. + + @type client: L{Client} + """ + + def getContactsList(): + """ + @rtype: L{ContactsList} + """ + + # WARNING: You'll want to be polymorphed into something with + # intrinsic stoning resistance before continuing. + + def getConversation(person, Class, stayHidden=0): + """ + For the given person object, returns the conversation window + or creates and returns a new conversation window if one does not exist. + + @type person: L{Person} + @type Class: L{Conversation} class + @type stayHidden: boolean + + @rtype: L{Conversation} + """ + + def getGroupConversation(group, Class, stayHidden=0): + """ + For the given group object, returns the group conversation window or + creates and returns a new group conversation window if it doesn't exist. + + @type group: L{Group} + @type Class: L{Conversation} class + @type stayHidden: boolean + + @rtype: L{GroupConversation} + """ + + def getPerson(name, client): + """ + Get a Person for a client. + + Duplicates L{IAccount.getPerson}. + + @type name: string + @type client: L{Client} + + @rtype: L{Person} + """ + + def getGroup(name, client): + """ + Get a Group for a client. + + Duplicates L{IAccount.getGroup}. + + @type name: string + @type client: L{Client} + + @rtype: L{Group} + """ + + def contactChangedNick(oldnick, newnick): + """ + For the given person, changes the person's name to newnick, and + tells the contact list and any conversation windows with that person + to change as well. + + @type oldnick: string + @type newnick: string + """ diff --git a/vendor/Twisted-10.0.0/twisted/words/im/ircsupport.py b/vendor/Twisted-10.0.0/twisted/words/im/ircsupport.py new file mode 100644 index 000000000000..01db52a0e6a0 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/ircsupport.py @@ -0,0 +1,261 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +"""IRC support for Instance Messenger.""" + +import string + +from twisted.words.protocols import irc +from twisted.words.im.locals import ONLINE +from twisted.internet import defer, reactor, protocol +from twisted.internet.defer import succeed +from twisted.words.im import basesupport, interfaces, locals +from zope.interface import implements + + +class IRCPerson(basesupport.AbstractPerson): + + def imperson_whois(self): + if self.account.client is None: + raise locals.OfflineError + self.account.client.sendLine("WHOIS %s" % self.name) + + ### interface impl + + def isOnline(self): + return ONLINE + + def getStatus(self): + return ONLINE + + def setStatus(self,status): + self.status=status + self.chat.getContactsList().setContactStatus(self) + + def sendMessage(self, text, meta=None): + if self.account.client is None: + raise locals.OfflineError + for line in string.split(text, '\n'): + if meta and meta.get("style", None) == "emote": + self.account.client.ctcpMakeQuery(self.name,[('ACTION', line)]) + else: + self.account.client.msg(self.name, line) + return succeed(text) + +class IRCGroup(basesupport.AbstractGroup): + + implements(interfaces.IGroup) + + def imgroup_testAction(self): + pass + + def imtarget_kick(self, target): + if self.account.client is None: + raise locals.OfflineError + reason = "for great justice!" + self.account.client.sendLine("KICK #%s %s :%s" % ( + self.name, target.name, reason)) + + ### Interface Implementation + + def setTopic(self, topic): + if self.account.client is None: + raise locals.OfflineError + self.account.client.topic(self.name, topic) + + def sendGroupMessage(self, text, meta={}): + if self.account.client is None: + raise locals.OfflineError + if meta and meta.get("style", None) == "emote": + self.account.client.me(self.name,text) + return succeed(text) + #standard shmandard, clients don't support plain escaped newlines! + for line in string.split(text, '\n'): + self.account.client.say(self.name, line) + return succeed(text) + + def leave(self): + if self.account.client is None: + raise locals.OfflineError + self.account.client.leave(self.name) + self.account.client.getGroupConversation(self.name,1) + + +class IRCProto(basesupport.AbstractClientMixin, irc.IRCClient): + def __init__(self, account, chatui, logonDeferred=None): + basesupport.AbstractClientMixin.__init__(self, account, chatui, + logonDeferred) + self._namreplies={} + self._ingroups={} + self._groups={} + self._topics={} + + def getGroupConversation(self, name, hide=0): + name=string.lower(name) + return self.chat.getGroupConversation(self.chat.getGroup(name, self), + stayHidden=hide) + + def getPerson(self,name): + return self.chat.getPerson(name, self) + + def connectionMade(self): + # XXX: Why do I duplicate code in IRCClient.register? + try: + if self.account.password: + self.sendLine("PASS :%s" % self.account.password) + self.setNick(self.account.username) + self.sendLine("USER %s foo bar :Twisted-IM user" % (self.nickname,)) + for channel in self.account.channels: + self.joinGroup(channel) + self.account._isOnline=1 + if self._logonDeferred is not None: + self._logonDeferred.callback(self) + self.chat.getContactsList() + except: + import traceback + traceback.print_exc() + + def setNick(self,nick): + self.name=nick + self.accountName="%s (IRC)"%nick + irc.IRCClient.setNick(self,nick) + + def kickedFrom(self, channel, kicker, message): + """ + Called when I am kicked from a channel. 
+ """ + return self.chat.getGroupConversation( + self.chat.getGroup(channel[1:], self), 1) + + def userKicked(self, kickee, channel, kicker, message): + pass + + def noticed(self, username, channel, message): + self.privmsg(username, channel, message, {"dontAutoRespond": 1}) + + def privmsg(self, username, channel, message, metadata=None): + if metadata is None: + metadata = {} + username=string.split(username,'!',1)[0] + if username==self.name: return + if channel[0]=='#': + group=channel[1:] + self.getGroupConversation(group).showGroupMessage(username, message, metadata) + return + self.chat.getConversation(self.getPerson(username)).showMessage(message, metadata) + + def action(self,username,channel,emote): + username=string.split(username,'!',1)[0] + if username==self.name: return + meta={'style':'emote'} + if channel[0]=='#': + group=channel[1:] + self.getGroupConversation(group).showGroupMessage(username, emote, meta) + return + self.chat.getConversation(self.getPerson(username)).showMessage(emote,meta) + + def irc_RPL_NAMREPLY(self,prefix,params): + """ + RPL_NAMREPLY + >> NAMES #bnl + << :Arlington.VA.US.Undernet.Org 353 z3p = #bnl :pSwede Dan-- SkOyg AG + """ + group=string.lower(params[2][1:]) + users=string.split(params[3]) + for ui in range(len(users)): + while users[ui][0] in ["@","+"]: # channel modes + users[ui]=users[ui][1:] + if not self._namreplies.has_key(group): + self._namreplies[group]=[] + self._namreplies[group].extend(users) + for nickname in users: + try: + self._ingroups[nickname].append(group) + except: + self._ingroups[nickname]=[group] + + def irc_RPL_ENDOFNAMES(self,prefix,params): + group=params[1][1:] + self.getGroupConversation(group).setGroupMembers(self._namreplies[string.lower(group)]) + del self._namreplies[string.lower(group)] + + def irc_RPL_TOPIC(self,prefix,params): + self._topics[params[1][1:]]=params[2] + + def irc_333(self,prefix,params): + group=params[1][1:] + self.getGroupConversation(group).setTopic(self._topics[group],params[2]) + del self._topics[group] + + def irc_TOPIC(self,prefix,params): + nickname = string.split(prefix,"!")[0] + group = params[0][1:] + topic = params[1] + self.getGroupConversation(group).setTopic(topic,nickname) + + def irc_JOIN(self,prefix,params): + nickname=string.split(prefix,"!")[0] + group=string.lower(params[0][1:]) + if nickname!=self.nickname: + try: + self._ingroups[nickname].append(group) + except: + self._ingroups[nickname]=[group] + self.getGroupConversation(group).memberJoined(nickname) + + def irc_PART(self,prefix,params): + nickname=string.split(prefix,"!")[0] + group=string.lower(params[0][1:]) + if nickname!=self.nickname: + if group in self._ingroups[nickname]: + self._ingroups[nickname].remove(group) + self.getGroupConversation(group).memberLeft(nickname) + + def irc_QUIT(self,prefix,params): + nickname=string.split(prefix,"!")[0] + if self._ingroups.has_key(nickname): + for group in self._ingroups[nickname]: + self.getGroupConversation(group).memberLeft(nickname) + self._ingroups[nickname]=[] + + def irc_NICK(self, prefix, params): + fromNick = string.split(prefix, "!")[0] + toNick = params[0] + if not self._ingroups.has_key(fromNick): + return + for group in self._ingroups[fromNick]: + self.getGroupConversation(group).memberChangedNick(fromNick, toNick) + self._ingroups[toNick] = self._ingroups[fromNick] + del self._ingroups[fromNick] + + def irc_unknown(self, prefix, command, params): + pass + + # GTKIM calls + def joinGroup(self,name): + self.join(name) + self.getGroupConversation(name) + +class 
IRCAccount(basesupport.AbstractAccount): + implements(interfaces.IAccount) + gatewayType = "IRC" + + _groupFactory = IRCGroup + _personFactory = IRCPerson + + def __init__(self, accountName, autoLogin, username, password, host, port, + channels=''): + basesupport.AbstractAccount.__init__(self, accountName, autoLogin, + username, password, host, port) + self.channels = map(string.strip,string.split(channels,',')) + if self.channels == ['']: + self.channels = [] + + def _startLogOn(self, chatui): + logonDeferred = defer.Deferred() + cc = protocol.ClientCreator(reactor, IRCProto, self, chatui, + logonDeferred) + d = cc.connectTCP(self.host, self.port) + d.addErrback(logonDeferred.errback) + return logonDeferred diff --git a/vendor/Twisted-10.0.0/twisted/words/im/locals.py b/vendor/Twisted-10.0.0/twisted/words/im/locals.py new file mode 100644 index 000000000000..02025f928d05 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/locals.py @@ -0,0 +1,26 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +class Enum: + group = None + + def __init__(self, label): + self.label = label + + def __repr__(self): + return '<%s: %s>' % (self.group, self.label) + + def __str__(self): + return self.label + + +class StatusEnum(Enum): + group = 'Status' + +OFFLINE = Enum('Offline') +ONLINE = Enum('Online') +AWAY = Enum('Away') + +class OfflineError(Exception): + """The requested action can't happen while offline.""" diff --git a/vendor/Twisted-10.0.0/twisted/words/im/pbsupport.py b/vendor/Twisted-10.0.0/twisted/words/im/pbsupport.py new file mode 100644 index 000000000000..7e750b7306da --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/pbsupport.py @@ -0,0 +1,260 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +"""L{twisted.words} support for Instance Messenger.""" + +from __future__ import nested_scopes + +from twisted.internet import defer +from twisted.internet import error +from twisted.python import log +from twisted.python.failure import Failure +from twisted.spread import pb + +from twisted.words.im.locals import ONLINE, OFFLINE, AWAY + +from twisted.words.im import basesupport, interfaces +from zope.interface import implements + + +class TwistedWordsPerson(basesupport.AbstractPerson): + """I a facade for a person you can talk to through a twisted.words service. + """ + def __init__(self, name, wordsAccount): + basesupport.AbstractPerson.__init__(self, name, wordsAccount) + self.status = OFFLINE + + def isOnline(self): + return ((self.status == ONLINE) or + (self.status == AWAY)) + + def getStatus(self): + return self.status + + def sendMessage(self, text, metadata): + """Return a deferred... + """ + if metadata: + d=self.account.client.perspective.directMessage(self.name, + text, metadata) + d.addErrback(self.metadataFailed, "* "+text) + return d + else: + return self.account.client.perspective.callRemote('directMessage',self.name, text) + + def metadataFailed(self, result, text): + print "result:",result,"text:",text + return self.account.client.perspective.directMessage(self.name, text) + + def setStatus(self, status): + self.status = status + self.chat.getContactsList().setContactStatus(self) + +class TwistedWordsGroup(basesupport.AbstractGroup): + implements(interfaces.IGroup) + def __init__(self, name, wordsClient): + basesupport.AbstractGroup.__init__(self, name, wordsClient) + self.joined = 0 + + def sendGroupMessage(self, text, metadata=None): + """Return a deferred. 
+ """ + #for backwards compatibility with older twisted.words servers. + if metadata: + d=self.account.client.perspective.callRemote( + 'groupMessage', self.name, text, metadata) + d.addErrback(self.metadataFailed, "* "+text) + return d + else: + return self.account.client.perspective.callRemote('groupMessage', + self.name, text) + + def setTopic(self, text): + self.account.client.perspective.callRemote( + 'setGroupMetadata', + {'topic': text, 'topic_author': self.client.name}, + self.name) + + def metadataFailed(self, result, text): + print "result:",result,"text:",text + return self.account.client.perspective.callRemote('groupMessage', + self.name, text) + + def joining(self): + self.joined = 1 + + def leaving(self): + self.joined = 0 + + def leave(self): + return self.account.client.perspective.callRemote('leaveGroup', + self.name) + + + +class TwistedWordsClient(pb.Referenceable, basesupport.AbstractClientMixin): + """In some cases, this acts as an Account, since it a source of text + messages (multiple Words instances may be on a single PB connection) + """ + def __init__(self, acct, serviceName, perspectiveName, chatui, + _logonDeferred=None): + self.accountName = "%s (%s:%s)" % (acct.accountName, serviceName, perspectiveName) + self.name = perspectiveName + print "HELLO I AM A PB SERVICE", serviceName, perspectiveName + self.chat = chatui + self.account = acct + self._logonDeferred = _logonDeferred + + def getPerson(self, name): + return self.chat.getPerson(name, self) + + def getGroup(self, name): + return self.chat.getGroup(name, self) + + def getGroupConversation(self, name): + return self.chat.getGroupConversation(self.getGroup(name)) + + def addContact(self, name): + self.perspective.callRemote('addContact', name) + + def remote_receiveGroupMembers(self, names, group): + print 'received group members:', names, group + self.getGroupConversation(group).setGroupMembers(names) + + def remote_receiveGroupMessage(self, sender, group, message, metadata=None): + print 'received a group message', sender, group, message, metadata + self.getGroupConversation(group).showGroupMessage(sender, message, metadata) + + def remote_memberJoined(self, member, group): + print 'member joined', member, group + self.getGroupConversation(group).memberJoined(member) + + def remote_memberLeft(self, member, group): + print 'member left' + self.getGroupConversation(group).memberLeft(member) + + def remote_notifyStatusChanged(self, name, status): + self.chat.getPerson(name, self).setStatus(status) + + def remote_receiveDirectMessage(self, name, message, metadata=None): + self.chat.getConversation(self.chat.getPerson(name, self)).showMessage(message, metadata) + + def remote_receiveContactList(self, clist): + for name, status in clist: + self.chat.getPerson(name, self).setStatus(status) + + def remote_setGroupMetadata(self, dict_, groupName): + if dict_.has_key("topic"): + self.getGroupConversation(groupName).setTopic(dict_["topic"], dict_.get("topic_author", None)) + + def joinGroup(self, name): + self.getGroup(name).joining() + return self.perspective.callRemote('joinGroup', name).addCallback(self._cbGroupJoined, name) + + def leaveGroup(self, name): + self.getGroup(name).leaving() + return self.perspective.callRemote('leaveGroup', name).addCallback(self._cbGroupLeft, name) + + def _cbGroupJoined(self, result, name): + groupConv = self.chat.getGroupConversation(self.getGroup(name)) + groupConv.showGroupMessage("sys", "you joined") + self.perspective.callRemote('getGroupMembers', name) + + def 
_cbGroupLeft(self, result, name): + print 'left',name + groupConv = self.chat.getGroupConversation(self.getGroup(name), 1) + groupConv.showGroupMessage("sys", "you left") + + def connected(self, perspective): + print 'Connected Words Client!', perspective + if self._logonDeferred is not None: + self._logonDeferred.callback(self) + self.perspective = perspective + self.chat.getContactsList() + + +pbFrontEnds = { + "twisted.words": TwistedWordsClient, + "twisted.reality": None + } + + +class PBAccount(basesupport.AbstractAccount): + implements(interfaces.IAccount) + gatewayType = "PB" + _groupFactory = TwistedWordsGroup + _personFactory = TwistedWordsPerson + + def __init__(self, accountName, autoLogin, username, password, host, port, + services=None): + """ + @param username: The name of your PB Identity. + @type username: string + """ + basesupport.AbstractAccount.__init__(self, accountName, autoLogin, + username, password, host, port) + self.services = [] + if not services: + services = [('twisted.words', 'twisted.words', username)] + for serviceType, serviceName, perspectiveName in services: + self.services.append([pbFrontEnds[serviceType], serviceName, + perspectiveName]) + + def logOn(self, chatui): + """ + @returns: this breaks with L{interfaces.IAccount} + @returntype: DeferredList of L{interfaces.IClient}s + """ + # Overriding basesupport's implementation on account of the + # fact that _startLogOn tends to return a deferredList rather + # than a simple Deferred, and we need to do registerAccountClient. + if (not self._isConnecting) and (not self._isOnline): + self._isConnecting = 1 + d = self._startLogOn(chatui) + d.addErrback(self._loginFailed) + def registerMany(results): + for success, result in results: + if success: + chatui.registerAccountClient(result) + self._cb_logOn(result) + else: + log.err(result) + d.addCallback(registerMany) + return d + else: + raise error.ConnectionError("Connection in progress") + + + def _startLogOn(self, chatui): + print 'Connecting...', + d = pb.getObjectAt(self.host, self.port) + d.addCallbacks(self._cbConnected, self._ebConnected, + callbackArgs=(chatui,)) + return d + + def _cbConnected(self, root, chatui): + print 'Connected!' + print 'Identifying...', + d = pb.authIdentity(root, self.username, self.password) + d.addCallbacks(self._cbIdent, self._ebConnected, + callbackArgs=(chatui,)) + return d + + def _cbIdent(self, ident, chatui): + if not ident: + print 'falsely identified.' + return self._ebConnected(Failure(Exception("username or password incorrect"))) + print 'Identified!' + dl = [] + for handlerClass, sname, pname in self.services: + d = defer.Deferred() + dl.append(d) + handler = handlerClass(self, sname, pname, chatui, d) + ident.callRemote('attach', sname, pname, handler).addCallback(handler.connected) + return defer.DeferredList(dl) + + def _ebConnected(self, error): + print 'Not connected.' + return error + diff --git a/vendor/Twisted-10.0.0/twisted/words/im/proxyui.py b/vendor/Twisted-10.0.0/twisted/words/im/proxyui.py new file mode 100644 index 000000000000..de11bf7560b1 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/proxyui.py @@ -0,0 +1,24 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. 
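# A minimal usage sketch for the PBAccount logon flow defined in pbsupport.py
# above.  The basechat.ChatUI instance and the port number 8787 are
# illustrative assumptions; logOn() only needs an object implementing the
# chat UI interface.

if __name__ == '__main__':
    from twisted.internet import reactor
    from twisted.python import log
    from twisted.words.im.basechat import ChatUI
    from twisted.words.im.pbsupport import PBAccount

    account = PBAccount('demo', 0, 'alice', 'secret', 'localhost', 8787)
    d = account.logOn(ChatUI())     # DeferredList of connected clients
    d.addErrback(log.err)
    reactor.run()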
+ +# + +from twisted.words.protocols.irc import IRC +from twisted.python import log +from twisted.internet.protocol import Factory + +class IRCUserInterface(IRC): + def connectionLost(self): + del self.factory.ircui + +class IRCUIFactory(Factory): + ircui = None + def buildProtocol(self): + if self.ircui: + log.msg("already logged in") + return None + i = IRCUserInterface() + i.factory = self + self.ircui = i + return i + diff --git a/vendor/Twisted-10.0.0/twisted/words/im/tap.py b/vendor/Twisted-10.0.0/twisted/words/im/tap.py new file mode 100644 index 000000000000..64bddce4abc5 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/tap.py @@ -0,0 +1,15 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + +# + +from twisted.words.im.proxyui import IRCUIFactory +from twisted.python import usage + +class Options(usage.Options): + optParameters = [["ircport", "p", "6667", + "Port to start the IRC server on."]] + +def updateApplication(app, config): + factory = IRCUIFactory() + app.listenTCP(int(config.opts['ircport']), IRCUIFactory()) diff --git a/vendor/Twisted-10.0.0/twisted/words/im/tocsupport.py b/vendor/Twisted-10.0.0/twisted/words/im/tocsupport.py new file mode 100644 index 000000000000..36ac2cd9b298 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/im/tocsupport.py @@ -0,0 +1,220 @@ +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +"""TOC (i.e. AIM) support for Instance Messenger.""" + +# System Imports +import string, re +from zope.interface import implements + +# Twisted Imports +from twisted.words.protocols import toc +from twisted.words.im.locals import ONLINE, OFFLINE, AWAY +from twisted.internet import defer, reactor, protocol +from twisted.internet.defer import succeed + +# Sibling Imports +from twisted.words.im import basesupport, interfaces, locals + +def dehtml(text): + text=string.replace(text,"
                                ","\n") + text=string.replace(text,"
                                ","\n") + text=string.replace(text,"
                                ","\n") # XXX make this a regexp + text=string.replace(text,"
                                ","\n") + text=re.sub('<.*?>','',text) + text=string.replace(text,'>','>') + text=string.replace(text,'<','<') + text=string.replace(text,'&','&') + text=string.replace(text,' ',' ') + text=string.replace(text,'"','"') + return text + +def html(text): + text=string.replace(text,'"','"') + text=string.replace(text,'&','&') + text=string.replace(text,'<','<') + text=string.replace(text,'>','>') + text=string.replace(text,"\n","
                                ") + return '%s'%text + +class TOCPerson(basesupport.AbstractPerson): + def isOnline(self): + return self.status != OFFLINE + + def getStatus(self): + return self.status + + def getIdleTime(self): + return str(self.idletime) + + def setStatusAndIdle(self, status, idletime): + if self.account.client is None: + raise locals.OfflineError + self.status = status + self.idletime = idletime + self.account.client.chat.getContactsList().setContactStatus(self) + + def sendMessage(self, text, meta=None): + if self.account.client is None: + raise locals.OfflineError + if meta: + if meta.get("style", None) == "emote": + text="* "+text+"* " + self.account.client.say(self.name,html(text)) + return succeed(text) + +class TOCGroup(basesupport.AbstractGroup): + implements(interfaces.IGroup) + def __init__(self, name, tocAccount): + basesupport.AbstractGroup.__init__(self, name, tocAccount) + self.roomID = self.client.roomID[self.name] + + def sendGroupMessage(self, text, meta=None): + if self.account.client is None: + raise locals.OfflineError + if meta: + if meta.get("style", None) == "emote": + text="* "+text+"* " + self.account.client.chat_say(self.roomID,html(text)) + return succeed(text) + + def leave(self): + if self.account.client is None: + raise locals.OfflineError + self.account.client.chat_leave(self.roomID) + + +class TOCProto(basesupport.AbstractClientMixin, toc.TOCClient): + def __init__(self, account, chatui, logonDeferred): + toc.TOCClient.__init__(self, account.username, account.password) + basesupport.AbstractClientMixin.__init__(self, account, chatui, + logonDeferred) + self.roomID = {} + self.roomIDreverse = {} + + def _debug(self, m): + pass #print '', repr(m) + + def getGroupConversation(self, name, hide=0): + return self.chat.getGroupConversation( + self.chat.getGroup(name, self), hide) + + def addContact(self, name): + self.add_buddy([name]) + if not self._buddylist.has_key('TwistedIM'): + self._buddylist['TwistedIM'] = [] + if name in self._buddylist['TwistedIM']: + # whoops, don't add again + return + self._buddylist['TwistedIM'].append(name) + self.set_config(self._config_mode, self._buddylist, self._permit, self._deny) + + def getPerson(self,name): + return self.chat.getPerson(name, self) + + def onLine(self): + self.account._isOnline = 1 + #print '$$!&*$&!(@$*& TOC ONLINE *!#@&$(!*%&' + + def gotConfig(self, mode, buddylist, permit, deny): + #print 'got toc config', repr(mode), repr(buddylist), repr(permit), repr(deny) + self._config_mode = mode + self._buddylist = buddylist + self._permit = permit + self._deny = deny + if permit: + self._debug('adding permit') + self.add_permit(permit) + if deny: + self._debug('adding deny') + self.add_deny(deny) + clist=[] + for k in buddylist.keys(): + self.add_buddy(buddylist[k]) + for name in buddylist[k]: + self.getPerson(name).setStatusAndIdle(OFFLINE, '--') + self.signon() + name = None + def tocNICK(self,data): + if not self.name: + print 'Waiting for second NICK', data + self.name=data[0] + self.accountName = '%s (TOC)' % self.name + self.chat.getContactsList() + else: + print 'reregistering...?', data + self.name=data[0] + # self.accountName = "%s (TOC)"%data[0] + if self._logonDeferred is not None: + self._logonDeferred.callback(self) + self._logonDeferred = None + + ### Error Messages + def hearError(self, code, args): + print '*** TOC ERROR ***', repr(code), repr(args) + def hearWarning(self, newamount, username): + print '*** TOC WARNING ***', repr(newamount), repr(username) + ### Buddy Messages + 
def hearMessage(self,username,message,autoreply): + if autoreply: + message=': '+message + self.chat.getConversation(self.getPerson(username) + ).showMessage(dehtml(message)) + def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away): + if away: + status=AWAY + elif online: + status=ONLINE + else: + status=OFFLINE + self.getPerson(username).setStatusAndIdle(status, idletime) + + ### Group Chat + def chatJoined(self, roomid, roomname, users): + self.roomID[roomname]=roomid + self.roomIDreverse[roomid]=roomname + self.getGroupConversation(roomname).setGroupMembers(users) + def chatUpdate(self,roomid,member,inroom): + group=self.roomIDreverse[roomid] + if inroom: + self.getGroupConversation(group).memberJoined(member) + else: + self.getGroupConversation(group).memberLeft(member) + def chatHearMessage(self, roomid, username, message): + if toc.normalize(username) == toc.normalize(self.name): + return # ignore the message + group=self.roomIDreverse[roomid] + self.getGroupConversation(group).showGroupMessage(username, dehtml(message)) + def chatHearWhisper(self, roomid, username, message): + print '*** user whispered *** ', roomid, username, message + def chatInvited(self, roomid, roomname, username, message): + print '*** user invited us to chat *** ',roomid, roomname, username, message + def chatLeft(self, roomid): + group=self.roomIDreverse[roomid] + self.getGroupConversation(group,1) + del self.roomID[group] + del self.roomIDreverse[roomid] + def rvousProposal(self,type,cookie,user,vip,port,**kw): + print '*** rendezvous. ***', type, cookie, user, vip, port, kw + def receiveBytes(self, user, file, chunk, sofar, total): + print '*** File transfer! ***', user, file, chunk, sofar, total + + def joinGroup(self,name): + self.chat_join(4,toc.normalize(name)) + +class TOCAccount(basesupport.AbstractAccount): + implements(interfaces.IAccount) + gatewayType = "AIM (TOC)" + + _groupFactory = TOCGroup + _personFactory = TOCPerson + + def _startLogOn(self, chatui): + logonDeferred = defer.Deferred() + cc = protocol.ClientCreator(reactor, TOCProto, self, chatui, + logonDeferred) + d = cc.connectTCP(self.host, self.port) + d.addErrback(logonDeferred.errback) + return logonDeferred + diff --git a/vendor/Twisted-10.0.0/twisted/words/iwords.py b/vendor/Twisted-10.0.0/twisted/words/iwords.py new file mode 100644 index 000000000000..1b9da0769a2e --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/iwords.py @@ -0,0 +1,266 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. + +from zope.interface import Interface, Attribute, implements + +class IProtocolPlugin(Interface): + """Interface for plugins providing an interface to a Words service + """ + + name = Attribute("A single word describing what kind of interface this is (eg, irc or web)") + + def getFactory(realm, portal): + """Retrieve a C{twisted.internet.interfaces.IServerFactory} provider + + @param realm: An object providing C{twisted.cred.portal.IRealm} and + C{IChatService}, with which service information should be looked up. + + @param portal: An object providing C{twisted.cred.portal.IPortal}, + through which logins should be performed. + """ + + +class IGroup(Interface): + name = Attribute("A short string, unique among groups.") + + def add(user): + """Include the given user in this group. + + @type user: L{IUser} + """ + + def remove(user, reason=None): + """Remove the given user from this group. 
+ + @type user: L{IUser} + @type reason: C{unicode} + """ + + def size(): + """Return the number of participants in this group. + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with an C{int} representing the the + number of participants in this group. + """ + + def receive(sender, recipient, message): + """ + Broadcast the given message from the given sender to other + users in group. + + The message is not re-transmitted to the sender. + + @param sender: L{IUser} + + @type recipient: L{IGroup} + @param recipient: This is probably a wart. Maybe it will be removed + in the future. For now, it should be the group object the message + is being delivered to. + + @param message: C{dict} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with None when delivery has been + attempted for all users. + """ + + def setMetadata(meta): + """Change the metadata associated with this group. + + @type meta: C{dict} + """ + + def iterusers(): + """Return an iterator of all users in this group. + """ + + +class IChatClient(Interface): + """Interface through which IChatService interacts with clients. + """ + + name = Attribute("A short string, unique among users. This will be set by the L{IChatService} at login time.") + + def receive(sender, recipient, message): + """ + Callback notifying this user of the given message sent by the + given user. + + This will be invoked whenever another user sends a message to a + group this user is participating in, or whenever another user sends + a message directly to this user. In the former case, C{recipient} + will be the group to which the message was sent; in the latter, it + will be the same object as the user who is receiving the message. + + @type sender: L{IUser} + @type recipient: L{IUser} or L{IGroup} + @type message: C{dict} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires when the message has been delivered, + or which fails in some way. If the Deferred fails and the message + was directed at a group, this user will be removed from that group. + """ + + def groupMetaUpdate(group, meta): + """ + Callback notifying this user that the metadata for the given + group has changed. + + @type group: L{IGroup} + @type meta: C{dict} + + @rtype: L{twisted.internet.defer.Deferred} + """ + + def userJoined(group, user): + """ + Callback notifying this user that the given user has joined + the given group. + + @type group: L{IGroup} + @type user: L{IUser} + + @rtype: L{twisted.internet.defer.Deferred} + """ + + def userLeft(group, user, reason=None): + """ + Callback notifying this user that the given user has left the + given group for the given reason. + + @type group: L{IGroup} + @type user: L{IUser} + @type reason: C{unicode} + + @rtype: L{twisted.internet.defer.Deferred} + """ + + +class IUser(Interface): + """Interface through which clients interact with IChatService. + """ + + realm = Attribute("A reference to the Realm to which this user belongs. Set if and only if the user is logged in.") + mind = Attribute("A reference to the mind which logged in to this user. Set if and only if the user is logged in.") + name = Attribute("A short string, unique among users.") + + lastMessage = Attribute("A POSIX timestamp indicating the time of the last message received from this user.") + signOn = Attribute("A POSIX timestamp indicating this user's most recent sign on time.") + + def loggedIn(realm, mind): + """Invoked by the associated L{IChatService} when login occurs. 
+ + @param realm: The L{IChatService} through which login is occurring. + @param mind: The mind object used for cred login. + """ + + def send(recipient, message): + """Send the given message to the given user or group. + + @type recipient: Either L{IUser} or L{IGroup} + @type message: C{dict} + """ + + def join(group): + """Attempt to join the given group. + + @type group: L{IGroup} + @rtype: L{twisted.internet.defer.Deferred} + """ + + def leave(group): + """Discontinue participation in the given group. + + @type group: L{IGroup} + @rtype: L{twisted.internet.defer.Deferred} + """ + + def itergroups(): + """ + Return an iterator of all groups of which this user is a + member. + """ + + +class IChatService(Interface): + name = Attribute("A short string identifying this chat service (eg, a hostname)") + + createGroupOnRequest = Attribute( + "A boolean indicating whether L{getGroup} should implicitly " + "create groups which are requested but which do not yet exist.") + + createUserOnRequest = Attribute( + "A boolean indicating whether L{getUser} should implicitly " + "create users which are requested but which do not yet exist.") + + def itergroups(): + """Return all groups available on this service. + + @rtype: C{twisted.internet.defer.Deferred} + @return: A Deferred which fires with a list of C{IGroup} providers. + """ + + def getGroup(name): + """Retrieve the group by the given name. + + @type name: C{str} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with the group with the given + name if one exists (or if one is created due to the setting of + L{createGroupOnRequest}, or which fails with + L{twisted.words.ewords.NoSuchGroup} if no such group exists. + """ + + def createGroup(name): + """Create a new group with the given name. + + @type name: C{str} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with the created group, or + with fails with L{twisted.words.ewords.DuplicateGroup} if a + group by that name exists already. + """ + + def lookupGroup(name): + """Retrieve a group by name. + + Unlike C{getGroup}, this will never implicitly create a group. + + @type name: C{str} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with the group by the given + name, or which fails with L{twisted.words.ewords.NoSuchGroup}. + """ + + def getUser(name): + """Retrieve the user by the given name. + + @type name: C{str} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with the user with the given + name if one exists (or if one is created due to the setting of + L{createUserOnRequest}, or which fails with + L{twisted.words.ewords.NoSuchUser} if no such user exists. + """ + + def createUser(name): + """Create a new user with the given name. + + @type name: C{str} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with the created user, or + with fails with L{twisted.words.ewords.DuplicateUser} if a + user by that name exists already. 
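        A typical caller guards against the name already being taken, for
        example (the user name is illustrative)::

            from twisted.words import ewords

            d = service.createUser(u'alice')
            d.addErrback(lambda f: f.trap(ewords.DuplicateUser))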
+ """ + +__all__ = [ + 'IChatInterface', 'IGroup', 'IChatClient', 'IUser', 'IChatService', + ] diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/__init__.py b/vendor/Twisted-10.0.0/twisted/words/protocols/__init__.py new file mode 100644 index 000000000000..5b4f7e508727 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/__init__.py @@ -0,0 +1 @@ +"Chat protocols" diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/irc.py b/vendor/Twisted-10.0.0/twisted/words/protocols/irc.py new file mode 100644 index 000000000000..a96da2c22474 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/irc.py @@ -0,0 +1,3166 @@ +# -*- test-case-name: twisted.words.test.test_irc -*- +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Internet Relay Chat Protocol for client and server. + +Future Plans +============ + +The way the IRCClient class works here encourages people to implement +IRC clients by subclassing the ephemeral protocol class, and it tends +to end up with way more state than it should for an object which will +be destroyed as soon as the TCP transport drops. Someone oughta do +something about that, ya know? + +The DCC support needs to have more hooks for the client for it to be +able to ask the user things like "Do you want to accept this session?" +and "Transfer #2 is 67% done." and otherwise manage the DCC sessions. + +Test coverage needs to be better. + +@author: Kevin Turner + +@see: RFC 1459: Internet Relay Chat Protocol +@see: RFC 2812: Internet Relay Chat: Client Protocol +@see: U{The Client-To-Client-Protocol +} +""" + +import errno, os, random, re, stat, struct, sys, time, types, traceback +import string, socket +import warnings +from os import path + +from twisted.internet import reactor, protocol +from twisted.persisted import styles +from twisted.protocols import basic +from twisted.python import log, reflect, text + +NUL = chr(0) +CR = chr(015) +NL = chr(012) +LF = NL +SPC = chr(040) + +CHANNEL_PREFIXES = '&#!+' + +class IRCBadMessage(Exception): + pass + +class IRCPasswordMismatch(Exception): + pass + + + +class IRCBadModes(ValueError): + """ + A malformed mode was encountered while attempting to parse a mode string. + """ + + + +def parsemsg(s): + """Breaks a message from an IRC server into its prefix, command, and arguments. + """ + prefix = '' + trailing = [] + if not s: + raise IRCBadMessage("Empty line.") + if s[0] == ':': + prefix, s = s[1:].split(' ', 1) + if s.find(' :') != -1: + s, trailing = s.split(' :', 1) + args = s.split() + args.append(trailing) + else: + args = s.split() + command = args.pop(0) + return prefix, command, args + + +def split(str, length = 80): + """I break a message into multiple lines. + + I prefer to break at whitespace near str[length]. I also break at \\n. + + @returns: list of strings + """ + if length <= 0: + raise ValueError("Length must be a number greater than zero") + r = [] + while len(str) > length: + w, n = str[:length].rfind(' '), str[:length].find('\n') + if w == -1 and n == -1: + line, str = str[:length], str[length:] + else: + if n == -1: + i = w + else: + i = n + if i == 0: # just skip the space or newline. don't append any output. + str = str[1:] + continue + line, str = str[:i], str[i+1:] + r.append(line) + if len(str): + r.extend(str.split('\n')) + return r + + + +def _intOrDefault(value, default=None): + """ + Convert a value to an integer if possible. 
+ + @rtype: C{int} or type of L{default} + @return: An integer when C{value} can be converted to an integer, + otherwise return C{default} + """ + if value: + try: + return int(value) + except (TypeError, ValueError): + pass + return default + + + +class UnhandledCommand(RuntimeError): + """ + A command dispatcher could not locate an appropriate command handler. + """ + + + +class _CommandDispatcherMixin(object): + """ + Dispatch commands to handlers based on their name. + + Command handler names should be of the form C{prefix_commandName}, + where C{prefix} is the value specified by L{prefix}, and must + accept the parameters as given to L{dispatch}. + + Attempting to mix this in more than once for a single class will cause + strange behaviour, due to L{prefix} being overwritten. + + @type prefix: C{str} + @ivar prefix: Command handler prefix, used to locate handler attributes + """ + prefix = None + + def dispatch(self, commandName, *args): + """ + Perform actual command dispatch. + """ + def _getMethodName(command): + return '%s_%s' % (self.prefix, command) + + def _getMethod(name): + return getattr(self, _getMethodName(name), None) + + method = _getMethod(commandName) + if method is not None: + return method(*args) + + method = _getMethod('unknown') + if method is None: + raise UnhandledCommand("No handler for %r could be found" % (_getMethodName(commandName),)) + return method(commandName, *args) + + + + + +def parseModes(modes, params, paramModes=('', '')): + """ + Parse an IRC mode string. + + The mode string is parsed into two lists of mode changes (added and + removed), with each mode change represented as C{(mode, param)} where mode + is the mode character, and param is the parameter passed for that mode, or + C{None} if no parameter is required. + + @type modes: C{str} + @param modes: Modes string to parse. + + @type params: C{list} + @param params: Parameters specified along with L{modes}. + + @type paramModes: C{(str, str)} + @param paramModes: A pair of strings (C{(add, remove)}) that indicate which modes take + parameters when added or removed. + + @returns: Two lists of mode changes, one for modes added and the other for + modes removed respectively, mode changes in each list are represented as + C{(mode, param)}. + """ + if len(modes) == 0: + raise IRCBadModes('Empty mode string') + + if modes[0] not in '+-': + raise IRCBadModes('Malformed modes string: %r' % (modes,)) + + changes = ([], []) + + direction = None + count = -1 + for ch in modes: + if ch in '+-': + if count == 0: + raise IRCBadModes('Empty mode sequence: %r' % (modes,)) + direction = '+-'.index(ch) + count = 0 + else: + param = None + if ch in paramModes[direction]: + try: + param = params.pop(0) + except IndexError: + raise IRCBadModes('Not enough parameters: %r' % (ch,)) + changes[direction].append((ch, param)) + count += 1 + + if len(params) > 0: + raise IRCBadModes('Too many parameters: %r %r' % (modes, params)) + + if count == 0: + raise IRCBadModes('Empty mode sequence: %r' % (modes,)) + + return changes + + + +class IRC(protocol.Protocol): + """ + Internet Relay Chat server protocol. 
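    Subclasses handle incoming commands by defining C{irc_COMMAND} methods;
    anything without a matching handler is passed to L{irc_unknown}.  A
    minimal sketch (the NICK handling shown is illustrative only)::

        class SimpleIRCServer(IRC):
            def irc_NICK(self, prefix, params):
                self.nickname = params[0]

            def irc_unknown(self, prefix, command, params):
                log.msg('unhandled command: %s' % (command,))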
+ """ + + buffer = "" + hostname = None + + encoding = None + + def connectionMade(self): + self.channels = [] + if self.hostname is None: + self.hostname = socket.getfqdn() + + + def sendLine(self, line): + if self.encoding is not None: + if isinstance(line, unicode): + line = line.encode(self.encoding) + self.transport.write("%s%s%s" % (line, CR, LF)) + + + def sendMessage(self, command, *parameter_list, **prefix): + """ + Send a line formatted as an IRC message. + + First argument is the command, all subsequent arguments are parameters + to that command. If a prefix is desired, it may be specified with the + keyword argument 'prefix'. + """ + + if not command: + raise ValueError, "IRC message requires a command." + + if ' ' in command or command[0] == ':': + # Not the ONLY way to screw up, but provides a little + # sanity checking to catch likely dumb mistakes. + raise ValueError, "Somebody screwed up, 'cuz this doesn't" \ + " look like a command to me: %s" % command + + line = string.join([command] + list(parameter_list)) + if prefix.has_key('prefix'): + line = ":%s %s" % (prefix['prefix'], line) + self.sendLine(line) + + if len(parameter_list) > 15: + log.msg("Message has %d parameters (RFC allows 15):\n%s" % + (len(parameter_list), line)) + + + def dataReceived(self, data): + """ + This hack is to support mIRC, which sends LF only, even though the RFC + says CRLF. (Also, the flexibility of LineReceiver to turn "line mode" + on and off was not required.) + """ + lines = (self.buffer + data).split(LF) + # Put the (possibly empty) element after the last LF back in the + # buffer + self.buffer = lines.pop() + + for line in lines: + if len(line) <= 2: + # This is a blank line, at best. + continue + if line[-1] == CR: + line = line[:-1] + prefix, command, params = parsemsg(line) + # mIRC is a big pile of doo-doo + command = command.upper() + # DEBUG: log.msg( "%s %s %s" % (prefix, command, params)) + + self.handleCommand(command, prefix, params) + + + def handleCommand(self, command, prefix, params): + """ + Determine the function to call for the given command and call it with + the given arguments. + """ + method = getattr(self, "irc_%s" % command, None) + try: + if method is not None: + method(prefix, params) + else: + self.irc_unknown(prefix, command, params) + except: + log.deferr() + + + def irc_unknown(self, prefix, command, params): + """ + Called by L{handleCommand} on a command that doesn't have a defined + handler. Subclasses should override this method. + """ + raise NotImplementedError(command, prefix, params) + + + # Helper methods + def privmsg(self, sender, recip, message): + """ + Send a message to a channel or user + + @type sender: C{str} or C{unicode} + @param sender: Who is sending this message. Should be of the form + username!ident@hostmask (unless you know better!). + + @type recip: C{str} or C{unicode} + @param recip: The recipient of this message. If a channel, it must + start with a channel prefix. + + @type message: C{str} or C{unicode} + @param message: The message being sent. + """ + self.sendLine(":%s PRIVMSG %s :%s" % (sender, recip, lowQuote(message))) + + + def notice(self, sender, recip, message): + """ + Send a "notice" to a channel or user. + + Notices differ from privmsgs in that the RFC claims they are different. + Robots are supposed to send notices and not respond to them. Clients + typically display notices differently from privmsgs. + + @type sender: C{str} or C{unicode} + @param sender: Who is sending this message. 
Should be of the form + username!ident@hostmask (unless you know better!). + + @type recip: C{str} or C{unicode} + @param recip: The recipient of this message. If a channel, it must + start with a channel prefix. + + @type message: C{str} or C{unicode} + @param message: The message being sent. + """ + self.sendLine(":%s NOTICE %s :%s" % (sender, recip, message)) + + + def action(self, sender, recip, message): + """ + Send an action to a channel or user. + + @type sender: C{str} or C{unicode} + @param sender: Who is sending this message. Should be of the form + username!ident@hostmask (unless you know better!). + + @type recip: C{str} or C{unicode} + @param recip: The recipient of this message. If a channel, it must + start with a channel prefix. + + @type message: C{str} or C{unicode} + @param message: The action being sent. + """ + self.sendLine(":%s ACTION %s :%s" % (sender, recip, message)) + + + def topic(self, user, channel, topic, author=None): + """ + Send the topic to a user. + + @type user: C{str} or C{unicode} + @param user: The user receiving the topic. Only their nick name, not + the full hostmask. + + @type channel: C{str} or C{unicode} + @param channel: The channel for which this is the topic. + + @type topic: C{str} or C{unicode} or C{None} + @param topic: The topic string, unquoted, or None if there is no topic. + + @type author: C{str} or C{unicode} + @param author: If the topic is being changed, the full username and + hostmask of the person changing it. + """ + if author is None: + if topic is None: + self.sendLine(':%s %s %s %s :%s' % ( + self.hostname, RPL_NOTOPIC, user, channel, 'No topic is set.')) + else: + self.sendLine(":%s %s %s %s :%s" % ( + self.hostname, RPL_TOPIC, user, channel, lowQuote(topic))) + else: + self.sendLine(":%s TOPIC %s :%s" % (author, channel, lowQuote(topic))) + + + def topicAuthor(self, user, channel, author, date): + """ + Send the author of and time at which a topic was set for the given + channel. + + This sends a 333 reply message, which is not part of the IRC RFC. + + @type user: C{str} or C{unicode} + @param user: The user receiving the topic. Only their nick name, not + the full hostmask. + + @type channel: C{str} or C{unicode} + @param channel: The channel for which this information is relevant. + + @type author: C{str} or C{unicode} + @param author: The nickname (without hostmask) of the user who last set + the topic. + + @type date: C{int} + @param date: A POSIX timestamp (number of seconds since the epoch) at + which the topic was last set. + """ + self.sendLine(':%s %d %s %s %s %d' % ( + self.hostname, 333, user, channel, author, date)) + + + def names(self, user, channel, names): + """ + Send the names of a channel's participants to a user. + + @type user: C{str} or C{unicode} + @param user: The user receiving the name list. Only their nick name, + not the full hostmask. + + @type channel: C{str} or C{unicode} + @param channel: The channel for which this is the namelist. + + @type names: C{list} of C{str} or C{unicode} + @param names: The names to send. 
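        For example, a server answering a NAMES query for a three-member
        channel might call (all values illustrative)::

            self.names('alice', '#twisted', ['alice', 'bob', 'carol'])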
+ """ + # XXX If unicode is given, these limits are not quite correct + prefixLength = len(channel) + len(user) + 10 + namesLength = 512 - prefixLength + + L = [] + count = 0 + for n in names: + if count + len(n) + 1 > namesLength: + self.sendLine(":%s %s %s = %s :%s" % ( + self.hostname, RPL_NAMREPLY, user, channel, ' '.join(L))) + L = [n] + count = len(n) + else: + L.append(n) + count += len(n) + 1 + if L: + self.sendLine(":%s %s %s = %s :%s" % ( + self.hostname, RPL_NAMREPLY, user, channel, ' '.join(L))) + self.sendLine(":%s %s %s %s :End of /NAMES list" % ( + self.hostname, RPL_ENDOFNAMES, user, channel)) + + + def who(self, user, channel, memberInfo): + """ + Send a list of users participating in a channel. + + @type user: C{str} or C{unicode} + @param user: The user receiving this member information. Only their + nick name, not the full hostmask. + + @type channel: C{str} or C{unicode} + @param channel: The channel for which this is the member information. + + @type memberInfo: C{list} of C{tuples} + @param memberInfo: For each member of the given channel, a 7-tuple + containing their username, their hostmask, the server to which they + are connected, their nickname, the letter "H" or "G" (standing for + "Here" or "Gone"), the hopcount from C{user} to this member, and + this member's real name. + """ + for info in memberInfo: + (username, hostmask, server, nickname, flag, hops, realName) = info + assert flag in ("H", "G") + self.sendLine(":%s %s %s %s %s %s %s %s %s :%d %s" % ( + self.hostname, RPL_WHOREPLY, user, channel, + username, hostmask, server, nickname, flag, hops, realName)) + + self.sendLine(":%s %s %s %s :End of /WHO list." % ( + self.hostname, RPL_ENDOFWHO, user, channel)) + + + def whois(self, user, nick, username, hostname, realName, server, serverInfo, oper, idle, signOn, channels): + """ + Send information about the state of a particular user. + + @type user: C{str} or C{unicode} + @param user: The user receiving this information. Only their nick name, + not the full hostmask. + + @type nick: C{str} or C{unicode} + @param nick: The nickname of the user this information describes. 
+ + @type username: C{str} or C{unicode} + @param username: The user's username (eg, ident response) + + @type hostname: C{str} + @param hostname: The user's hostmask + + @type realName: C{str} or C{unicode} + @param realName: The user's real name + + @type server: C{str} or C{unicode} + @param server: The name of the server to which the user is connected + + @type serverInfo: C{str} or C{unicode} + @param serverInfo: A descriptive string about that server + + @type oper: C{bool} + @param oper: Indicates whether the user is an IRC operator + + @type idle: C{int} + @param idle: The number of seconds since the user last sent a message + + @type signOn: C{int} + @param signOn: A POSIX timestamp (number of seconds since the epoch) + indicating the time the user signed on + + @type channels: C{list} of C{str} or C{unicode} + @param channels: A list of the channels which the user is participating in + """ + self.sendLine(":%s %s %s %s %s %s * :%s" % ( + self.hostname, RPL_WHOISUSER, user, nick, username, hostname, realName)) + self.sendLine(":%s %s %s %s %s :%s" % ( + self.hostname, RPL_WHOISSERVER, user, nick, server, serverInfo)) + if oper: + self.sendLine(":%s %s %s %s :is an IRC operator" % ( + self.hostname, RPL_WHOISOPERATOR, user, nick)) + self.sendLine(":%s %s %s %s %d %d :seconds idle, signon time" % ( + self.hostname, RPL_WHOISIDLE, user, nick, idle, signOn)) + self.sendLine(":%s %s %s %s :%s" % ( + self.hostname, RPL_WHOISCHANNELS, user, nick, ' '.join(channels))) + self.sendLine(":%s %s %s %s :End of WHOIS list." % ( + self.hostname, RPL_ENDOFWHOIS, user, nick)) + + + def join(self, who, where): + """ + Send a join message. + + @type who: C{str} or C{unicode} + @param who: The name of the user joining. Should be of the form + username!ident@hostmask (unless you know better!). + + @type where: C{str} or C{unicode} + @param where: The channel the user is joining. + """ + self.sendLine(":%s JOIN %s" % (who, where)) + + + def part(self, who, where, reason=None): + """ + Send a part message. + + @type who: C{str} or C{unicode} + @param who: The name of the user joining. Should be of the form + username!ident@hostmask (unless you know better!). + + @type where: C{str} or C{unicode} + @param where: The channel the user is joining. + + @type reason: C{str} or C{unicode} + @param reason: A string describing the misery which caused this poor + soul to depart. + """ + if reason: + self.sendLine(":%s PART %s :%s" % (who, where, reason)) + else: + self.sendLine(":%s PART %s" % (who, where)) + + + def channelMode(self, user, channel, mode, *args): + """ + Send information about the mode of a channel. + + @type user: C{str} or C{unicode} + @param user: The user receiving the name list. Only their nick name, + not the full hostmask. + + @type channel: C{str} or C{unicode} + @param channel: The channel for which this is the namelist. + + @type mode: C{str} + @param mode: A string describing this channel's modes. + + @param args: Any additional arguments required by the modes. + """ + self.sendLine(":%s %s %s %s %s %s" % ( + self.hostname, RPL_CHANNELMODEIS, user, channel, mode, ' '.join(args))) + + + +class ServerSupportedFeatures(_CommandDispatcherMixin): + """ + Handle ISUPPORT messages. + + Feature names match those in the ISUPPORT RFC draft identically. + + Information regarding the specifics of ISUPPORT was gleaned from + . 
+ """ + prefix = 'isupport' + + def __init__(self): + self._features = { + 'CHANNELLEN': 200, + 'CHANTYPES': tuple('#&'), + 'MODES': 3, + 'NICKLEN': 9, + 'PREFIX': self._parsePrefixParam('(ovh)@+%'), + # The ISUPPORT draft explicitly says that there is no default for + # CHANMODES, but we're defaulting it here to handle the case where + # the IRC server doesn't send us any ISUPPORT information, since + # IRCClient.getChannelModeParams relies on this value. + 'CHANMODES': self._parseChanModesParam(['b', '', 'lk'])} + + + def _splitParamArgs(cls, params, valueProcessor=None): + """ + Split ISUPPORT parameter arguments. + + Values can optionally be processed by C{valueProcessor}. + + For example:: + + >>> ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2']) + (('A', '1'), ('B', '2')) + + @type params: C{iterable} of C{str} + + @type valueProcessor: C{callable} taking {str} + @param valueProcessor: Callable to process argument values, or C{None} + to perform no processing + + @rtype: C{list} of C{(str, object)} + @return: Sequence of C{(name, processedValue)} + """ + if valueProcessor is None: + valueProcessor = lambda x: x + + def _parse(): + for param in params: + if ':' not in param: + param += ':' + a, b = param.split(':', 1) + yield a, valueProcessor(b) + return list(_parse()) + _splitParamArgs = classmethod(_splitParamArgs) + + + def _unescapeParamValue(cls, value): + """ + Unescape an ISUPPORT parameter. + + The only form of supported escape is C{\\xHH}, where HH must be a valid + 2-digit hexadecimal number. + + @rtype: C{str} + """ + def _unescape(): + parts = value.split('\\x') + # The first part can never be preceeded by the escape. + yield parts.pop(0) + for s in parts: + octet, rest = s[:2], s[2:] + try: + octet = int(octet, 16) + except ValueError: + raise ValueError('Invalid hex octet: %r' % (octet,)) + yield chr(octet) + rest + + if '\\x' not in value: + return value + return ''.join(_unescape()) + _unescapeParamValue = classmethod(_unescapeParamValue) + + + def _splitParam(cls, param): + """ + Split an ISUPPORT parameter. + + @type param: C{str} + + @rtype: C{(str, list)} + @return C{(key, arguments)} + """ + if '=' not in param: + param += '=' + key, value = param.split('=', 1) + return key, map(cls._unescapeParamValue, value.split(',')) + _splitParam = classmethod(_splitParam) + + + def _parsePrefixParam(cls, prefix): + """ + Parse the ISUPPORT "PREFIX" parameter. + + The order in which the parameter arguments appear is significant, the + earlier a mode appears the more privileges it gives. + + @rtype: C{dict} mapping C{str} to C{(str, int)} + @return: A dictionary mapping a mode character to a two-tuple of + C({symbol, priority)}, the lower a priority (the lowest being + C{0}) the more privileges it gives + """ + if not prefix: + return None + if prefix[0] != '(' and ')' not in prefix: + raise ValueError('Malformed PREFIX parameter') + modes, symbols = prefix.split(')', 1) + symbols = zip(symbols, xrange(len(symbols))) + modes = modes[1:] + return dict(zip(modes, symbols)) + _parsePrefixParam = classmethod(_parsePrefixParam) + + + def _parseChanModesParam(self, params): + """ + Parse the ISUPPORT "CHANMODES" parameter. + + See L{isupport_CHANMODES} for a detailed explanation of this parameter. 
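        For example::

            _parseChanModesParam(['b', 'k', 'l'])
            # -> {'addressModes': 'b', 'param': 'k',
            #     'setParam': 'l', 'noParam': ''}

        Categories with no value supplied default to an empty string.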
+ """ + names = ('addressModes', 'param', 'setParam', 'noParam') + if len(params) > len(names): + raise ValueError( + 'Expecting a maximum of %d channel mode parameters, got %d' % ( + len(names), len(params))) + items = map(lambda key, value: (key, value or ''), names, params) + return dict(items) + _parseChanModesParam = classmethod(_parseChanModesParam) + + + def getFeature(self, feature, default=None): + """ + Get a server supported feature's value. + + A feature with the value C{None} is equivalent to the feature being + unsupported. + + @type feature: C{str} + @param feature: Feature name + + @type default: C{object} + @param default: The value to default to, assuming that C{feature} + is not supported + + @return: Feature value + """ + return self._features.get(feature, default) + + + def hasFeature(self, feature): + """ + Determine whether a feature is supported or not. + + @rtype: C{bool} + """ + return self.getFeature(feature) is not None + + + def parse(self, params): + """ + Parse ISUPPORT parameters. + + If an unknown parameter is encountered, it is simply added to the + dictionary, keyed by its name, as a tuple of the parameters provided. + + @type params: C{iterable} of C{str} + @param params: Iterable of ISUPPORT parameters to parse + """ + for param in params: + key, value = self._splitParam(param) + if key.startswith('-'): + self._features.pop(key[1:], None) + else: + self._features[key] = self.dispatch(key, value) + + + def isupport_unknown(self, command, params): + """ + Unknown ISUPPORT parameter. + """ + return tuple(params) + + + def isupport_CHANLIMIT(self, params): + """ + The maximum number of each channel type a user may join. + """ + return self._splitParamArgs(params, _intOrDefault) + + + def isupport_CHANMODES(self, params): + """ + Available channel modes. + + There are 4 categories of channel mode:: + + addressModes - Modes that add or remove an address to or from a + list, these modes always take a parameter. + + param - Modes that change a setting on a channel, these modes + always take a parameter. + + setParam - Modes that change a setting on a channel, these modes + only take a parameter when being set. + + noParam - Modes that change a setting on a channel, these modes + never take a parameter. + """ + try: + return self._parseChanModesParam(params) + except ValueError: + return self.getFeature('CHANMODES') + + + def isupport_CHANNELLEN(self, params): + """ + Maximum length of a channel name a client may create. + """ + return _intOrDefault(params[0], self.getFeature('CHANNELLEN')) + + + def isupport_CHANTYPES(self, params): + """ + Valid channel prefixes. + """ + return tuple(params[0]) + + + def isupport_EXCEPTS(self, params): + """ + Mode character for "ban exceptions". + + The presence of this parameter indicates that the server supports + this functionality. + """ + return params[0] or 'e' + + + def isupport_IDCHAN(self, params): + """ + Safe channel identifiers. + + The presence of this parameter indicates that the server supports + this functionality. + """ + return self._splitParamArgs(params) + + + def isupport_INVEX(self, params): + """ + Mode character for "invite exceptions". + + The presence of this parameter indicates that the server supports + this functionality. + """ + return params[0] or 'I' + + + def isupport_KICKLEN(self, params): + """ + Maximum length of a kick message a client may provide. 
+ """ + return _intOrDefault(params[0]) + + + def isupport_MAXLIST(self, params): + """ + Maximum number of "list modes" a client may set on a channel at once. + + List modes are identified by the "addressModes" key in CHANMODES. + """ + return self._splitParamArgs(params, _intOrDefault) + + + def isupport_MODES(self, params): + """ + Maximum number of modes accepting parameters that may be sent, by a + client, in a single MODE command. + """ + return _intOrDefault(params[0]) + + + def isupport_NETWORK(self, params): + """ + IRC network name. + """ + return params[0] + + + def isupport_NICKLEN(self, params): + """ + Maximum length of a nickname the client may use. + """ + return _intOrDefault(params[0], self.getFeature('NICKLEN')) + + + def isupport_PREFIX(self, params): + """ + Mapping of channel modes that clients may have to status flags. + """ + try: + return self._parsePrefixParam(params[0]) + except ValueError: + return self.getFeature('PREFIX') + + + def isupport_SAFELIST(self, params): + """ + Flag indicating that a client may request a LIST without being + disconnected due to the large amount of data generated. + """ + return True + + + def isupport_STATUSMSG(self, params): + """ + The server supports sending messages to only to clients on a channel + with a specific status. + """ + return params[0] + + + def isupport_TARGMAX(self, params): + """ + Maximum number of targets allowable for commands that accept multiple + targets. + """ + return dict(self._splitParamArgs(params, _intOrDefault)) + + + def isupport_TOPICLEN(self, params): + """ + Maximum length of a topic that may be set. + """ + return _intOrDefault(params[0]) + + + +class IRCClient(basic.LineReceiver): + """Internet Relay Chat client protocol, with sprinkles. + + In addition to providing an interface for an IRC client protocol, + this class also contains reasonable implementations of many common + CTCP methods. + + TODO + ==== + - Limit the length of messages sent (because the IRC server probably + does). + - Add flood protection/rate limiting for my CTCP replies. + - NickServ cooperation. (a mix-in?) + - Heartbeat. The transport may die in such a way that it does not realize + it is dead until it is written to. Sending something (like "PING + this.irc-host.net") during idle peroids would alleviate that. If + you're concerned with the stability of the host as well as that of the + transport, you might care to watch for the corresponding PONG. + + @ivar nickname: Nickname the client will use. + @ivar password: Password used to log on to the server. May be C{None}. + @ivar realname: Supplied to the server during login as the "Real name" + or "ircname". May be C{None}. + @ivar username: Supplied to the server during login as the "User name". + May be C{None} + + @ivar userinfo: Sent in reply to a C{USERINFO} CTCP query. If C{None}, no + USERINFO reply will be sent. + "This is used to transmit a string which is settable by + the user (and never should be set by the client)." + @ivar fingerReply: Sent in reply to a C{FINGER} CTCP query. If C{None}, no + FINGER reply will be sent. + @type fingerReply: Callable or String + + @ivar versionName: CTCP VERSION reply, client name. If C{None}, no VERSION + reply will be sent. + @type versionName: C{str}, or None. + @ivar versionNum: CTCP VERSION reply, client version. + @type versionNum: C{str}, or None. + @ivar versionEnv: CTCP VERSION reply, environment the client is running in. + @type versionEnv: C{str}, or None. 
+ + @ivar sourceURL: CTCP SOURCE reply, a URL where the source code of this + client may be found. If C{None}, no SOURCE reply will be sent. + + @ivar lineRate: Minimum delay between lines sent to the server. If + C{None}, no delay will be imposed. + @type lineRate: Number of Seconds. + + @ivar motd: Either L{None} or, between receipt of I{RPL_MOTDSTART} and + I{RPL_ENDOFMOTD}, a L{list} of L{str}, each of which is the content + of an I{RPL_MOTD} message. + + @ivar erroneousNickFallback: Default nickname assigned when an unregistered + client triggers an C{ERR_ERRONEUSNICKNAME} while trying to register + with an illegal nickname. + @type erroneousNickFallback: C{str} + + @ivar _registered: Whether or not the user is registered. It becomes True + once a welcome has been received from the server. + @type _registered: C{bool} + + @ivar _attemptedNick: The nickname that will try to get registered. It may + change if it is illegal or already taken. L{nickname} becomes the + L{_attemptedNick} that is successfully registered. + @type _attemptedNick: C{str} + + @type supported: L{ServerSupportedFeatures} + @ivar supported: Available ISUPPORT features on the server + """ + motd = None + nickname = 'irc' + password = None + realname = None + username = None + ### Responses to various CTCP queries. + + userinfo = None + # fingerReply is a callable returning a string, or a str()able object. + fingerReply = None + versionName = None + versionNum = None + versionEnv = None + + sourceURL = "http://twistedmatrix.com/downloads/" + + dcc_destdir = '.' + dcc_sessions = None + + # If this is false, no attempt will be made to identify + # ourself to the server. + performLogin = 1 + + lineRate = None + _queue = None + _queueEmptying = None + + delimiter = '\n' # '\r\n' will also work (see dataReceived) + + __pychecker__ = 'unusednames=params,prefix,channel' + + _registered = False + _attemptedNick = '' + erroneousNickFallback = 'defaultnick' + + def _reallySendLine(self, line): + return basic.LineReceiver.sendLine(self, lowQuote(line) + '\r') + + def sendLine(self, line): + if self.lineRate is None: + self._reallySendLine(line) + else: + self._queue.append(line) + if not self._queueEmptying: + self._sendLine() + + def _sendLine(self): + if self._queue: + self._reallySendLine(self._queue.pop(0)) + self._queueEmptying = reactor.callLater(self.lineRate, + self._sendLine) + else: + self._queueEmptying = None + + + ### Interface level client->user output methods + ### + ### You'll want to override these. + + ### Methods relating to the server itself + + def created(self, when): + """Called with creation date information about the server, usually at logon. + + @type when: C{str} + @param when: A string describing when the server was created, probably. + """ + + def yourHost(self, info): + """Called with daemon information about the server, usually at logon. + + @type info: C{str} + @param when: A string describing what software the server is running, probably. + """ + + def myInfo(self, servername, version, umodes, cmodes): + """Called with information about the server, usually at logon. + + @type servername: C{str} + @param servername: The hostname of this server. + + @type version: C{str} + @param version: A description of what software this server runs. + + @type umodes: C{str} + @param umodes: All the available user modes. + + @type cmodes: C{str} + @param cmodes: All the available channel modes. + """ + + def luserClient(self, info): + """Called with information about the number of connections, usually at logon. 
+ + @type info: C{str} + @param info: A description of the number of clients and servers + connected to the network, probably. + """ + + def bounce(self, info): + """Called with information about where the client should reconnect. + + @type info: C{str} + @param info: A plaintext description of the address that should be + connected to. + """ + + def isupport(self, options): + """Called with various information about what the server supports. + + @type options: C{list} of C{str} + @param options: Descriptions of features or limits of the server, possibly + in the form "NAME=VALUE". + """ + + def luserChannels(self, channels): + """Called with the number of channels existant on the server. + + @type channels: C{int} + """ + + def luserOp(self, ops): + """Called with the number of ops logged on to the server. + + @type ops: C{int} + """ + + def luserMe(self, info): + """Called with information about the server connected to. + + @type info: C{str} + @param info: A plaintext string describing the number of users and servers + connected to this server. + """ + + ### Methods involving me directly + + def privmsg(self, user, channel, message): + """Called when I have a message from a user to me or a channel. + """ + pass + + def joined(self, channel): + """ + Called when I finish joining a channel. + + channel has the starting character (C{'#'}, C{'&'}, C{'!'}, or C{'+'}) + intact. + """ + + def left(self, channel): + """ + Called when I have left a channel. + + channel has the starting character (C{'#'}, C{'&'}, C{'!'}, or C{'+'}) + intact. + """ + + def noticed(self, user, channel, message): + """Called when I have a notice from a user to me or a channel. + + By default, this is equivalent to IRCClient.privmsg, but if your + client makes any automated replies, you must override this! + From the RFC:: + + The difference between NOTICE and PRIVMSG is that + automatic replies MUST NEVER be sent in response to a + NOTICE message. [...] The object of this rule is to avoid + loops between clients automatically sending something in + response to something it received. + """ + self.privmsg(user, channel, message) + + def modeChanged(self, user, channel, set, modes, args): + """Called when users or channel's modes are changed. + + @type user: C{str} + @param user: The user and hostmask which instigated this change. + + @type channel: C{str} + @param channel: The channel where the modes are changed. If args is + empty the channel for which the modes are changing. If the changes are + at server level it could be equal to C{user}. + + @type set: C{bool} or C{int} + @param set: True if the mode(s) is being added, False if it is being + removed. If some modes are added and others removed at the same time + this function will be called twice, the first time with all the added + modes, the second with the removed ones. (To change this behaviour + override the irc_MODE method) + + @type modes: C{str} + @param modes: The mode or modes which are being changed. + + @type args: C{tuple} + @param args: Any additional information required for the mode + change. + """ + + def pong(self, user, secs): + """Called with the results of a CTCP PING query. + """ + pass + + def signedOn(self): + """Called after sucessfully signing on to the server. + """ + pass + + def kickedFrom(self, channel, kicker, message): + """Called when I am kicked from a channel. + """ + pass + + def nickChanged(self, nick): + """Called when my nick has been changed. 
+ """ + self.nickname = nick + + + ### Things I observe other people doing in a channel. + + def userJoined(self, user, channel): + """Called when I see another user joining a channel. + """ + pass + + def userLeft(self, user, channel): + """Called when I see another user leaving a channel. + """ + pass + + def userQuit(self, user, quitMessage): + """Called when I see another user disconnect from the network. + """ + pass + + def userKicked(self, kickee, channel, kicker, message): + """Called when I observe someone else being kicked from a channel. + """ + pass + + def action(self, user, channel, data): + """Called when I see a user perform an ACTION on a channel. + """ + pass + + def topicUpdated(self, user, channel, newTopic): + """In channel, user changed the topic to newTopic. + + Also called when first joining a channel. + """ + pass + + def userRenamed(self, oldname, newname): + """A user changed their name from oldname to newname. + """ + pass + + ### Information from the server. + + def receivedMOTD(self, motd): + """I received a message-of-the-day banner from the server. + + motd is a list of strings, where each string was sent as a seperate + message from the server. To display, you might want to use:: + + '\\n'.join(motd) + + to get a nicely formatted string. + """ + pass + + ### user input commands, client->server + ### Your client will want to invoke these. + + def join(self, channel, key=None): + """ + Join a channel. + + @type channel: C{str} + @param channel: The name of the channel to join. If it has no prefix, + C{'#'} will be prepended to it. + @type key: C{str} + @param key: If specified, the key used to join the channel. + """ + if channel[0] not in CHANNEL_PREFIXES: + channel = '#' + channel + if key: + self.sendLine("JOIN %s %s" % (channel, key)) + else: + self.sendLine("JOIN %s" % (channel,)) + + def leave(self, channel, reason=None): + """ + Leave a channel. + + @type channel: C{str} + @param channel: The name of the channel to leave. If it has no prefix, + C{'#'} will be prepended to it. + @type reason: C{str} + @param reason: If given, the reason for leaving. + """ + if channel[0] not in CHANNEL_PREFIXES: + channel = '#' + channel + if reason: + self.sendLine("PART %s :%s" % (channel, reason)) + else: + self.sendLine("PART %s" % (channel,)) + + def kick(self, channel, user, reason=None): + """ + Attempt to kick a user from a channel. + + @type channel: C{str} + @param channel: The name of the channel to kick the user from. If it has + no prefix, C{'#'} will be prepended to it. + @type user: C{str} + @param user: The nick of the user to kick. + @type reason: C{str} + @param reason: If given, the reason for kicking the user. + """ + if channel[0] not in CHANNEL_PREFIXES: + channel = '#' + channel + if reason: + self.sendLine("KICK %s %s :%s" % (channel, user, reason)) + else: + self.sendLine("KICK %s %s" % (channel, user)) + + part = leave + + def topic(self, channel, topic=None): + """ + Attempt to set the topic of the given channel, or ask what it is. + + If topic is None, then I sent a topic query instead of trying to set the + topic. The server should respond with a TOPIC message containing the + current topic of the given channel. + + @type channel: C{str} + @param channel: The name of the channel to change the topic on. If it + has no prefix, C{'#'} will be prepended to it. + @type topic: C{str} + @param topic: If specified, what to set the topic to. 
+ """ + # << TOPIC #xtestx :fff + if channel[0] not in CHANNEL_PREFIXES: + channel = '#' + channel + if topic != None: + self.sendLine("TOPIC %s :%s" % (channel, topic)) + else: + self.sendLine("TOPIC %s" % (channel,)) + + def mode(self, chan, set, modes, limit = None, user = None, mask = None): + """ + Change the modes on a user or channel. + + The C{limit}, C{user}, and C{mask} parameters are mutually exclusive. + + @type chan: C{str} + @param chan: The name of the channel to operate on. + @type set: C{bool} + @param set: True to give the user or channel permissions and False to + remove them. + @type modes: C{str} + @param modes: The mode flags to set on the user or channel. + @type limit: C{int} + @param limit: In conjuction with the C{'l'} mode flag, limits the + number of users on the channel. + @type user: C{str} + @param user: The user to change the mode on. + @type mask: C{str} + @param mask: In conjuction with the C{'b'} mode flag, sets a mask of + users to be banned from the channel. + """ + if set: + line = 'MODE %s +%s' % (chan, modes) + else: + line = 'MODE %s -%s' % (chan, modes) + if limit is not None: + line = '%s %d' % (line, limit) + elif user is not None: + line = '%s %s' % (line, user) + elif mask is not None: + line = '%s %s' % (line, mask) + self.sendLine(line) + + + def say(self, channel, message, length = None): + """ + Send a message to a channel + + @type channel: C{str} + @param channel: The channel to say the message on. If it has no prefix, + C{'#'} will be prepended to it. + @type message: C{str} + @param message: The message to say. + @type length: C{int} + @param length: The maximum number of octets to send at a time. This has + the effect of turning a single call to C{msg()} into multiple + commands to the server. This is useful when long messages may be + sent that would otherwise cause the server to kick us off or + silently truncate the text we are sending. If None is passed, the + entire message is always send in one command. + """ + if channel[0] not in CHANNEL_PREFIXES: + channel = '#' + channel + self.msg(channel, message, length) + + + def msg(self, user, message, length = None): + """Send a message to a user or channel. + + @type user: C{str} + @param user: The username or channel name to which to direct the + message. + + @type message: C{str} + @param message: The text to send + + @type length: C{int} + @param length: The maximum number of octets to send at a time. This + has the effect of turning a single call to msg() into multiple + commands to the server. This is useful when long messages may be + sent that would otherwise cause the server to kick us off or silently + truncate the text we are sending. If None is passed, the entire + message is always send in one command. + """ + + fmt = "PRIVMSG %s :%%s" % (user,) + + if length is None: + self.sendLine(fmt % (message,)) + else: + # NOTE: minimumLength really equals len(fmt) - 2 (for '%s') + n + # where n is how many bytes sendLine sends to end the line. + # n was magic numbered to 2, I think incorrectly + minimumLength = len(fmt) + if length <= minimumLength: + raise ValueError("Maximum length must exceed %d for message " + "to %s" % (minimumLength, user)) + lines = split(message, length - minimumLength) + map(lambda line, self=self, fmt=fmt: self.sendLine(fmt % line), + lines) + + def notice(self, user, message): + """ + Send a notice to a user. + + Notices are like normal message, but should never get automated + replies. + + @type user: C{str} + @param user: The user to send a notice to. 
+ @type message: C{str} + @param message: The contents of the notice to send. + """ + self.sendLine("NOTICE %s :%s" % (user, message)) + + def away(self, message=''): + """ + Mark this client as away. + + @type message: C{str} + @param message: If specified, the away message. + """ + self.sendLine("AWAY :%s" % message) + + + + def back(self): + """ + Clear the away status. + """ + # An empty away marks us as back + self.away() + + + def whois(self, nickname, server=None): + """ + Retrieve user information about the given nick name. + + @type nickname: C{str} + @param nickname: The nick name about which to retrieve information. + + @since: 8.2 + """ + if server is None: + self.sendLine('WHOIS ' + nickname) + else: + self.sendLine('WHOIS %s %s' % (server, nickname)) + + + def register(self, nickname, hostname='foo', servername='bar'): + """ + Login to the server. + + @type nickname: C{str} + @param nickname: The nickname to register. + @type hostname: C{str} + @param hostname: If specified, the hostname to logon as. + @type servername: C{str} + @param servername: If specified, the servername to logon as. + """ + if self.password is not None: + self.sendLine("PASS %s" % self.password) + self.setNick(nickname) + if self.username is None: + self.username = nickname + self.sendLine("USER %s %s %s :%s" % (self.username, hostname, servername, self.realname)) + + def setNick(self, nickname): + """ + Set this client's nickname. + + @type nickname: C{str} + @param nickname: The nickname to change to. + """ + self._attemptedNick = nickname + self.sendLine("NICK %s" % nickname) + + def quit(self, message = ''): + """ + Disconnect from the server + + @type message: C{str} + + @param message: If specified, the message to give when quitting the + server. + """ + self.sendLine("QUIT :%s" % message) + + ### user input commands, client->client + + def describe(self, channel, action): + """ + Strike a pose. + + @type channel: C{str} + @param channel: The name of the channel to have an action on. If it + has no prefix, it is sent to the user of that name. + @type action: C{str} + @param action: The action to preform. + @since: 9.0 + """ + self.ctcpMakeQuery(channel, [('ACTION', action)]) + + + def me(self, channel, action): + """ + Strike a pose. + + This function is deprecated since Twisted 9.0. Use describe(). + + @type channel: C{str} + @param channel: The name of the channel to have an action on. If it + has no prefix, C{'#'} will be prepended to it. + @type action: C{str} + @param action: The action to preform. + """ + warnings.warn("me() is deprecated since Twisted 9.0. Use IRCClient.describe().", + DeprecationWarning, stacklevel=2) + + if channel[0] not in CHANNEL_PREFIXES: + channel = '#' + channel + self.describe(channel, action) + + + _pings = None + _MAX_PINGRING = 12 + + def ping(self, user, text = None): + """ + Measure round-trip delay to another IRC client. + """ + if self._pings is None: + self._pings = {} + + if text is None: + chars = string.letters + string.digits + string.punctuation + key = ''.join([random.choice(chars) for i in range(12)]) + else: + key = str(text) + self._pings[(user, key)] = time.time() + self.ctcpMakeQuery(user, [('PING', key)]) + + if len(self._pings) > self._MAX_PINGRING: + # Remove some of the oldest entries. 
+ byValue = [(v, k) for (k, v) in self._pings.items()] + byValue.sort() + excess = self._MAX_PINGRING - len(self._pings) + for i in xrange(excess): + del self._pings[byValue[i][1]] + + def dccSend(self, user, file): + if type(file) == types.StringType: + file = open(file, 'r') + + size = fileSize(file) + + name = getattr(file, "name", "file@%s" % (id(file),)) + + factory = DccSendFactory(file) + port = reactor.listenTCP(0, factory, 1) + + raise NotImplementedError,( + "XXX!!! Help! I need to bind a socket, have it listen, and tell me its address. " + "(and stop accepting once we've made a single connection.)") + + my_address = struct.pack("!I", my_address) + + args = ['SEND', name, my_address, str(port)] + + if not (size is None): + args.append(size) + + args = string.join(args, ' ') + + self.ctcpMakeQuery(user, [('DCC', args)]) + + def dccResume(self, user, fileName, port, resumePos): + """Send a DCC RESUME request to another user.""" + self.ctcpMakeQuery(user, [ + ('DCC', ['RESUME', fileName, port, resumePos])]) + + def dccAcceptResume(self, user, fileName, port, resumePos): + """Send a DCC ACCEPT response to clients who have requested a resume. + """ + self.ctcpMakeQuery(user, [ + ('DCC', ['ACCEPT', fileName, port, resumePos])]) + + ### server->client messages + ### You might want to fiddle with these, + ### but it is safe to leave them alone. + + def irc_ERR_NICKNAMEINUSE(self, prefix, params): + """ + Called when we try to register or change to a nickname that is already + taken. + """ + self._attemptedNick = self.alterCollidedNick(self._attemptedNick) + self.setNick(self._attemptedNick) + + + def alterCollidedNick(self, nickname): + """ + Generate an altered version of a nickname that caused a collision in an + effort to create an unused related name for subsequent registration. + + @param nickname: The nickname a user is attempting to register. + @type nickname: C{str} + + @returns: A string that is in some way different from the nickname. + @rtype: C{str} + """ + return nickname + '_' + + + def irc_ERR_ERRONEUSNICKNAME(self, prefix, params): + """ + Called when we try to register or change to an illegal nickname. + + The server should send this reply when the nickname contains any + disallowed characters. The bot will stall, waiting for RPL_WELCOME, if + we don't handle this during sign-on. + + @note: The method uses the spelling I{erroneus}, as it appears in + the RFC, section 6.1. + """ + if not self._registered: + self.setNick(self.erroneousNickFallback) + + + def irc_ERR_PASSWDMISMATCH(self, prefix, params): + """ + Called when the login was incorrect. + """ + raise IRCPasswordMismatch("Password Incorrect.") + + def irc_RPL_WELCOME(self, prefix, params): + """ + Called when we have received the welcome from the server. + """ + self._registered = True + self.nickname = self._attemptedNick + self.signedOn() + + def irc_JOIN(self, prefix, params): + """ + Called when a user joins a channel. + """ + nick = string.split(prefix,'!')[0] + channel = params[-1] + if nick == self.nickname: + self.joined(channel) + else: + self.userJoined(nick, channel) + + def irc_PART(self, prefix, params): + """ + Called when a user leaves a channel. + """ + nick = string.split(prefix,'!')[0] + channel = params[0] + if nick == self.nickname: + self.left(channel) + else: + self.userLeft(nick, channel) + + def irc_QUIT(self, prefix, params): + """ + Called when a user has quit. 
+ """ + nick = string.split(prefix,'!')[0] + self.userQuit(nick, params[0]) + + + def irc_MODE(self, user, params): + """ + Parse a server mode change message. + """ + channel, modes, args = params[0], params[1], params[2:] + + if modes[0] not in '-+': + modes = '+' + modes + + if channel == self.nickname: + # This is a mode change to our individual user, not a channel mode + # that involves us. + paramModes = self.getUserModeParams() + else: + paramModes = self.getChannelModeParams() + + try: + added, removed = parseModes(modes, args, paramModes) + except IRCBadModes: + log.err(None, 'An error occured while parsing the following ' + 'MODE message: MODE %s' % (' '.join(params),)) + else: + if added: + modes, params = zip(*added) + self.modeChanged(user, channel, True, ''.join(modes), params) + + if removed: + modes, params = zip(*removed) + self.modeChanged(user, channel, False, ''.join(modes), params) + + + def irc_PING(self, prefix, params): + """ + Called when some has pinged us. + """ + self.sendLine("PONG %s" % params[-1]) + + def irc_PRIVMSG(self, prefix, params): + """ + Called when we get a message. + """ + user = prefix + channel = params[0] + message = params[-1] + + if not message: return # don't raise an exception if some idiot sends us a blank message + + if message[0]==X_DELIM: + m = ctcpExtract(message) + if m['extended']: + self.ctcpQuery(user, channel, m['extended']) + + if not m['normal']: + return + + message = string.join(m['normal'], ' ') + + self.privmsg(user, channel, message) + + def irc_NOTICE(self, prefix, params): + """ + Called when a user gets a notice. + """ + user = prefix + channel = params[0] + message = params[-1] + + if message[0]==X_DELIM: + m = ctcpExtract(message) + if m['extended']: + self.ctcpReply(user, channel, m['extended']) + + if not m['normal']: + return + + message = string.join(m['normal'], ' ') + + self.noticed(user, channel, message) + + def irc_NICK(self, prefix, params): + """ + Called when a user changes their nickname. + """ + nick = string.split(prefix,'!', 1)[0] + if nick == self.nickname: + self.nickChanged(params[0]) + else: + self.userRenamed(nick, params[0]) + + def irc_KICK(self, prefix, params): + """ + Called when a user is kicked from a channel. + """ + kicker = string.split(prefix,'!')[0] + channel = params[0] + kicked = params[1] + message = params[-1] + if string.lower(kicked) == string.lower(self.nickname): + # Yikes! + self.kickedFrom(channel, kicker, message) + else: + self.userKicked(kicked, channel, kicker, message) + + def irc_TOPIC(self, prefix, params): + """ + Someone in the channel set the topic. + """ + user = string.split(prefix, '!')[0] + channel = params[0] + newtopic = params[1] + self.topicUpdated(user, channel, newtopic) + + def irc_RPL_TOPIC(self, prefix, params): + """ + Called when the topic for a channel is initially reported or when it + subsequently changes. 
+ """ + user = string.split(prefix, '!')[0] + channel = params[1] + newtopic = params[2] + self.topicUpdated(user, channel, newtopic) + + def irc_RPL_NOTOPIC(self, prefix, params): + user = string.split(prefix, '!')[0] + channel = params[1] + newtopic = "" + self.topicUpdated(user, channel, newtopic) + + def irc_RPL_MOTDSTART(self, prefix, params): + if params[-1].startswith("- "): + params[-1] = params[-1][2:] + self.motd = [params[-1]] + + def irc_RPL_MOTD(self, prefix, params): + if params[-1].startswith("- "): + params[-1] = params[-1][2:] + if self.motd is None: + self.motd = [] + self.motd.append(params[-1]) + + + def irc_RPL_ENDOFMOTD(self, prefix, params): + """ + I{RPL_ENDOFMOTD} indicates the end of the message of the day + messages. Deliver the accumulated lines to C{receivedMOTD}. + """ + motd = self.motd + self.motd = None + self.receivedMOTD(motd) + + + def irc_RPL_CREATED(self, prefix, params): + self.created(params[1]) + + def irc_RPL_YOURHOST(self, prefix, params): + self.yourHost(params[1]) + + def irc_RPL_MYINFO(self, prefix, params): + info = params[1].split(None, 3) + while len(info) < 4: + info.append(None) + self.myInfo(*info) + + def irc_RPL_BOUNCE(self, prefix, params): + self.bounce(params[1]) + + def irc_RPL_ISUPPORT(self, prefix, params): + args = params[1:-1] + # Several ISUPPORT messages, in no particular order, may be sent + # to the client at any given point in time (usually only on connect, + # though.) For this reason, ServerSupportedFeatures.parse is intended + # to mutate the supported feature list. + self.supported.parse(args) + self.isupport(args) + + def irc_RPL_LUSERCLIENT(self, prefix, params): + self.luserClient(params[1]) + + def irc_RPL_LUSEROP(self, prefix, params): + try: + self.luserOp(int(params[1])) + except ValueError: + pass + + def irc_RPL_LUSERCHANNELS(self, prefix, params): + try: + self.luserChannels(int(params[1])) + except ValueError: + pass + + def irc_RPL_LUSERME(self, prefix, params): + self.luserMe(params[1]) + + def irc_unknown(self, prefix, command, params): + pass + + ### Receiving a CTCP query from another party + ### It is safe to leave these alone. + + def ctcpQuery(self, user, channel, messages): + """Dispatch method for any CTCP queries received. + """ + for m in messages: + method = getattr(self, "ctcpQuery_%s" % m[0], None) + if method: + method(user, channel, m[1]) + else: + self.ctcpUnknownQuery(user, channel, m[0], m[1]) + + def ctcpQuery_ACTION(self, user, channel, data): + self.action(user, channel, data) + + def ctcpQuery_PING(self, user, channel, data): + nick = string.split(user,"!")[0] + self.ctcpMakeReply(nick, [("PING", data)]) + + def ctcpQuery_FINGER(self, user, channel, data): + if data is not None: + self.quirkyMessage("Why did %s send '%s' with a FINGER query?" + % (user, data)) + if not self.fingerReply: + return + + if callable(self.fingerReply): + reply = self.fingerReply() + else: + reply = str(self.fingerReply) + + nick = string.split(user,"!")[0] + self.ctcpMakeReply(nick, [('FINGER', reply)]) + + def ctcpQuery_VERSION(self, user, channel, data): + if data is not None: + self.quirkyMessage("Why did %s send '%s' with a VERSION query?" + % (user, data)) + + if self.versionName: + nick = string.split(user,"!")[0] + self.ctcpMakeReply(nick, [('VERSION', '%s:%s:%s' % + (self.versionName, + self.versionNum or '', + self.versionEnv or ''))]) + + def ctcpQuery_SOURCE(self, user, channel, data): + if data is not None: + self.quirkyMessage("Why did %s send '%s' with a SOURCE query?" 
+ % (user, data)) + if self.sourceURL: + nick = string.split(user,"!")[0] + # The CTCP document (Zeuge, Rollo, Mesander 1994) says that SOURCE + # replies should be responded to with the location of an anonymous + # FTP server in host:directory:file format. I'm taking the liberty + # of bringing it into the 21st century by sending a URL instead. + self.ctcpMakeReply(nick, [('SOURCE', self.sourceURL), + ('SOURCE', None)]) + + def ctcpQuery_USERINFO(self, user, channel, data): + if data is not None: + self.quirkyMessage("Why did %s send '%s' with a USERINFO query?" + % (user, data)) + if self.userinfo: + nick = string.split(user,"!")[0] + self.ctcpMakeReply(nick, [('USERINFO', self.userinfo)]) + + def ctcpQuery_CLIENTINFO(self, user, channel, data): + """A master index of what CTCP tags this client knows. + + If no arguments are provided, respond with a list of known tags. + If an argument is provided, provide human-readable help on + the usage of that tag. + """ + + nick = string.split(user,"!")[0] + if not data: + # XXX: prefixedMethodNames gets methods from my *class*, + # but it's entirely possible that this *instance* has more + # methods. + names = reflect.prefixedMethodNames(self.__class__, + 'ctcpQuery_') + + self.ctcpMakeReply(nick, [('CLIENTINFO', + string.join(names, ' '))]) + else: + args = string.split(data) + method = getattr(self, 'ctcpQuery_%s' % (args[0],), None) + if not method: + self.ctcpMakeReply(nick, [('ERRMSG', + "CLIENTINFO %s :" + "Unknown query '%s'" + % (data, args[0]))]) + return + doc = getattr(method, '__doc__', '') + self.ctcpMakeReply(nick, [('CLIENTINFO', doc)]) + + + def ctcpQuery_ERRMSG(self, user, channel, data): + # Yeah, this seems strange, but that's what the spec says to do + # when faced with an ERRMSG query (not a reply). + nick = string.split(user,"!")[0] + self.ctcpMakeReply(nick, [('ERRMSG', + "%s :No error has occoured." % data)]) + + def ctcpQuery_TIME(self, user, channel, data): + if data is not None: + self.quirkyMessage("Why did %s send '%s' with a TIME query?" + % (user, data)) + nick = string.split(user,"!")[0] + self.ctcpMakeReply(nick, + [('TIME', ':%s' % + time.asctime(time.localtime(time.time())))]) + + def ctcpQuery_DCC(self, user, channel, data): + """Initiate a Direct Client Connection + """ + + if not data: return + dcctype = data.split(None, 1)[0].upper() + handler = getattr(self, "dcc_" + dcctype, None) + if handler: + if self.dcc_sessions is None: + self.dcc_sessions = [] + data = data[len(dcctype)+1:] + handler(user, channel, data) + else: + nick = string.split(user,"!")[0] + self.ctcpMakeReply(nick, [('ERRMSG', + "DCC %s :Unknown DCC type '%s'" + % (data, dcctype))]) + self.quirkyMessage("%s offered unknown DCC type %s" + % (user, dcctype)) + + def dcc_SEND(self, user, channel, data): + # Use splitQuoted for those who send files with spaces in the names. + data = text.splitQuoted(data) + if len(data) < 3: + raise IRCBadMessage, "malformed DCC SEND request: %r" % (data,) + + (filename, address, port) = data[:3] + + address = dccParseAddress(address) + try: + port = int(port) + except ValueError: + raise IRCBadMessage, "Indecipherable port %r" % (port,) + + size = -1 + if len(data) >= 4: + try: + size = int(data[3]) + except ValueError: + pass + + # XXX Should we bother passing this data? 
+ self.dccDoSend(user, address, port, filename, size, data) + + def dcc_ACCEPT(self, user, channel, data): + data = text.splitQuoted(data) + if len(data) < 3: + raise IRCBadMessage, "malformed DCC SEND ACCEPT request: %r" % (data,) + (filename, port, resumePos) = data[:3] + try: + port = int(port) + resumePos = int(resumePos) + except ValueError: + return + + self.dccDoAcceptResume(user, filename, port, resumePos) + + def dcc_RESUME(self, user, channel, data): + data = text.splitQuoted(data) + if len(data) < 3: + raise IRCBadMessage, "malformed DCC SEND RESUME request: %r" % (data,) + (filename, port, resumePos) = data[:3] + try: + port = int(port) + resumePos = int(resumePos) + except ValueError: + return + self.dccDoResume(user, filename, port, resumePos) + + def dcc_CHAT(self, user, channel, data): + data = text.splitQuoted(data) + if len(data) < 3: + raise IRCBadMessage, "malformed DCC CHAT request: %r" % (data,) + + (filename, address, port) = data[:3] + + address = dccParseAddress(address) + try: + port = int(port) + except ValueError: + raise IRCBadMessage, "Indecipherable port %r" % (port,) + + self.dccDoChat(user, channel, address, port, data) + + ### The dccDo methods are the slightly higher-level siblings of + ### common dcc_ methods; the arguments have been parsed for them. + + def dccDoSend(self, user, address, port, fileName, size, data): + """Called when I receive a DCC SEND offer from a client. + + By default, I do nothing here.""" + ## filename = path.basename(arg) + ## protocol = DccFileReceive(filename, size, + ## (user,channel,data),self.dcc_destdir) + ## reactor.clientTCP(address, port, protocol) + ## self.dcc_sessions.append(protocol) + pass + + def dccDoResume(self, user, file, port, resumePos): + """Called when a client is trying to resume an offered file + via DCC send. It should be either replied to with a DCC + ACCEPT or ignored (default).""" + pass + + def dccDoAcceptResume(self, user, file, port, resumePos): + """Called when a client has verified and accepted a DCC resume + request made by us. By default it will do nothing.""" + pass + + def dccDoChat(self, user, channel, address, port, data): + pass + #factory = DccChatFactory(self, queryData=(user, channel, data)) + #reactor.connectTCP(address, port, factory) + #self.dcc_sessions.append(factory) + + #def ctcpQuery_SED(self, user, data): + # """Simple Encryption Doodoo + # + # Feel free to implement this, but no specification is available. + # """ + # raise NotImplementedError + + def ctcpUnknownQuery(self, user, channel, tag, data): + nick = string.split(user,"!")[0] + self.ctcpMakeReply(nick, [('ERRMSG', + "%s %s: Unknown query '%s'" + % (tag, data, tag))]) + + log.msg("Unknown CTCP query from %s: %s %s\n" + % (user, tag, data)) + + def ctcpMakeReply(self, user, messages): + """ + Send one or more C{extended messages} as a CTCP reply. + + @type messages: a list of extended messages. An extended + message is a (tag, data) tuple, where 'data' may be C{None}. + """ + self.notice(user, ctcpStringify(messages)) + + ### client CTCP query commands + + def ctcpMakeQuery(self, user, messages): + """ + Send one or more C{extended messages} as a CTCP query. + + @type messages: a list of extended messages. An extended + message is a (tag, data) tuple, where 'data' may be C{None}. + """ + self.msg(user, ctcpStringify(messages)) + + ### Receiving a response to a CTCP query (presumably to one we made) + ### You may want to add methods here, or override UnknownReply. 
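+ ### A hedged illustration only (not part of upstream Twisted): a
+ ### hypothetical subclass could issue a CTCP query with ctcpMakeQuery above
+ ### and collect the answer by defining a matching ctcpReply_ method, which
+ ### the ctcpReply dispatcher below locates by name. 'somenick' is a
+ ### placeholder target nickname.
+ ##
+ ## class VersionProbingClient(IRCClient):
+ ## def signedOn(self):
+ ## # Ask a peer for its client version once we are registered.
+ ## self.ctcpMakeQuery('somenick', [('VERSION', None)])
+ ##
+ ## def ctcpReply_VERSION(self, user, channel, data):
+ ## # Called with the VERSION reply extracted by ctcpReply.
+ ## log.msg("%s answered VERSION with %r" % (user, data))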
+ + def ctcpReply(self, user, channel, messages): + """ + Dispatch method for any CTCP replies received. + """ + for m in messages: + method = getattr(self, "ctcpReply_%s" % m[0], None) + if method: + method(user, channel, m[1]) + else: + self.ctcpUnknownReply(user, channel, m[0], m[1]) + + def ctcpReply_PING(self, user, channel, data): + nick = user.split('!', 1)[0] + if (not self._pings) or (not self._pings.has_key((nick, data))): + raise IRCBadMessage,\ + "Bogus PING response from %s: %s" % (user, data) + + t0 = self._pings[(nick, data)] + self.pong(user, time.time() - t0) + + def ctcpUnknownReply(self, user, channel, tag, data): + """Called when a fitting ctcpReply_ method is not found. + + XXX: If the client makes arbitrary CTCP queries, + this method should probably show the responses to + them instead of treating them as anomolies. + """ + log.msg("Unknown CTCP reply from %s: %s %s\n" + % (user, tag, data)) + + ### Error handlers + ### You may override these with something more appropriate to your UI. + + def badMessage(self, line, excType, excValue, tb): + """When I get a message that's so broken I can't use it. + """ + log.msg(line) + log.msg(string.join(traceback.format_exception(excType, + excValue, + tb),'')) + + def quirkyMessage(self, s): + """This is called when I receive a message which is peculiar, + but not wholly indecipherable. + """ + log.msg(s + '\n') + + ### Protocool methods + + def connectionMade(self): + self.supported = ServerSupportedFeatures() + self._queue = [] + if self.performLogin: + self.register(self.nickname) + + def dataReceived(self, data): + basic.LineReceiver.dataReceived(self, data.replace('\r', '')) + + def lineReceived(self, line): + line = lowDequote(line) + try: + prefix, command, params = parsemsg(line) + if numeric_to_symbolic.has_key(command): + command = numeric_to_symbolic[command] + self.handleCommand(command, prefix, params) + except IRCBadMessage: + self.badMessage(line, *sys.exc_info()) + + + def getUserModeParams(self): + """ + Get user modes that require parameters for correct parsing. + + @rtype: C{[str, str]} + @return C{[add, remove]} + """ + return ['', ''] + + + def getChannelModeParams(self): + """ + Get channel modes that require parameters for correct parsing. + + @rtype: C{[str, str]} + @return C{[add, remove]} + """ + # PREFIX modes are treated as "type B" CHANMODES, they always take + # parameter. + params = ['', ''] + prefixes = self.supported.getFeature('PREFIX', {}) + params[0] = params[1] = ''.join(prefixes.iterkeys()) + + chanmodes = self.supported.getFeature('CHANMODES') + if chanmodes is not None: + params[0] += chanmodes.get('addressModes', '') + params[0] += chanmodes.get('param', '') + params[1] = params[0] + params[0] += chanmodes.get('setParam', '') + return params + + + def handleCommand(self, command, prefix, params): + """Determine the function to call for the given command and call + it with the given arguments. + """ + method = getattr(self, "irc_%s" % command, None) + try: + if method is not None: + method(prefix, params) + else: + self.irc_unknown(prefix, command, params) + except: + log.deferr() + + + def __getstate__(self): + dct = self.__dict__.copy() + dct['dcc_sessions'] = None + dct['_pings'] = None + return dct + + +def dccParseAddress(address): + if '.' 
in address: + pass + else: + try: + address = long(address) + except ValueError: + raise IRCBadMessage,\ + "Indecipherable address %r" % (address,) + else: + address = ( + (address >> 24) & 0xFF, + (address >> 16) & 0xFF, + (address >> 8) & 0xFF, + address & 0xFF, + ) + address = '.'.join(map(str,address)) + return address + + +class DccFileReceiveBasic(protocol.Protocol, styles.Ephemeral): + """Bare protocol to receive a Direct Client Connection SEND stream. + + This does enough to keep the other guy talking, but you'll want to + extend my dataReceived method to *do* something with the data I get. + """ + + bytesReceived = 0 + + def __init__(self, resumeOffset=0): + self.bytesReceived = resumeOffset + self.resume = (resumeOffset != 0) + + def dataReceived(self, data): + """Called when data is received. + + Warning: This just acknowledges to the remote host that the + data has been received; it doesn't *do* anything with the + data, so you'll want to override this. + """ + self.bytesReceived = self.bytesReceived + len(data) + self.transport.write(struct.pack('!i', self.bytesReceived)) + + +class DccSendProtocol(protocol.Protocol, styles.Ephemeral): + """Protocol for an outgoing Direct Client Connection SEND. + """ + + blocksize = 1024 + file = None + bytesSent = 0 + completed = 0 + connected = 0 + + def __init__(self, file): + if type(file) is types.StringType: + self.file = open(file, 'r') + + def connectionMade(self): + self.connected = 1 + self.sendBlock() + + def dataReceived(self, data): + # XXX: Do we need to check to see if len(data) != fmtsize? + + bytesShesGot = struct.unpack("!I", data) + if bytesShesGot < self.bytesSent: + # Wait for her. + # XXX? Add some checks to see if we've stalled out? + return + elif bytesShesGot > self.bytesSent: + # self.transport.log("DCC SEND %s: She says she has %d bytes " + # "but I've only sent %d. I'm stopping " + # "this screwy transfer." + # % (self.file, + # bytesShesGot, self.bytesSent)) + self.transport.loseConnection() + return + + self.sendBlock() + + def sendBlock(self): + block = self.file.read(self.blocksize) + if block: + self.transport.write(block) + self.bytesSent = self.bytesSent + len(block) + else: + # Nothing more to send, transfer complete. + self.transport.loseConnection() + self.completed = 1 + + def connectionLost(self, reason): + self.connected = 0 + if hasattr(self.file, "close"): + self.file.close() + + +class DccSendFactory(protocol.Factory): + protocol = DccSendProtocol + def __init__(self, file): + self.file = file + + def buildProtocol(self, connection): + p = self.protocol(self.file) + p.factory = self + return p + + +def fileSize(file): + """I'll try my damndest to determine the size of this file object. + """ + size = None + if hasattr(file, "fileno"): + fileno = file.fileno() + try: + stat_ = os.fstat(fileno) + size = stat_[stat.ST_SIZE] + except: + pass + else: + return size + + if hasattr(file, "name") and path.exists(file.name): + try: + size = path.getsize(file.name) + except: + pass + else: + return size + + if hasattr(file, "seek") and hasattr(file, "tell"): + try: + try: + file.seek(0, 2) + size = file.tell() + finally: + file.seek(0, 0) + except: + pass + else: + return size + + return size + +class DccChat(basic.LineReceiver, styles.Ephemeral): + """Direct Client Connection protocol type CHAT. + + DCC CHAT is really just your run o' the mill basic.LineReceiver + protocol. 
This class only varies from that slightly, accepting + either LF or CR LF for a line delimeter for incoming messages + while always using CR LF for outgoing. + + The lineReceived method implemented here uses the DCC connection's + 'client' attribute (provided upon construction) to deliver incoming + lines from the DCC chat via IRCClient's normal privmsg interface. + That's something of a spoof, which you may well want to override. + """ + + queryData = None + delimiter = CR + NL + client = None + remoteParty = None + buffer = "" + + def __init__(self, client, queryData=None): + """Initialize a new DCC CHAT session. + + queryData is a 3-tuple of + (fromUser, targetUserOrChannel, data) + as received by the CTCP query. + + (To be honest, fromUser is the only thing that's currently + used here. targetUserOrChannel is potentially useful, while + the 'data' argument is soley for informational purposes.) + """ + self.client = client + if queryData: + self.queryData = queryData + self.remoteParty = self.queryData[0] + + def dataReceived(self, data): + self.buffer = self.buffer + data + lines = string.split(self.buffer, LF) + # Put the (possibly empty) element after the last LF back in the + # buffer + self.buffer = lines.pop() + + for line in lines: + if line[-1] == CR: + line = line[:-1] + self.lineReceived(line) + + def lineReceived(self, line): + log.msg("DCC CHAT<%s> %s" % (self.remoteParty, line)) + self.client.privmsg(self.remoteParty, + self.client.nickname, line) + + +class DccChatFactory(protocol.ClientFactory): + protocol = DccChat + noisy = 0 + def __init__(self, client, queryData): + self.client = client + self.queryData = queryData + + def buildProtocol(self, addr): + p = self.protocol(client=self.client, queryData=self.queryData) + p.factory = self + + def clientConnectionFailed(self, unused_connector, unused_reason): + self.client.dcc_sessions.remove(self) + + def clientConnectionLost(self, unused_connector, unused_reason): + self.client.dcc_sessions.remove(self) + + +def dccDescribe(data): + """Given the data chunk from a DCC query, return a descriptive string. + """ + + orig_data = data + data = string.split(data) + if len(data) < 4: + return orig_data + + (dcctype, arg, address, port) = data[:4] + + if '.' in address: + pass + else: + try: + address = long(address) + except ValueError: + pass + else: + address = ( + (address >> 24) & 0xFF, + (address >> 16) & 0xFF, + (address >> 8) & 0xFF, + address & 0xFF, + ) + # The mapping to 'int' is to get rid of those accursed + # "L"s which python 1.5.2 puts on the end of longs. + address = string.join(map(str,map(int,address)), ".") + + if dcctype == 'SEND': + filename = arg + + size_txt = '' + if len(data) >= 5: + try: + size = int(data[4]) + size_txt = ' of size %d bytes' % (size,) + except ValueError: + pass + + dcc_text = ("SEND for file '%s'%s at host %s, port %s" + % (filename, size_txt, address, port)) + elif dcctype == 'CHAT': + dcc_text = ("CHAT for host %s, port %s" + % (address, port)) + else: + dcc_text = orig_data + + return dcc_text + + +class DccFileReceive(DccFileReceiveBasic): + """Higher-level coverage for getting a file from DCC SEND. + + I allow you to change the file's name and destination directory. + I won't overwrite an existing file unless I've been told it's okay + to do so. If passed the resumeOffset keyword argument I will attempt to + resume the file from that amount of bytes. + + XXX: I need to let the client know when I am finished. + XXX: I need to decide how to keep a progress indicator updated. 
+ XXX: Client needs a way to tell me "Do not finish until I say so." + XXX: I need to make sure the client understands if the file cannot be written. + """ + + filename = 'dcc' + fileSize = -1 + destDir = '.' + overwrite = 0 + fromUser = None + queryData = None + + def __init__(self, filename, fileSize=-1, queryData=None, + destDir='.', resumeOffset=0): + DccFileReceiveBasic.__init__(self, resumeOffset=resumeOffset) + self.filename = filename + self.destDir = destDir + self.fileSize = fileSize + + if queryData: + self.queryData = queryData + self.fromUser = self.queryData[0] + + def set_directory(self, directory): + """Set the directory where the downloaded file will be placed. + + May raise OSError if the supplied directory path is not suitable. + """ + if not path.exists(directory): + raise OSError(errno.ENOENT, "You see no directory there.", + directory) + if not path.isdir(directory): + raise OSError(errno.ENOTDIR, "You cannot put a file into " + "something which is not a directory.", + directory) + if not os.access(directory, os.X_OK | os.W_OK): + raise OSError(errno.EACCES, + "This directory is too hard to write in to.", + directory) + self.destDir = directory + + def set_filename(self, filename): + """Change the name of the file being transferred. + + This replaces the file name provided by the sender. + """ + self.filename = filename + + def set_overwrite(self, boolean): + """May I overwrite existing files? + """ + self.overwrite = boolean + + + # Protocol-level methods. + + def connectionMade(self): + dst = path.abspath(path.join(self.destDir,self.filename)) + exists = path.exists(dst) + if self.resume and exists: + # I have been told I want to resume, and a file already + # exists - Here we go + self.file = open(dst, 'ab') + log.msg("Attempting to resume %s - starting from %d bytes" % + (self.file, self.file.tell())) + elif self.overwrite or not exists: + self.file = open(dst, 'wb') + else: + raise OSError(errno.EEXIST, + "There's a file in the way. " + "Perhaps that's why you cannot open it.", + dst) + + def dataReceived(self, data): + self.file.write(data) + DccFileReceiveBasic.dataReceived(self, data) + + # XXX: update a progress indicator here? + + def connectionLost(self, reason): + """When the connection is lost, I close the file. + """ + self.connected = 0 + logmsg = ("%s closed." % (self,)) + if self.fileSize > 0: + logmsg = ("%s %d/%d bytes received" + % (logmsg, self.bytesReceived, self.fileSize)) + if self.bytesReceived == self.fileSize: + pass # Hooray! + elif self.bytesReceived < self.fileSize: + logmsg = ("%s (Warning: %d bytes short)" + % (logmsg, self.fileSize - self.bytesReceived)) + else: + logmsg = ("%s (file larger than expected)" + % (logmsg,)) + else: + logmsg = ("%s %d bytes received" + % (logmsg, self.bytesReceived)) + + if hasattr(self, 'file'): + logmsg = "%s and written to %s.\n" % (logmsg, self.file.name) + if hasattr(self.file, 'close'): self.file.close() + + # self.transport.log(logmsg) + + def __str__(self): + if not self.connected: + return "" % (id(self),) + from_ = self.transport.getPeer() + if self.fromUser: + from_ = "%s (%s)" % (self.fromUser, from_) + + s = ("DCC transfer of '%s' from %s" % (self.filename, from_)) + return s + + def __repr__(self): + s = ("<%s at %x: GET %s>" + % (self.__class__, id(self), self.filename)) + return s + + +# CTCP constants and helper functions + +X_DELIM = chr(001) + +def ctcpExtract(message): + """Extract CTCP data from a string. 
+ + Returns a dictionary with two items: + + - C{'extended'}: a list of CTCP (tag, data) tuples + - C{'normal'}: a list of strings which were not inside a CTCP delimeter + """ + + extended_messages = [] + normal_messages = [] + retval = {'extended': extended_messages, + 'normal': normal_messages } + + messages = string.split(message, X_DELIM) + odd = 0 + + # X1 extended data X2 nomal data X3 extended data X4 normal... + while messages: + if odd: + extended_messages.append(messages.pop(0)) + else: + normal_messages.append(messages.pop(0)) + odd = not odd + + extended_messages[:] = filter(None, extended_messages) + normal_messages[:] = filter(None, normal_messages) + + extended_messages[:] = map(ctcpDequote, extended_messages) + for i in xrange(len(extended_messages)): + m = string.split(extended_messages[i], SPC, 1) + tag = m[0] + if len(m) > 1: + data = m[1] + else: + data = None + + extended_messages[i] = (tag, data) + + return retval + +# CTCP escaping + +M_QUOTE= chr(020) + +mQuoteTable = { + NUL: M_QUOTE + '0', + NL: M_QUOTE + 'n', + CR: M_QUOTE + 'r', + M_QUOTE: M_QUOTE + M_QUOTE + } + +mDequoteTable = {} +for k, v in mQuoteTable.items(): + mDequoteTable[v[-1]] = k +del k, v + +mEscape_re = re.compile('%s.' % (re.escape(M_QUOTE),), re.DOTALL) + +def lowQuote(s): + for c in (M_QUOTE, NUL, NL, CR): + s = string.replace(s, c, mQuoteTable[c]) + return s + +def lowDequote(s): + def sub(matchobj, mDequoteTable=mDequoteTable): + s = matchobj.group()[1] + try: + s = mDequoteTable[s] + except KeyError: + s = s + return s + + return mEscape_re.sub(sub, s) + +X_QUOTE = '\\' + +xQuoteTable = { + X_DELIM: X_QUOTE + 'a', + X_QUOTE: X_QUOTE + X_QUOTE + } + +xDequoteTable = {} + +for k, v in xQuoteTable.items(): + xDequoteTable[v[-1]] = k + +xEscape_re = re.compile('%s.' % (re.escape(X_QUOTE),), re.DOTALL) + +def ctcpQuote(s): + for c in (X_QUOTE, X_DELIM): + s = string.replace(s, c, xQuoteTable[c]) + return s + +def ctcpDequote(s): + def sub(matchobj, xDequoteTable=xDequoteTable): + s = matchobj.group()[1] + try: + s = xDequoteTable[s] + except KeyError: + s = s + return s + + return xEscape_re.sub(sub, s) + +def ctcpStringify(messages): + """ + @type messages: a list of extended messages. An extended + message is a (tag, data) tuple, where 'data' may be C{None}, a + string, or a list of strings to be joined with whitespace. + + @returns: String + """ + coded_messages = [] + for (tag, data) in messages: + if data: + if not isinstance(data, types.StringType): + try: + # data as list-of-strings + data = " ".join(map(str, data)) + except TypeError: + # No? Then use it's %s representation. 
+ pass + m = "%s %s" % (tag, data) + else: + m = str(tag) + m = ctcpQuote(m) + m = "%s%s%s" % (X_DELIM, m, X_DELIM) + coded_messages.append(m) + + line = string.join(coded_messages, '') + return line + + +# Constants (from RFC 2812) +RPL_WELCOME = '001' +RPL_YOURHOST = '002' +RPL_CREATED = '003' +RPL_MYINFO = '004' +RPL_ISUPPORT = '005' +RPL_BOUNCE = '010' +RPL_USERHOST = '302' +RPL_ISON = '303' +RPL_AWAY = '301' +RPL_UNAWAY = '305' +RPL_NOWAWAY = '306' +RPL_WHOISUSER = '311' +RPL_WHOISSERVER = '312' +RPL_WHOISOPERATOR = '313' +RPL_WHOISIDLE = '317' +RPL_ENDOFWHOIS = '318' +RPL_WHOISCHANNELS = '319' +RPL_WHOWASUSER = '314' +RPL_ENDOFWHOWAS = '369' +RPL_LISTSTART = '321' +RPL_LIST = '322' +RPL_LISTEND = '323' +RPL_UNIQOPIS = '325' +RPL_CHANNELMODEIS = '324' +RPL_NOTOPIC = '331' +RPL_TOPIC = '332' +RPL_INVITING = '341' +RPL_SUMMONING = '342' +RPL_INVITELIST = '346' +RPL_ENDOFINVITELIST = '347' +RPL_EXCEPTLIST = '348' +RPL_ENDOFEXCEPTLIST = '349' +RPL_VERSION = '351' +RPL_WHOREPLY = '352' +RPL_ENDOFWHO = '315' +RPL_NAMREPLY = '353' +RPL_ENDOFNAMES = '366' +RPL_LINKS = '364' +RPL_ENDOFLINKS = '365' +RPL_BANLIST = '367' +RPL_ENDOFBANLIST = '368' +RPL_INFO = '371' +RPL_ENDOFINFO = '374' +RPL_MOTDSTART = '375' +RPL_MOTD = '372' +RPL_ENDOFMOTD = '376' +RPL_YOUREOPER = '381' +RPL_REHASHING = '382' +RPL_YOURESERVICE = '383' +RPL_TIME = '391' +RPL_USERSSTART = '392' +RPL_USERS = '393' +RPL_ENDOFUSERS = '394' +RPL_NOUSERS = '395' +RPL_TRACELINK = '200' +RPL_TRACECONNECTING = '201' +RPL_TRACEHANDSHAKE = '202' +RPL_TRACEUNKNOWN = '203' +RPL_TRACEOPERATOR = '204' +RPL_TRACEUSER = '205' +RPL_TRACESERVER = '206' +RPL_TRACESERVICE = '207' +RPL_TRACENEWTYPE = '208' +RPL_TRACECLASS = '209' +RPL_TRACERECONNECT = '210' +RPL_TRACELOG = '261' +RPL_TRACEEND = '262' +RPL_STATSLINKINFO = '211' +RPL_STATSCOMMANDS = '212' +RPL_ENDOFSTATS = '219' +RPL_STATSUPTIME = '242' +RPL_STATSOLINE = '243' +RPL_UMODEIS = '221' +RPL_SERVLIST = '234' +RPL_SERVLISTEND = '235' +RPL_LUSERCLIENT = '251' +RPL_LUSEROP = '252' +RPL_LUSERUNKNOWN = '253' +RPL_LUSERCHANNELS = '254' +RPL_LUSERME = '255' +RPL_ADMINME = '256' +RPL_ADMINLOC = '257' +RPL_ADMINLOC = '258' +RPL_ADMINEMAIL = '259' +RPL_TRYAGAIN = '263' +ERR_NOSUCHNICK = '401' +ERR_NOSUCHSERVER = '402' +ERR_NOSUCHCHANNEL = '403' +ERR_CANNOTSENDTOCHAN = '404' +ERR_TOOMANYCHANNELS = '405' +ERR_WASNOSUCHNICK = '406' +ERR_TOOMANYTARGETS = '407' +ERR_NOSUCHSERVICE = '408' +ERR_NOORIGIN = '409' +ERR_NORECIPIENT = '411' +ERR_NOTEXTTOSEND = '412' +ERR_NOTOPLEVEL = '413' +ERR_WILDTOPLEVEL = '414' +ERR_BADMASK = '415' +ERR_UNKNOWNCOMMAND = '421' +ERR_NOMOTD = '422' +ERR_NOADMININFO = '423' +ERR_FILEERROR = '424' +ERR_NONICKNAMEGIVEN = '431' +ERR_ERRONEUSNICKNAME = '432' +ERR_NICKNAMEINUSE = '433' +ERR_NICKCOLLISION = '436' +ERR_UNAVAILRESOURCE = '437' +ERR_USERNOTINCHANNEL = '441' +ERR_NOTONCHANNEL = '442' +ERR_USERONCHANNEL = '443' +ERR_NOLOGIN = '444' +ERR_SUMMONDISABLED = '445' +ERR_USERSDISABLED = '446' +ERR_NOTREGISTERED = '451' +ERR_NEEDMOREPARAMS = '461' +ERR_ALREADYREGISTRED = '462' +ERR_NOPERMFORHOST = '463' +ERR_PASSWDMISMATCH = '464' +ERR_YOUREBANNEDCREEP = '465' +ERR_YOUWILLBEBANNED = '466' +ERR_KEYSET = '467' +ERR_CHANNELISFULL = '471' +ERR_UNKNOWNMODE = '472' +ERR_INVITEONLYCHAN = '473' +ERR_BANNEDFROMCHAN = '474' +ERR_BADCHANNELKEY = '475' +ERR_BADCHANMASK = '476' +ERR_NOCHANMODES = '477' +ERR_BANLISTFULL = '478' +ERR_NOPRIVILEGES = '481' +ERR_CHANOPRIVSNEEDED = '482' +ERR_CANTKILLSERVER = '483' +ERR_RESTRICTED = '484' +ERR_UNIQOPPRIVSNEEDED = '485' +ERR_NOOPERHOST = '491' 
+ERR_NOSERVICEHOST = '492' +ERR_UMODEUNKNOWNFLAG = '501' +ERR_USERSDONTMATCH = '502' + +# And hey, as long as the strings are already intern'd... +symbolic_to_numeric = { + "RPL_WELCOME": '001', + "RPL_YOURHOST": '002', + "RPL_CREATED": '003', + "RPL_MYINFO": '004', + "RPL_ISUPPORT": '005', + "RPL_BOUNCE": '010', + "RPL_USERHOST": '302', + "RPL_ISON": '303', + "RPL_AWAY": '301', + "RPL_UNAWAY": '305', + "RPL_NOWAWAY": '306', + "RPL_WHOISUSER": '311', + "RPL_WHOISSERVER": '312', + "RPL_WHOISOPERATOR": '313', + "RPL_WHOISIDLE": '317', + "RPL_ENDOFWHOIS": '318', + "RPL_WHOISCHANNELS": '319', + "RPL_WHOWASUSER": '314', + "RPL_ENDOFWHOWAS": '369', + "RPL_LISTSTART": '321', + "RPL_LIST": '322', + "RPL_LISTEND": '323', + "RPL_UNIQOPIS": '325', + "RPL_CHANNELMODEIS": '324', + "RPL_NOTOPIC": '331', + "RPL_TOPIC": '332', + "RPL_INVITING": '341', + "RPL_SUMMONING": '342', + "RPL_INVITELIST": '346', + "RPL_ENDOFINVITELIST": '347', + "RPL_EXCEPTLIST": '348', + "RPL_ENDOFEXCEPTLIST": '349', + "RPL_VERSION": '351', + "RPL_WHOREPLY": '352', + "RPL_ENDOFWHO": '315', + "RPL_NAMREPLY": '353', + "RPL_ENDOFNAMES": '366', + "RPL_LINKS": '364', + "RPL_ENDOFLINKS": '365', + "RPL_BANLIST": '367', + "RPL_ENDOFBANLIST": '368', + "RPL_INFO": '371', + "RPL_ENDOFINFO": '374', + "RPL_MOTDSTART": '375', + "RPL_MOTD": '372', + "RPL_ENDOFMOTD": '376', + "RPL_YOUREOPER": '381', + "RPL_REHASHING": '382', + "RPL_YOURESERVICE": '383', + "RPL_TIME": '391', + "RPL_USERSSTART": '392', + "RPL_USERS": '393', + "RPL_ENDOFUSERS": '394', + "RPL_NOUSERS": '395', + "RPL_TRACELINK": '200', + "RPL_TRACECONNECTING": '201', + "RPL_TRACEHANDSHAKE": '202', + "RPL_TRACEUNKNOWN": '203', + "RPL_TRACEOPERATOR": '204', + "RPL_TRACEUSER": '205', + "RPL_TRACESERVER": '206', + "RPL_TRACESERVICE": '207', + "RPL_TRACENEWTYPE": '208', + "RPL_TRACECLASS": '209', + "RPL_TRACERECONNECT": '210', + "RPL_TRACELOG": '261', + "RPL_TRACEEND": '262', + "RPL_STATSLINKINFO": '211', + "RPL_STATSCOMMANDS": '212', + "RPL_ENDOFSTATS": '219', + "RPL_STATSUPTIME": '242', + "RPL_STATSOLINE": '243', + "RPL_UMODEIS": '221', + "RPL_SERVLIST": '234', + "RPL_SERVLISTEND": '235', + "RPL_LUSERCLIENT": '251', + "RPL_LUSEROP": '252', + "RPL_LUSERUNKNOWN": '253', + "RPL_LUSERCHANNELS": '254', + "RPL_LUSERME": '255', + "RPL_ADMINME": '256', + "RPL_ADMINLOC": '257', + "RPL_ADMINLOC": '258', + "RPL_ADMINEMAIL": '259', + "RPL_TRYAGAIN": '263', + "ERR_NOSUCHNICK": '401', + "ERR_NOSUCHSERVER": '402', + "ERR_NOSUCHCHANNEL": '403', + "ERR_CANNOTSENDTOCHAN": '404', + "ERR_TOOMANYCHANNELS": '405', + "ERR_WASNOSUCHNICK": '406', + "ERR_TOOMANYTARGETS": '407', + "ERR_NOSUCHSERVICE": '408', + "ERR_NOORIGIN": '409', + "ERR_NORECIPIENT": '411', + "ERR_NOTEXTTOSEND": '412', + "ERR_NOTOPLEVEL": '413', + "ERR_WILDTOPLEVEL": '414', + "ERR_BADMASK": '415', + "ERR_UNKNOWNCOMMAND": '421', + "ERR_NOMOTD": '422', + "ERR_NOADMININFO": '423', + "ERR_FILEERROR": '424', + "ERR_NONICKNAMEGIVEN": '431', + "ERR_ERRONEUSNICKNAME": '432', + "ERR_NICKNAMEINUSE": '433', + "ERR_NICKCOLLISION": '436', + "ERR_UNAVAILRESOURCE": '437', + "ERR_USERNOTINCHANNEL": '441', + "ERR_NOTONCHANNEL": '442', + "ERR_USERONCHANNEL": '443', + "ERR_NOLOGIN": '444', + "ERR_SUMMONDISABLED": '445', + "ERR_USERSDISABLED": '446', + "ERR_NOTREGISTERED": '451', + "ERR_NEEDMOREPARAMS": '461', + "ERR_ALREADYREGISTRED": '462', + "ERR_NOPERMFORHOST": '463', + "ERR_PASSWDMISMATCH": '464', + "ERR_YOUREBANNEDCREEP": '465', + "ERR_YOUWILLBEBANNED": '466', + "ERR_KEYSET": '467', + "ERR_CHANNELISFULL": '471', + "ERR_UNKNOWNMODE": '472', + 
"ERR_INVITEONLYCHAN": '473', + "ERR_BANNEDFROMCHAN": '474', + "ERR_BADCHANNELKEY": '475', + "ERR_BADCHANMASK": '476', + "ERR_NOCHANMODES": '477', + "ERR_BANLISTFULL": '478', + "ERR_NOPRIVILEGES": '481', + "ERR_CHANOPRIVSNEEDED": '482', + "ERR_CANTKILLSERVER": '483', + "ERR_RESTRICTED": '484', + "ERR_UNIQOPPRIVSNEEDED": '485', + "ERR_NOOPERHOST": '491', + "ERR_NOSERVICEHOST": '492', + "ERR_UMODEUNKNOWNFLAG": '501', + "ERR_USERSDONTMATCH": '502', +} + +numeric_to_symbolic = {} +for k, v in symbolic_to_numeric.items(): + numeric_to_symbolic[v] = k diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/__init__.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/__init__.py new file mode 100644 index 000000000000..53c765c2cc33 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/__init__.py @@ -0,0 +1,8 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2006 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +Twisted Jabber: Jabber Protocol Helpers +""" diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/client.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/client.py new file mode 100644 index 000000000000..f26354e91978 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/client.py @@ -0,0 +1,369 @@ +# -*- test-case-name: twisted.words.test.test_jabberclient -*- +# +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.internet import defer +from twisted.words.xish import domish, xpath, utility +from twisted.words.protocols.jabber import xmlstream, sasl, error +from twisted.words.protocols.jabber.jid import JID + +NS_XMPP_STREAMS = 'urn:ietf:params:xml:ns:xmpp-streams' +NS_XMPP_BIND = 'urn:ietf:params:xml:ns:xmpp-bind' +NS_XMPP_SESSION = 'urn:ietf:params:xml:ns:xmpp-session' +NS_IQ_AUTH_FEATURE = 'http://jabber.org/features/iq-auth' + +DigestAuthQry = xpath.internQuery("/iq/query/digest") +PlaintextAuthQry = xpath.internQuery("/iq/query/password") + +def basicClientFactory(jid, secret): + a = BasicAuthenticator(jid, secret) + return xmlstream.XmlStreamFactory(a) + +class IQ(domish.Element): + """ + Wrapper for a Info/Query packet. + + This provides the necessary functionality to send IQs and get notified when + a result comes back. It's a subclass from L{domish.Element}, so you can use + the standard DOM manipulation calls to add data to the outbound request. + + @type callbacks: L{utility.CallbackList} + @cvar callbacks: Callback list to be notified when response comes back + + """ + def __init__(self, xmlstream, type = "set"): + """ + @type xmlstream: L{xmlstream.XmlStream} + @param xmlstream: XmlStream to use for transmission of this IQ + + @type type: L{str} + @param type: IQ type identifier ('get' or 'set') + """ + + domish.Element.__init__(self, ("jabber:client", "iq")) + self.addUniqueId() + self["type"] = type + self._xmlstream = xmlstream + self.callbacks = utility.CallbackList() + + def addCallback(self, fn, *args, **kwargs): + """ + Register a callback for notification when the IQ result is available. + """ + + self.callbacks.addCallback(True, fn, *args, **kwargs) + + def send(self, to = None): + """ + Call this method to send this IQ request via the associated XmlStream. + + @param to: Jabber ID of the entity to send the request to + @type to: L{str} + + @returns: Callback list for this IQ. Any callbacks added to this list + will be fired when the result comes back. 
+ """ + if to != None: + self["to"] = to + self._xmlstream.addOnetimeObserver("/iq[@id='%s']" % self["id"], \ + self._resultEvent) + self._xmlstream.send(self) + + def _resultEvent(self, iq): + self.callbacks.callback(iq) + self.callbacks = None + + + +class IQAuthInitializer(object): + """ + Non-SASL Authentication initializer for the initiating entity. + + This protocol is defined in + U{JEP-0078} and mainly serves for + compatibility with pre-XMPP-1.0 server implementations. + """ + + INVALID_USER_EVENT = "//event/client/basicauth/invaliduser" + AUTH_FAILED_EVENT = "//event/client/basicauth/authfailed" + + def __init__(self, xs): + self.xmlstream = xs + + + def initialize(self): + # Send request for auth fields + iq = xmlstream.IQ(self.xmlstream, "get") + iq.addElement(("jabber:iq:auth", "query")) + jid = self.xmlstream.authenticator.jid + iq.query.addElement("username", content = jid.user) + + d = iq.send() + d.addCallbacks(self._cbAuthQuery, self._ebAuthQuery) + return d + + + def _cbAuthQuery(self, iq): + jid = self.xmlstream.authenticator.jid + password = self.xmlstream.authenticator.password + + # Construct auth request + reply = xmlstream.IQ(self.xmlstream, "set") + reply.addElement(("jabber:iq:auth", "query")) + reply.query.addElement("username", content = jid.user) + reply.query.addElement("resource", content = jid.resource) + + # Prefer digest over plaintext + if DigestAuthQry.matches(iq): + digest = xmlstream.hashPassword(self.xmlstream.sid, unicode(password)) + reply.query.addElement("digest", content = digest) + else: + reply.query.addElement("password", content = password) + + d = reply.send() + d.addCallbacks(self._cbAuth, self._ebAuth) + return d + + + def _ebAuthQuery(self, failure): + failure.trap(error.StanzaError) + e = failure.value + if e.condition == 'not-authorized': + self.xmlstream.dispatch(e.stanza, self.INVALID_USER_EVENT) + else: + self.xmlstream.dispatch(e.stanza, self.AUTH_FAILED_EVENT) + + return failure + + + def _cbAuth(self, iq): + pass + + + def _ebAuth(self, failure): + failure.trap(error.StanzaError) + self.xmlstream.dispatch(failure.value.stanza, self.AUTH_FAILED_EVENT) + return failure + + + +class BasicAuthenticator(xmlstream.ConnectAuthenticator): + """ + Authenticates an XmlStream against a Jabber server as a Client. + + This only implements non-SASL authentication, per + U{JEP-0078}. Additionally, this + authenticator provides the ability to perform inline registration, per + U{JEP-0077}. + + Under normal circumstances, the BasicAuthenticator generates the + L{xmlstream.STREAM_AUTHD_EVENT} once the stream has authenticated. However, + it can also generate other events, such as: + - L{INVALID_USER_EVENT} : Authentication failed, due to invalid username + - L{AUTH_FAILED_EVENT} : Authentication failed, due to invalid password + - L{REGISTER_FAILED_EVENT} : Registration failed + + If authentication fails for any reason, you can attempt to register by + calling the L{registerAccount} method. If the registration succeeds, a + L{xmlstream.STREAM_AUTHD_EVENT} will be fired. Otherwise, one of the above + errors will be generated (again). 
+ """ + + namespace = "jabber:client" + + INVALID_USER_EVENT = IQAuthInitializer.INVALID_USER_EVENT + AUTH_FAILED_EVENT = IQAuthInitializer.AUTH_FAILED_EVENT + REGISTER_FAILED_EVENT = "//event/client/basicauth/registerfailed" + + def __init__(self, jid, password): + xmlstream.ConnectAuthenticator.__init__(self, jid.host) + self.jid = jid + self.password = password + + def associateWithStream(self, xs): + xs.version = (0, 0) + xmlstream.ConnectAuthenticator.associateWithStream(self, xs) + + inits = [ (xmlstream.TLSInitiatingInitializer, False), + (IQAuthInitializer, True), + ] + + for initClass, required in inits: + init = initClass(xs) + init.required = required + xs.initializers.append(init) + + # TODO: move registration into an Initializer? + + def registerAccount(self, username = None, password = None): + if username: + self.jid.user = username + if password: + self.password = password + + iq = IQ(self.xmlstream, "set") + iq.addElement(("jabber:iq:register", "query")) + iq.query.addElement("username", content = self.jid.user) + iq.query.addElement("password", content = self.password) + + iq.addCallback(self._registerResultEvent) + + iq.send() + + def _registerResultEvent(self, iq): + if iq["type"] == "result": + # Registration succeeded -- go ahead and auth + self.streamStarted() + else: + # Registration failed + self.xmlstream.dispatch(iq, self.REGISTER_FAILED_EVENT) + + + +class CheckVersionInitializer(object): + """ + Initializer that checks if the minimum common stream version number is 1.0. + """ + + def __init__(self, xs): + self.xmlstream = xs + + + def initialize(self): + if self.xmlstream.version < (1, 0): + raise error.StreamError('unsupported-version') + + + +class BindInitializer(xmlstream.BaseFeatureInitiatingInitializer): + """ + Initializer that implements Resource Binding for the initiating entity. + + This protocol is documented in U{RFC 3920, section + 7}. + """ + + feature = (NS_XMPP_BIND, 'bind') + + def start(self): + iq = xmlstream.IQ(self.xmlstream, 'set') + bind = iq.addElement((NS_XMPP_BIND, 'bind')) + resource = self.xmlstream.authenticator.jid.resource + if resource: + bind.addElement('resource', content=resource) + d = iq.send() + d.addCallback(self.onBind) + return d + + + def onBind(self, iq): + if iq.bind: + self.xmlstream.authenticator.jid = JID(unicode(iq.bind.jid)) + + + +class SessionInitializer(xmlstream.BaseFeatureInitiatingInitializer): + """ + Initializer that implements session establishment for the initiating + entity. + + This protocol is defined in U{RFC 3921, section + 3}. + """ + + feature = (NS_XMPP_SESSION, 'session') + + def start(self): + iq = xmlstream.IQ(self.xmlstream, 'set') + session = iq.addElement((NS_XMPP_SESSION, 'session')) + return iq.send() + + + +def XMPPClientFactory(jid, password): + """ + Client factory for XMPP 1.0 (only). + + This returns a L{xmlstream.XmlStreamFactory} with an L{XMPPAuthenticator} + object to perform the stream initialization steps (such as authentication). + + @see: The notes at L{XMPPAuthenticator} describe how the L{jid} and + L{password} parameters are to be used. + + @param jid: Jabber ID to connect with. + @type jid: L{jid.JID} + @param password: password to authenticate with. + @type password: L{unicode} + @return: XML stream factory. + @rtype: L{xmlstream.XmlStreamFactory} + """ + a = XMPPAuthenticator(jid, password) + return xmlstream.XmlStreamFactory(a) + + + +class XMPPAuthenticator(xmlstream.ConnectAuthenticator): + """ + Initializes an XmlStream connecting to an XMPP server as a Client. 
+ + This authenticator performs the initialization steps needed to start + exchanging XML stanzas with an XMPP server as an XMPP client. It checks if + the server advertises XML stream version 1.0, negotiates TLS (when + available), performs SASL authentication, binds a resource and establishes + a session. + + Upon successful stream initialization, the L{xmlstream.STREAM_AUTHD_EVENT} + event will be dispatched through the XML stream object. Otherwise, the + L{xmlstream.INIT_FAILED_EVENT} event will be dispatched with a failure + object. + + After inspection of the failure, initialization can then be restarted by + calling L{initializeStream}. For example, in case of authentication + failure, a user may be given the opportunity to input the correct password. + By setting the L{password} instance variable and restarting initialization, + the stream authentication step is then retried, and subsequent steps are + performed if succesful. + + @ivar jid: Jabber ID to authenticate with. This may contain a resource + part, as a suggestion to the server for resource binding. A + server may override this, though. If the resource part is left + off, the server will generate a unique resource identifier. + The server will always return the full Jabber ID in the + resource binding step, and this is stored in this instance + variable. + @type jid: L{jid.JID} + @ivar password: password to be used during SASL authentication. + @type password: L{unicode} + """ + + namespace = 'jabber:client' + + def __init__(self, jid, password): + xmlstream.ConnectAuthenticator.__init__(self, jid.host) + self.jid = jid + self.password = password + + + def associateWithStream(self, xs): + """ + Register with the XML stream. + + Populates stream's list of initializers, along with their + requiredness. This list is used by + L{ConnectAuthenticator.initializeStream} to perform the initalization + steps. + """ + xmlstream.ConnectAuthenticator.associateWithStream(self, xs) + + xs.initializers = [CheckVersionInitializer(xs)] + inits = [ (xmlstream.TLSInitiatingInitializer, False), + (sasl.SASLInitiatingInitializer, True), + (BindInitializer, False), + (SessionInitializer, False), + ] + + for initClass, required in inits: + init = initClass(xs) + init.required = required + xs.initializers.append(init) diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/component.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/component.py new file mode 100644 index 000000000000..8df5bf7845f4 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/component.py @@ -0,0 +1,474 @@ +# -*- test-case-name: twisted.words.test.test_jabbercomponent -*- +# +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +External server-side components. + +Most Jabber server implementations allow for add-on components that act as a +seperate entity on the Jabber network, but use the server-to-server +functionality of a regular Jabber IM server. These so-called 'external +components' are connected to the Jabber server using the Jabber Component +Protocol as defined in U{JEP-0114}. + +This module allows for writing external server-side component by assigning one +or more services implementing L{ijabber.IService} up to L{ServiceManager}. The +ServiceManager connects to the Jabber server and is responsible for the +corresponding XML stream. 
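A sketch of what such a service can look like; the EchoService name and its behaviour are purely illustrative. It subclasses the Service class defined below and bounces every incoming message back to its sender once the component link is up:

    from twisted.words.protocols.jabber import component

    class EchoService(component.Service):
        # Hypothetical example: echo each <message/> back to its sender.
        def componentConnected(self, xs):
            xs.addObserver('/message', self.onMessage)

        def onMessage(self, message):
            # Swap the addressing and send the stanza back through the manager.
            message['to'], message['from'] = message['from'], message['to']
            self.send(message)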
+""" + +from zope.interface import implements + +from twisted.application import service +from twisted.internet import defer +from twisted.python import log +from twisted.words.xish import domish +from twisted.words.protocols.jabber import error, ijabber, jstrports, xmlstream +from twisted.words.protocols.jabber.jid import internJID as JID + +NS_COMPONENT_ACCEPT = 'jabber:component:accept' + +def componentFactory(componentid, password): + """ + XML stream factory for external server-side components. + + @param componentid: JID of the component. + @type componentid: L{unicode} + @param password: password used to authenticate to the server. + @type password: L{str} + """ + a = ConnectComponentAuthenticator(componentid, password) + return xmlstream.XmlStreamFactory(a) + +class ComponentInitiatingInitializer(object): + """ + External server-side component authentication initializer for the + initiating entity. + + @ivar xmlstream: XML stream between server and component. + @type xmlstream: L{xmlstream.XmlStream} + """ + + def __init__(self, xs): + self.xmlstream = xs + self._deferred = None + + def initialize(self): + xs = self.xmlstream + hs = domish.Element((self.xmlstream.namespace, "handshake")) + hs.addContent(xmlstream.hashPassword(xs.sid, + unicode(xs.authenticator.password))) + + # Setup observer to watch for handshake result + xs.addOnetimeObserver("/handshake", self._cbHandshake) + xs.send(hs) + self._deferred = defer.Deferred() + return self._deferred + + def _cbHandshake(self, _): + # we have successfully shaken hands and can now consider this + # entity to represent the component JID. + self.xmlstream.thisEntity = self.xmlstream.otherEntity + self._deferred.callback(None) + + + +class ConnectComponentAuthenticator(xmlstream.ConnectAuthenticator): + """ + Authenticator to permit an XmlStream to authenticate against a Jabber + server as an external component (where the Authenticator is initiating the + stream). + """ + namespace = NS_COMPONENT_ACCEPT + + def __init__(self, componentjid, password): + """ + @type componentjid: L{str} + @param componentjid: Jabber ID that this component wishes to bind to. + + @type password: L{str} + @param password: Password/secret this component uses to authenticate. + """ + # Note that we are sending 'to' our desired component JID. + xmlstream.ConnectAuthenticator.__init__(self, componentjid) + self.password = password + + def associateWithStream(self, xs): + xs.version = (0, 0) + xmlstream.ConnectAuthenticator.associateWithStream(self, xs) + + xs.initializers = [ComponentInitiatingInitializer(xs)] + + + +class ListenComponentAuthenticator(xmlstream.ListenAuthenticator): + """ + Authenticator for accepting components. + + @since: 8.2 + @ivar secret: The shared secret used to authorized incoming component + connections. + @type secret: C{unicode}. + """ + + namespace = NS_COMPONENT_ACCEPT + + def __init__(self, secret): + self.secret = secret + xmlstream.ListenAuthenticator.__init__(self) + + + def associateWithStream(self, xs): + """ + Associate the authenticator with a stream. + + This sets the stream's version to 0.0, because the XEP-0114 component + protocol was not designed for XMPP 1.0. + """ + xs.version = (0, 0) + xmlstream.ListenAuthenticator.associateWithStream(self, xs) + + + def streamStarted(self, rootElement): + """ + Called by the stream when it has started. + + This examines the default namespace of the incoming stream and whether + there is a requested hostname for the component. 
Then it generates a + stream identifier, sends a response header and adds an observer for + the first incoming element, triggering L{onElement}. + """ + + xmlstream.ListenAuthenticator.streamStarted(self, rootElement) + + if rootElement.defaultUri != self.namespace: + exc = error.StreamError('invalid-namespace') + self.xmlstream.sendStreamError(exc) + return + + # self.xmlstream.thisEntity is set to the address the component + # wants to assume. + if not self.xmlstream.thisEntity: + exc = error.StreamError('improper-addressing') + self.xmlstream.sendStreamError(exc) + return + + self.xmlstream.sendHeader() + self.xmlstream.addOnetimeObserver('/*', self.onElement) + + + def onElement(self, element): + """ + Called on incoming XML Stanzas. + + The very first element received should be a request for handshake. + Otherwise, the stream is dropped with a 'not-authorized' error. If a + handshake request was received, the hash is extracted and passed to + L{onHandshake}. + """ + if (element.uri, element.name) == (self.namespace, 'handshake'): + self.onHandshake(unicode(element)) + else: + exc = error.StreamError('not-authorized') + self.xmlstream.sendStreamError(exc) + + + def onHandshake(self, handshake): + """ + Called upon receiving the handshake request. + + This checks that the given hash in C{handshake} is equal to a + calculated hash, responding with a handshake reply or a stream error. + If the handshake was ok, the stream is authorized, and XML Stanzas may + be exchanged. + """ + calculatedHash = xmlstream.hashPassword(self.xmlstream.sid, + unicode(self.secret)) + if handshake != calculatedHash: + exc = error.StreamError('not-authorized', text='Invalid hash') + self.xmlstream.sendStreamError(exc) + else: + self.xmlstream.send('') + self.xmlstream.dispatch(self.xmlstream, + xmlstream.STREAM_AUTHD_EVENT) + + + +class Service(service.Service): + """ + External server-side component service. + """ + + implements(ijabber.IService) + + def componentConnected(self, xs): + pass + + def componentDisconnected(self): + pass + + def transportConnected(self, xs): + pass + + def send(self, obj): + """ + Send data over service parent's XML stream. + + @note: L{ServiceManager} maintains a queue for data sent using this + method when there is no current established XML stream. This data is + then sent as soon as a new stream has been established and initialized. + Subsequently, L{componentConnected} will be called again. If this + queueing is not desired, use C{send} on the XmlStream object (passed to + L{componentConnected}) directly. + + @param obj: data to be sent over the XML stream. This is usually an + object providing L{domish.IElement}, or serialized XML. See + L{xmlstream.XmlStream} for details. + """ + + self.parent.send(obj) + +class ServiceManager(service.MultiService): + """ + Business logic representing a managed component connection to a Jabber + router. + + This service maintains a single connection to a Jabber router and provides + facilities for packet routing and transmission. Business logic modules are + services implementing L{ijabber.IService} (like subclasses of L{Service}), and + added as sub-service. 
+ """ + + def __init__(self, jid, password): + service.MultiService.__init__(self) + + # Setup defaults + self.jabberId = jid + self.xmlstream = None + + # Internal buffer of packets + self._packetQueue = [] + + # Setup the xmlstream factory + self._xsFactory = componentFactory(self.jabberId, password) + + # Register some lambda functions to keep the self.xmlstream var up to + # date + self._xsFactory.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, + self._connected) + self._xsFactory.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self._authd) + self._xsFactory.addBootstrap(xmlstream.STREAM_END_EVENT, + self._disconnected) + + # Map addBootstrap and removeBootstrap to the underlying factory -- is + # this right? I have no clue...but it'll work for now, until i can + # think about it more. + self.addBootstrap = self._xsFactory.addBootstrap + self.removeBootstrap = self._xsFactory.removeBootstrap + + def getFactory(self): + return self._xsFactory + + def _connected(self, xs): + self.xmlstream = xs + for c in self: + if ijabber.IService.providedBy(c): + c.transportConnected(xs) + + def _authd(self, xs): + # Flush all pending packets + for p in self._packetQueue: + self.xmlstream.send(p) + self._packetQueue = [] + + # Notify all child services which implement the IService interface + for c in self: + if ijabber.IService.providedBy(c): + c.componentConnected(xs) + + def _disconnected(self, _): + self.xmlstream = None + + # Notify all child services which implement + # the IService interface + for c in self: + if ijabber.IService.providedBy(c): + c.componentDisconnected() + + def send(self, obj): + """ + Send data over the XML stream. + + When there is no established XML stream, the data is queued and sent + out when a new XML stream has been established and initialized. + + @param obj: data to be sent over the XML stream. This is usually an + object providing L{domish.IElement}, or serialized XML. See + L{xmlstream.XmlStream} for details. + """ + + if self.xmlstream != None: + self.xmlstream.send(obj) + else: + self._packetQueue.append(obj) + +def buildServiceManager(jid, password, strport): + """ + Constructs a pre-built L{ServiceManager}, using the specified strport + string. + """ + + svc = ServiceManager(jid, password) + client_svc = jstrports.client(strport, svc.getFactory()) + client_svc.setServiceParent(svc) + return svc + + + +class Router(object): + """ + XMPP Server's Router. + + A router connects the different components of the XMPP service and routes + messages between them based on the given routing table. + + Connected components are trusted to have correct addressing in the + stanzas they offer for routing. + + A route destination of C{None} adds a default route. Traffic for which no + specific route exists, will be routed to this default route. + + @since: 8.2 + @ivar routes: Routes based on the host part of JIDs. Maps host names to the + L{EventDispatcher}s that should + receive the traffic. A key of C{None} means the default + route. + @type routes: C{dict} + """ + + def __init__(self): + self.routes = {} + + + def addRoute(self, destination, xs): + """ + Add a new route. + + The passed XML Stream C{xs} will have an observer for all stanzas + added to route its outgoing traffic. In turn, traffic for + C{destination} will be passed to this stream. + + @param destination: Destination of the route to be added as a host name + or C{None} for the default route. + @type destination: C{str} or C{NoneType}. + @param xs: XML Stream to register the route for. + @type xs: L{EventDispatcher}. 
+ """ + self.routes[destination] = xs + xs.addObserver('/*', self.route) + + + def removeRoute(self, destination, xs): + """ + Remove a route. + + @param destination: Destination of the route that should be removed. + @type destination: C{str}. + @param xs: XML Stream to remove the route for. + @type xs: L{EventDispatcher}. + """ + xs.removeObserver('/*', self.route) + if (xs == self.routes[destination]): + del self.routes[destination] + + + def route(self, stanza): + """ + Route a stanza. + + @param stanza: The stanza to be routed. + @type stanza: L{domish.Element}. + """ + destination = JID(stanza['to']) + + log.msg("Routing to %s: %r" % (destination.full(), stanza.toXml())) + + if destination.host in self.routes: + self.routes[destination.host].send(stanza) + else: + self.routes[None].send(stanza) + + + +class XMPPComponentServerFactory(xmlstream.XmlStreamServerFactory): + """ + XMPP Component Server factory. + + This factory accepts XMPP external component connections and makes + the router service route traffic for a component's bound domain + to that component. + + @since: 8.2 + """ + + logTraffic = False + + def __init__(self, router, secret='secret'): + self.router = router + self.secret = secret + + def authenticatorFactory(): + return ListenComponentAuthenticator(self.secret) + + xmlstream.XmlStreamServerFactory.__init__(self, authenticatorFactory) + self.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, + self.onConnectionMade) + self.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, + self.onAuthenticated) + + self.serial = 0 + + + def onConnectionMade(self, xs): + """ + Called when a component connection was made. + + This enables traffic debugging on incoming streams. + """ + xs.serial = self.serial + self.serial += 1 + + def logDataIn(buf): + log.msg("RECV (%d): %r" % (xs.serial, buf)) + + def logDataOut(buf): + log.msg("SEND (%d): %r" % (xs.serial, buf)) + + if self.logTraffic: + xs.rawDataInFn = logDataIn + xs.rawDataOutFn = logDataOut + + xs.addObserver(xmlstream.STREAM_ERROR_EVENT, self.onError) + + + def onAuthenticated(self, xs): + """ + Called when a component has succesfully authenticated. + + Add the component to the routing table and establish a handler + for a closed connection. + """ + destination = xs.thisEntity.host + + self.router.addRoute(destination, xs) + xs.addObserver(xmlstream.STREAM_END_EVENT, self.onConnectionLost, 0, + destination, xs) + + + def onError(self, reason): + log.err(reason, "Stream Error") + + + def onConnectionLost(self, destination, xs, reason): + self.router.removeRoute(destination, xs) diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/error.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/error.py new file mode 100644 index 000000000000..64fbe284d9fb --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/error.py @@ -0,0 +1,336 @@ +# -*- test-case-name: twisted.words.test.test_jabbererror -*- +# +# Copyright (c) 2001-2007 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +XMPP Error support. 
+""" + +import copy + +from twisted.words.xish import domish + +NS_XML = "http://www.w3.org/XML/1998/namespace" +NS_XMPP_STREAMS = "urn:ietf:params:xml:ns:xmpp-streams" +NS_XMPP_STANZAS = "urn:ietf:params:xml:ns:xmpp-stanzas" + +STANZA_CONDITIONS = { + 'bad-request': {'code': '400', 'type': 'modify'}, + 'conflict': {'code': '409', 'type': 'cancel'}, + 'feature-not-implemented': {'code': '501', 'type': 'cancel'}, + 'forbidden': {'code': '403', 'type': 'auth'}, + 'gone': {'code': '302', 'type': 'modify'}, + 'internal-server-error': {'code': '500', 'type': 'wait'}, + 'item-not-found': {'code': '404', 'type': 'cancel'}, + 'jid-malformed': {'code': '400', 'type': 'modify'}, + 'not-acceptable': {'code': '406', 'type': 'modify'}, + 'not-allowed': {'code': '405', 'type': 'cancel'}, + 'not-authorized': {'code': '401', 'type': 'auth'}, + 'payment-required': {'code': '402', 'type': 'auth'}, + 'recipient-unavailable': {'code': '404', 'type': 'wait'}, + 'redirect': {'code': '302', 'type': 'modify'}, + 'registration-required': {'code': '407', 'type': 'auth'}, + 'remote-server-not-found': {'code': '404', 'type': 'cancel'}, + 'remove-server-timeout': {'code': '504', 'type': 'wait'}, + 'resource-constraint': {'code': '500', 'type': 'wait'}, + 'service-unavailable': {'code': '503', 'type': 'cancel'}, + 'subscription-required': {'code': '407', 'type': 'auth'}, + 'undefined-condition': {'code': '500', 'type': None}, + 'unexpected-request': {'code': '400', 'type': 'wait'}, +} + +CODES_TO_CONDITIONS = { + '302': ('gone', 'modify'), + '400': ('bad-request', 'modify'), + '401': ('not-authorized', 'auth'), + '402': ('payment-required', 'auth'), + '403': ('forbidden', 'auth'), + '404': ('item-not-found', 'cancel'), + '405': ('not-allowed', 'cancel'), + '406': ('not-acceptable', 'modify'), + '407': ('registration-required', 'auth'), + '408': ('remote-server-timeout', 'wait'), + '409': ('conflict', 'cancel'), + '500': ('internal-server-error', 'wait'), + '501': ('feature-not-implemented', 'cancel'), + '502': ('service-unavailable', 'wait'), + '503': ('service-unavailable', 'cancel'), + '504': ('remote-server-timeout', 'wait'), + '510': ('service-unavailable', 'cancel'), +} + +class BaseError(Exception): + """ + Base class for XMPP error exceptions. + + @cvar namespace: The namespace of the C{error} element generated by + C{getElement}. + @type namespace: C{str} + @ivar condition: The error condition. The valid values are defined by + subclasses of L{BaseError}. + @type contition: C{str} + @ivar text: Optional text message to supplement the condition or application + specific condition. + @type text: C{unicode} + @ivar textLang: Identifier of the language used for the message in C{text}. + Values are as described in RFC 3066. + @type textLang: C{str} + @ivar appCondition: Application specific condition element, supplementing + the error condition in C{condition}. + @type appCondition: object providing L{domish.IElement}. + """ + + namespace = None + + def __init__(self, condition, text=None, textLang=None, appCondition=None): + Exception.__init__(self) + self.condition = condition + self.text = text + self.textLang = textLang + self.appCondition = appCondition + + + def __str__(self): + message = "%s with condition %r" % (self.__class__.__name__, + self.condition) + + if self.text: + message += ': ' + self.text + + return message + + + def getElement(self): + """ + Get XML representation from self. + + The method creates an L{domish} representation of the + error data contained in this exception. 
+ + @rtype: L{domish.Element} + """ + error = domish.Element((None, 'error')) + error.addElement((self.namespace, self.condition)) + if self.text: + text = error.addElement((self.namespace, 'text'), + content=self.text) + if self.textLang: + text[(NS_XML, 'lang')] = self.textLang + if self.appCondition: + error.addChild(self.appCondition) + return error + + + +class StreamError(BaseError): + """ + Stream Error exception. + + Refer to RFC 3920, section 4.7.3, for the allowed values for C{condition}. + """ + + namespace = NS_XMPP_STREAMS + + def getElement(self): + """ + Get XML representation from self. + + Overrides the base L{BaseError.getElement} to make sure the returned + element is in the XML Stream namespace. + + @rtype: L{domish.Element} + """ + from twisted.words.protocols.jabber.xmlstream import NS_STREAMS + + error = BaseError.getElement(self) + error.uri = NS_STREAMS + return error + + + +class StanzaError(BaseError): + """ + Stanza Error exception. + + Refer to RFC 3920, section 9.3, for the allowed values for C{condition} and + C{type}. + + @ivar type: The stanza error type. Gives a suggestion to the recipient + of the error on how to proceed. + @type type: C{str} + @ivar code: A numeric identifier for the error condition for backwards + compatibility with pre-XMPP Jabber implementations. + """ + + namespace = NS_XMPP_STANZAS + + def __init__(self, condition, type=None, text=None, textLang=None, + appCondition=None): + BaseError.__init__(self, condition, text, textLang, appCondition) + + if type is None: + try: + type = STANZA_CONDITIONS[condition]['type'] + except KeyError: + pass + self.type = type + + try: + self.code = STANZA_CONDITIONS[condition]['code'] + except KeyError: + self.code = None + + self.children = [] + self.iq = None + + + def getElement(self): + """ + Get XML representation from self. + + Overrides the base L{BaseError.getElement} to make sure the returned + element has a C{type} attribute and optionally a legacy C{code} + attribute. + + @rtype: L{domish.Element} + """ + error = BaseError.getElement(self) + error['type'] = self.type + if self.code: + error['code'] = self.code + return error + + + def toResponse(self, stanza): + """ + Construct error response stanza. + + The C{stanza} is transformed into an error response stanza by + swapping the C{to} and C{from} addresses and inserting an error + element. + + @note: This creates a shallow copy of the list of child elements of the + stanza. The child elements themselves are not copied themselves, + and references to their parent element will still point to the + original stanza element. + + The serialization of an element does not use the reference to + its parent, so the typical use case of immediately sending out + the constructed error response is not affected. + + @param stanza: the stanza to respond to + @type stanza: L{domish.Element} + """ + from twisted.words.protocols.jabber.xmlstream import toResponse + response = toResponse(stanza, stanzaType='error') + response.children = copy.copy(stanza.children) + response.addChild(self.getElement()) + return response + + +def _getText(element): + for child in element.children: + if isinstance(child, basestring): + return unicode(child) + + return None + + + +def _parseError(error, errorNamespace): + """ + Parses an error element. + + @param error: The error element to be parsed + @type error: L{domish.Element} + @param errorNamespace: The namespace of the elements that hold the error + condition and text. 
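Taken together, answering an offending request usually amounts to picking one of the defined conditions and letting toResponse do the address swapping. A minimal sketch, where xs and iq stand for an established XmlStream and a received iq request:

    from twisted.words.protocols.jabber import error

    def reply_not_implemented(xs, iq):
        # Builds <error type='cancel' code='501'><feature-not-implemented/></error>,
        # swaps the to/from addresses of the original stanza and sends the reply.
        exc = error.StanzaError('feature-not-implemented', text='Not supported here')
        xs.send(exc.toResponse(iq))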
+ @type errorNamespace: C{str} + @return: Dictionary with extracted error information. If present, keys + C{condition}, C{text}, C{textLang} have a string value, + and C{appCondition} has an L{domish.Element} value. + @rtype: L{dict} + """ + condition = None + text = None + textLang = None + appCondition = None + + for element in error.elements(): + if element.uri == errorNamespace: + if element.name == 'text': + text = _getText(element) + textLang = element.getAttribute((NS_XML, 'lang')) + else: + condition = element.name + else: + appCondition = element + + return { + 'condition': condition, + 'text': text, + 'textLang': textLang, + 'appCondition': appCondition, + } + + + +def exceptionFromStreamError(element): + """ + Build an exception object from a stream error. + + @param element: the stream error + @type element: L{domish.Element} + @return: the generated exception object + @rtype: L{StreamError} + """ + error = _parseError(element, NS_XMPP_STREAMS) + + exception = StreamError(error['condition'], + error['text'], + error['textLang'], + error['appCondition']) + + return exception + + + +def exceptionFromStanza(stanza): + """ + Build an exception object from an error stanza. + + @param stanza: the error stanza + @type stanza: L{domish.Element} + @return: the generated exception object + @rtype: L{StanzaError} + """ + children = [] + condition = text = textLang = appCondition = type = code = None + + for element in stanza.elements(): + if element.name == 'error' and element.uri == stanza.uri: + code = element.getAttribute('code') + type = element.getAttribute('type') + error = _parseError(element, NS_XMPP_STANZAS) + condition = error['condition'] + text = error['text'] + textLang = error['textLang'] + appCondition = error['appCondition'] + + if not condition and code: + condition, type = CODES_TO_CONDITIONS[code] + text = _getText(stanza.error) + else: + children.append(element) + + if condition is None: + # TODO: raise exception instead? + return StanzaError(None) + + exception = StanzaError(condition, type, text, textLang, appCondition) + + exception.children = children + exception.stanza = stanza + + return exception diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/ijabber.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/ijabber.py new file mode 100644 index 000000000000..1e50179b4107 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/ijabber.py @@ -0,0 +1,199 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Public Jabber Interfaces. +""" + +from zope.interface import Attribute, Interface + +class IInitializer(Interface): + """ + Interface for XML stream initializers. + + Initializers perform a step in getting the XML stream ready to be + used for the exchange of XML stanzas. + """ + + + +class IInitiatingInitializer(IInitializer): + """ + Interface for XML stream initializers for the initiating entity. + """ + + xmlstream = Attribute("""The associated XML stream""") + + def initialize(): + """ + Initiate the initialization step. + + May return a deferred when the initialization is done asynchronously. + """ + + + +class IIQResponseTracker(Interface): + """ + IQ response tracker interface. + + The XMPP stanza C{iq} has a request-response nature that fits + naturally with deferreds. You send out a request and when the response + comes back a deferred is fired. + + The L{IQ} class implements a C{send} method that returns a deferred. 
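A short sketch of that request/response pattern from the initiating side; xs stands for an initialized XmlStream, and the queried namespace and addressee are placeholders:

    from twisted.words.protocols.jabber import error, xmlstream

    def query_version(xs):
        iq = xmlstream.IQ(xs, 'get')
        iq.addElement(('jabber:iq:version', 'query'))
        d = iq.send(to='example.com')        # deferred fires with the result iq

        def onError(failure):
            failure.trap(error.StanzaError)
            return failure.value.condition   # e.g. 'service-unavailable'

        d.addErrback(onError)
        return d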
This + deferred is put in a dictionary that is kept in an L{XmlStream} object, + keyed by the request stanzas C{id} attribute. + + An object providing this interface (usually an instance of L{XmlStream}), + keeps the said dictionary and sets observers on the iq stanzas of type + C{result} and C{error} and lets the callback fire the associated deferred. + """ + iqDeferreds = Attribute("Dictionary of deferreds waiting for an iq " + "response") + + + +class IXMPPHandler(Interface): + """ + Interface for XMPP protocol handlers. + + Objects that provide this interface can be added to a stream manager to + handle of (part of) an XMPP extension protocol. + """ + + parent = Attribute("""XML stream manager for this handler""") + xmlstream = Attribute("""The managed XML stream""") + + def setHandlerParent(parent): + """ + Set the parent of the handler. + + @type parent: L{IXMPPHandlerCollection} + """ + + + def disownHandlerParent(parent): + """ + Remove the parent of the handler. + + @type parent: L{IXMPPHandlerCollection} + """ + + + def makeConnection(xs): + """ + A connection over the underlying transport of the XML stream has been + established. + + At this point, no traffic has been exchanged over the XML stream + given in C{xs}. + + This should setup L{xmlstream} and call L{connectionMade}. + + @type xs: L{XmlStream} + """ + + + def connectionMade(): + """ + Called after a connection has been established. + + This method can be used to change properties of the XML Stream, its + authenticator or the stream manager prior to stream initialization + (including authentication). + """ + + + def connectionInitialized(): + """ + The XML stream has been initialized. + + At this point, authentication was successful, and XML stanzas can be + exchanged over the XML stream L{xmlstream}. This method can be + used to setup observers for incoming stanzas. + """ + + + def connectionLost(reason): + """ + The XML stream has been closed. + + Subsequent use of L{parent.send} will result in data being queued + until a new connection has been established. + + @type reason: L{twisted.python.failure.Failure} + """ + + + +class IXMPPHandlerCollection(Interface): + """ + Collection of handlers. + + Contain several handlers and manage their connection. + """ + + def __iter__(): + """ + Get an iterator over all child handlers. + """ + + + def addHandler(handler): + """ + Add a child handler. + + @type handler: L{IXMPPHandler} + """ + + + def removeHandler(handler): + """ + Remove a child handler. + + @type handler: L{IXMPPHandler} + """ + + + +class IService(Interface): + """ + External server-side component service interface. + + Services that provide this interface can be added to L{ServiceManager} to + implement (part of) the functionality of the server-side component. + """ + + def componentConnected(xs): + """ + Parent component has established a connection. + + At this point, authentication was succesful, and XML stanzas + can be exchanged over the XML stream L{xs}. This method can be used + to setup observers for incoming stanzas. + + @param xs: XML Stream that represents the established connection. + @type xs: L{xmlstream.XmlStream} + """ + + + def componentDisconnected(): + """ + Parent component has lost the connection to the Jabber server. + + Subsequent use of C{self.parent.send} will result in data being + queued until a new connection has been established. + """ + + + def transportConnected(xs): + """ + Parent component has established a connection over the underlying + transport. 
+ + At this point, no traffic has been exchanged over the XML stream. This + method can be used to change properties of the XML Stream (in L{xs}), + the service manager or it's authenticator prior to stream + initialization (including authentication). + """ diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/jid.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/jid.py new file mode 100644 index 000000000000..2604685c6f41 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/jid.py @@ -0,0 +1,249 @@ +# -*- test-case-name: twisted.words.test.test_jabberjid -*- +# +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Jabber Identifier support. + +This module provides an object to represent Jabber Identifiers (JIDs) and +parse string representations into them with proper checking for illegal +characters, case folding and canonicalisation through L{stringprep}. +""" + +from twisted.words.protocols.jabber.xmpp_stringprep import nodeprep, resourceprep, nameprep + +class InvalidFormat(Exception): + """ + The given string could not be parsed into a valid Jabber Identifier (JID). + """ + +def parse(jidstring): + """ + Parse given JID string into its respective parts and apply stringprep. + + @param jidstring: string representation of a JID. + @type jidstring: C{unicode} + @return: tuple of (user, host, resource), each of type C{unicode} as + the parsed and stringprep'd parts of the given JID. If the + given string did not have a user or resource part, the respective + field in the tuple will hold C{None}. + @rtype: C{tuple} + """ + user = None + host = None + resource = None + + # Search for delimiters + user_sep = jidstring.find("@") + res_sep = jidstring.find("/") + + if user_sep == -1: + if res_sep == -1: + # host + host = jidstring + else: + # host/resource + host = jidstring[0:res_sep] + resource = jidstring[res_sep + 1:] or None + else: + if res_sep == -1: + # user@host + user = jidstring[0:user_sep] or None + host = jidstring[user_sep + 1:] + else: + if user_sep < res_sep: + # user@host/resource + user = jidstring[0:user_sep] or None + host = jidstring[user_sep + 1:user_sep + (res_sep - user_sep)] + resource = jidstring[res_sep + 1:] or None + else: + # host/resource (with an @ in resource) + host = jidstring[0:res_sep] + resource = jidstring[res_sep + 1:] or None + + return prep(user, host, resource) + +def prep(user, host, resource): + """ + Perform stringprep on all JID fragments. + + @param user: The user part of the JID. + @type user: C{unicode} + @param host: The host part of the JID. + @type host: C{unicode} + @param resource: The resource part of the JID. + @type resource: C{unicode} + @return: The given parts with stringprep applied. + @rtype: C{tuple} + """ + + if user: + try: + user = nodeprep.prepare(unicode(user)) + except UnicodeError: + raise InvalidFormat, "Invalid character in username" + else: + user = None + + if not host: + raise InvalidFormat, "Server address required." + else: + try: + host = nameprep.prepare(unicode(host)) + except UnicodeError: + raise InvalidFormat, "Invalid character in hostname" + + if resource: + try: + resource = resourceprep.prepare(unicode(resource)) + except UnicodeError: + raise InvalidFormat, "Invalid character in resource" + else: + resource = None + + return (user, host, resource) + +__internJIDs = {} + +def internJID(jidstring): + """ + Return interned JID. 
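To make the splitting and stringprep rules concrete, a few illustrative calls (the JIDs are made up; note that node and host parts are case folded while the resource is preserved):

    from twisted.words.protocols.jabber.jid import internJID, parse

    parse(u"alice@Example.COM/Work")   # -> (u'alice', u'example.com', u'Work')
    parse(u"example.com/balcony")      # -> (None, u'example.com', u'balcony')
    parse(u"example.com")              # -> (None, u'example.com', None)

    # internJID caches JID instances, so parsing the same string twice
    # hands back the identical object:
    internJID(u"alice@example.com") is internJID(u"alice@example.com")   # True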
+ + @rtype: L{JID} + """ + + if jidstring in __internJIDs: + return __internJIDs[jidstring] + else: + j = JID(jidstring) + __internJIDs[jidstring] = j + return j + +class JID(object): + """ + Represents a stringprep'd Jabber ID. + + JID objects are hashable so they can be used in sets and as keys in + dictionaries. + """ + + def __init__(self, str=None, tuple=None): + if not (str or tuple): + raise RuntimeError("You must provide a value for either 'str' or " + "'tuple' arguments.") + + if str: + user, host, res = parse(str) + else: + user, host, res = prep(*tuple) + + self.user = user + self.host = host + self.resource = res + + def userhost(self): + """ + Extract the bare JID as a unicode string. + + A bare JID does not have a resource part, so this returns either + C{user@host} or just C{host}. + + @rtype: C{unicode} + """ + if self.user: + return u"%s@%s" % (self.user, self.host) + else: + return self.host + + def userhostJID(self): + """ + Extract the bare JID. + + A bare JID does not have a resource part, so this returns a + L{JID} object representing either C{user@host} or just C{host}. + + If the object this method is called upon doesn't have a resource + set, it will return itself. Otherwise, the bare JID object will + be created, interned using L{internJID}. + + @rtype: L{JID} + """ + if self.resource: + return internJID(self.userhost()) + else: + return self + + def full(self): + """ + Return the string representation of this JID. + + @rtype: C{unicode} + """ + if self.user: + if self.resource: + return u"%s@%s/%s" % (self.user, self.host, self.resource) + else: + return u"%s@%s" % (self.user, self.host) + else: + if self.resource: + return u"%s/%s" % (self.host, self.resource) + else: + return self.host + + def __eq__(self, other): + """ + Equality comparison. + + L{JID}s compare equal if their user, host and resource parts all + compare equal. When comparing against instances of other types, it + uses the default comparison. + """ + if isinstance(other, JID): + return (self.user == other.user and + self.host == other.host and + self.resource == other.resource) + else: + return NotImplemented + + def __ne__(self, other): + """ + Inequality comparison. + + This negates L{__eq__} for comparison with JIDs and uses the default + comparison for other types. + """ + result = self.__eq__(other) + if result is NotImplemented: + return result + else: + return not result + + def __hash__(self): + """ + Calculate hash. + + L{JID}s with identical constituent user, host and resource parts have + equal hash values. In combination with the comparison defined on JIDs, + this allows for using L{JID}s in sets and as dictionary keys. + """ + return hash((self.user, self.host, self.resource)) + + def __unicode__(self): + """ + Get unicode representation. + + Return the string representation of this JID as a unicode string. + @see: L{full} + """ + + return self.full() + + def __repr__(self): + """ + Get object representation. + + Returns a string that would create a new JID object that compares equal + to this one. + """ + return 'JID(%r)' % self.full() diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/jstrports.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/jstrports.py new file mode 100644 index 000000000000..dbecbdd9a541 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/jstrports.py @@ -0,0 +1,31 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +""" A temporary placeholder for client-capable strports, until we +sufficient use cases get identified """ + +from twisted.application import strports + +def _parseTCPSSL(factory, domain, port): + """ For the moment, parse TCP or SSL connections the same """ + return (domain, int(port), factory), {} + +def _parseUNIX(factory, address): + return (address, factory), {} + + +_funcs = { "tcp" : _parseTCPSSL, + "unix" : _parseUNIX, + "ssl" : _parseTCPSSL } + + +def parse(description, factory): + args, kw = strports._parse(description) + return (args[0].upper(),) + _funcs[args[0]](factory, *args[1:], **kw) + +def client(description, factory): + from twisted.application import internet + name, args, kw = parse(description, factory) + return getattr(internet, name + 'Client')(*args, **kw) diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/sasl.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/sasl.py new file mode 100644 index 000000000000..eb4b6c375071 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/sasl.py @@ -0,0 +1,243 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +XMPP-specific SASL profile. +""" + +import re +from twisted.internet import defer +from twisted.words.protocols.jabber import sasl_mechanisms, xmlstream +from twisted.words.xish import domish + +# The b64decode and b64encode functions from the base64 module are new in +# Python 2.4. For Python 2.3 compatibility, the legacy interface is used while +# working around MIMEisms. + +try: + from base64 import b64decode, b64encode +except ImportError: + import base64 + + def b64encode(s): + return "".join(base64.encodestring(s).split("\n")) + + b64decode = base64.decodestring + +NS_XMPP_SASL = 'urn:ietf:params:xml:ns:xmpp-sasl' + +def get_mechanisms(xs): + """ + Parse the SASL feature to extract the available mechanism names. + """ + mechanisms = [] + for element in xs.features[(NS_XMPP_SASL, 'mechanisms')].elements(): + if element.name == 'mechanism': + mechanisms.append(str(element)) + + return mechanisms + + +class SASLError(Exception): + """ + SASL base exception. + """ + + +class SASLNoAcceptableMechanism(SASLError): + """ + The server did not present an acceptable SASL mechanism. + """ + + +class SASLAuthError(SASLError): + """ + SASL Authentication failed. + """ + def __init__(self, condition=None): + self.condition = condition + + + def __str__(self): + return "SASLAuthError with condition %r" % self.condition + + +class SASLIncorrectEncodingError(SASLError): + """ + SASL base64 encoding was incorrect. + + RFC 3920 specifies that any characters not in the base64 alphabet + and padding characters present elsewhere than at the end of the string + MUST be rejected. See also L{fromBase64}. + + This exception is raised whenever the encoded string does not adhere + to these additional restrictions or when the decoding itself fails. + + The recommended behaviour for so-called receiving entities (like servers in + client-to-server connections, see RFC 3920 for terminology) is to fail the + SASL negotiation with a C{'incorrect-encoding'} condition. For initiating + entities, one should assume the receiving entity to be either buggy or + malevolent. The stream should be terminated and reconnecting is not + advised. + """ + +base64Pattern = re.compile("^[0-9A-Za-z+/]*[0-9A-Za-z+/=]{,2}$") + +def fromBase64(s): + """ + Decode base64 encoded string. 
+ + This helper performs regular decoding of a base64 encoded string, but also + rejects any characters that are not in the base64 alphabet and padding + occurring elsewhere from the last or last two characters, as specified in + section 14.9 of RFC 3920. This safeguards against various attack vectors + among which the creation of a covert channel that "leaks" information. + """ + + if base64Pattern.match(s) is None: + raise SASLIncorrectEncodingError() + + try: + return b64decode(s) + except Exception, e: + raise SASLIncorrectEncodingError(str(e)) + + + +class SASLInitiatingInitializer(xmlstream.BaseFeatureInitiatingInitializer): + """ + Stream initializer that performs SASL authentication. + + The supported mechanisms by this initializer are C{DIGEST-MD5}, C{PLAIN} + and C{ANONYMOUS}. The C{ANONYMOUS} SASL mechanism is used when the JID, set + on the authenticator, does not have a localpart (username), requesting an + anonymous session where the username is generated by the server. + Otherwise, C{DIGEST-MD5} and C{PLAIN} are attempted, in that order. + """ + + feature = (NS_XMPP_SASL, 'mechanisms') + _deferred = None + + def setMechanism(self): + """ + Select and setup authentication mechanism. + + Uses the authenticator's C{jid} and C{password} attribute for the + authentication credentials. If no supported SASL mechanisms are + advertized by the receiving party, a failing deferred is returned with + a L{SASLNoAcceptableMechanism} exception. + """ + + jid = self.xmlstream.authenticator.jid + password = self.xmlstream.authenticator.password + + mechanisms = get_mechanisms(self.xmlstream) + if jid.user is not None: + if 'DIGEST-MD5' in mechanisms: + self.mechanism = sasl_mechanisms.DigestMD5('xmpp', jid.host, None, + jid.user, password) + elif 'PLAIN' in mechanisms: + self.mechanism = sasl_mechanisms.Plain(None, jid.user, password) + else: + raise SASLNoAcceptableMechanism() + else: + if 'ANONYMOUS' in mechanisms: + self.mechanism = sasl_mechanisms.Anonymous() + else: + raise SASLNoAcceptableMechanism() + + + def start(self): + """ + Start SASL authentication exchange. + """ + + self.setMechanism() + self._deferred = defer.Deferred() + self.xmlstream.addObserver('/challenge', self.onChallenge) + self.xmlstream.addOnetimeObserver('/success', self.onSuccess) + self.xmlstream.addOnetimeObserver('/failure', self.onFailure) + self.sendAuth(self.mechanism.getInitialResponse()) + return self._deferred + + + def sendAuth(self, data=None): + """ + Initiate authentication protocol exchange. + + If an initial client response is given in C{data}, it will be + sent along. + + @param data: initial client response. + @type data: L{str} or L{None}. + """ + + auth = domish.Element((NS_XMPP_SASL, 'auth')) + auth['mechanism'] = self.mechanism.name + if data is not None: + auth.addContent(b64encode(data) or '=') + self.xmlstream.send(auth) + + + def sendResponse(self, data=''): + """ + Send response to a challenge. + + @param data: client response. + @type data: L{str}. + """ + + response = domish.Element((NS_XMPP_SASL, 'response')) + if data: + response.addContent(b64encode(data)) + self.xmlstream.send(response) + + + def onChallenge(self, element): + """ + Parse challenge and send response from the mechanism. + + @param element: the challenge protocol element. + @type element: L{domish.Element}. 
+ """ + + try: + challenge = fromBase64(str(element)) + except SASLIncorrectEncodingError: + self._deferred.errback() + else: + self.sendResponse(self.mechanism.getResponse(challenge)) + + + def onSuccess(self, success): + """ + Clean up observers, reset the XML stream and send a new header. + + @param success: the success protocol element. For now unused, but + could hold additional data. + @type success: L{domish.Element} + """ + + self.xmlstream.removeObserver('/challenge', self.onChallenge) + self.xmlstream.removeObserver('/failure', self.onFailure) + self.xmlstream.reset() + self.xmlstream.sendHeader() + self._deferred.callback(xmlstream.Reset) + + + def onFailure(self, failure): + """ + Clean up observers, parse the failure and errback the deferred. + + @param failure: the failure protocol element. Holds details on + the error condition. + @type failure: L{domish.Element} + """ + + self.xmlstream.removeObserver('/challenge', self.onChallenge) + self.xmlstream.removeObserver('/success', self.onSuccess) + try: + condition = failure.firstChildElement().name + except AttributeError: + condition = None + self._deferred.errback(SASLAuthError(condition)) diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/sasl_mechanisms.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/sasl_mechanisms.py new file mode 100644 index 000000000000..390f6cbe22fc --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/sasl_mechanisms.py @@ -0,0 +1,240 @@ +# -*- test-case-name: twisted.words.test.test_jabbersaslmechanisms -*- +# +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Protocol agnostic implementations of SASL authentication mechanisms. +""" + +import binascii, random, time, os + +from zope.interface import Interface, Attribute, implements + +from twisted.python.hashlib import md5 + +class ISASLMechanism(Interface): + name = Attribute("""Common name for the SASL Mechanism.""") + + def getInitialResponse(): + """ + Get the initial client response, if defined for this mechanism. + + @return: initial client response string. + @rtype: L{str}. + """ + + + def getResponse(challenge): + """ + Get the response to a server challenge. + + @param challenge: server challenge. + @type challenge: L{str}. + @return: client response. + @rtype: L{str}. + """ + + + +class Anonymous(object): + """ + Implements the ANONYMOUS SASL authentication mechanism. + + This mechanism is defined in RFC 2245. + """ + implements(ISASLMechanism) + name = 'ANONYMOUS' + + def getInitialResponse(self): + return None + + + +class Plain(object): + """ + Implements the PLAIN SASL authentication mechanism. + + The PLAIN SASL authentication mechanism is defined in RFC 2595. + """ + implements(ISASLMechanism) + + name = 'PLAIN' + + def __init__(self, authzid, authcid, password): + self.authzid = authzid or '' + self.authcid = authcid or '' + self.password = password or '' + + + def getInitialResponse(self): + return "%s\x00%s\x00%s" % (self.authzid.encode('utf-8'), + self.authcid.encode('utf-8'), + self.password.encode('utf-8')) + + + +class DigestMD5(object): + """ + Implements the DIGEST-MD5 SASL authentication mechanism. + + The DIGEST-MD5 SASL authentication mechanism is defined in RFC 2831. 
+ """ + implements(ISASLMechanism) + + name = 'DIGEST-MD5' + + def __init__(self, serv_type, host, serv_name, username, password): + self.username = username + self.password = password + self.defaultRealm = host + + self.digest_uri = '%s/%s' % (serv_type, host) + if serv_name is not None: + self.digest_uri += '/%s' % serv_name + + + def getInitialResponse(self): + return None + + + def getResponse(self, challenge): + directives = self._parse(challenge) + + # Compat for implementations that do not send this along with + # a succesful authentication. + if 'rspauth' in directives: + return '' + + try: + realm = directives['realm'] + except KeyError: + realm = self.defaultRealm + + return self._gen_response(directives['charset'], + realm, + directives['nonce']) + + def _parse(self, challenge): + """ + Parses the server challenge. + + Splits the challenge into a dictionary of directives with values. + + @return: challenge directives and their values. + @rtype: L{dict} of L{str} to L{str}. + """ + s = challenge + paramDict = {} + cur = 0 + remainingParams = True + while remainingParams: + # Parse a param. We can't just split on commas, because there can + # be some commas inside (quoted) param values, e.g.: + # qop="auth,auth-int" + + middle = s.index("=", cur) + name = s[cur:middle].lstrip() + middle += 1 + if s[middle] == '"': + middle += 1 + end = s.index('"', middle) + value = s[middle:end] + cur = s.find(',', end) + 1 + if cur == 0: + remainingParams = False + else: + end = s.find(',', middle) + if end == -1: + value = s[middle:].rstrip() + remainingParams = False + else: + value = s[middle:end].rstrip() + cur = end + 1 + paramDict[name] = value + + for param in ('qop', 'cipher'): + if param in paramDict: + paramDict[param] = paramDict[param].split(',') + + return paramDict + + def _unparse(self, directives): + """ + Create message string from directives. + + @param directives: dictionary of directives (names to their values). + For certain directives, extra quotes are added, as + needed. + @type directives: L{dict} of L{str} to L{str} + @return: message string. + @rtype: L{str}. + """ + + directive_list = [] + for name, value in directives.iteritems(): + if name in ('username', 'realm', 'cnonce', + 'nonce', 'digest-uri', 'authzid', 'cipher'): + directive = '%s="%s"' % (name, value) + else: + directive = '%s=%s' % (name, value) + + directive_list.append(directive) + + return ','.join(directive_list) + + + def _gen_response(self, charset, realm, nonce): + """ + Generate response-value. + + Creates a response to a challenge according to section 2.1.2.1 of + RFC 2831 using the L{charset}, L{realm} and L{nonce} directives + from the challenge. + """ + + def H(s): + return md5(s).digest() + + def HEX(n): + return binascii.b2a_hex(n) + + def KD(k, s): + return H('%s:%s' % (k, s)) + + try: + username = self.username.encode(charset) + password = self.password.encode(charset) + except UnicodeError: + # TODO - add error checking + raise + + nc = '%08x' % 1 # TODO: support subsequent auth. 
+ cnonce = self._gen_nonce() + qop = 'auth' + + # TODO - add support for authzid + a1 = "%s:%s:%s" % (H("%s:%s:%s" % (username, realm, password)), + nonce, + cnonce) + a2 = "AUTHENTICATE:%s" % self.digest_uri + + response = HEX( KD ( HEX(H(a1)), + "%s:%s:%s:%s:%s" % (nonce, nc, + cnonce, "auth", HEX(H(a2))))) + + directives = {'username': username, + 'realm' : realm, + 'nonce' : nonce, + 'cnonce' : cnonce, + 'nc' : nc, + 'qop' : qop, + 'digest-uri': self.digest_uri, + 'response': response, + 'charset': charset} + + return self._unparse(directives) + + + def _gen_nonce(self): + return md5("%s:%s:%s" % (str(random.random()) , str(time.gmtime()),str(os.getpid()))).hexdigest() diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/xmlstream.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/xmlstream.py new file mode 100644 index 000000000000..b5847b7204b1 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/xmlstream.py @@ -0,0 +1,1136 @@ +# -*- test-case-name: twisted.words.test.test_jabberxmlstream -*- +# +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +XMPP XML Streams + +Building blocks for setting up XML Streams, including helping classes for +doing authentication on either client or server side, and working with XML +Stanzas. +""" + +from zope.interface import directlyProvides, implements + +from twisted.internet import defer, protocol +from twisted.internet.error import ConnectionLost +from twisted.python import failure, log, randbytes +from twisted.python.hashlib import sha1 +from twisted.words.protocols.jabber import error, ijabber, jid +from twisted.words.xish import domish, xmlstream +from twisted.words.xish.xmlstream import STREAM_CONNECTED_EVENT +from twisted.words.xish.xmlstream import STREAM_START_EVENT +from twisted.words.xish.xmlstream import STREAM_END_EVENT +from twisted.words.xish.xmlstream import STREAM_ERROR_EVENT + +try: + from twisted.internet import ssl +except ImportError: + ssl = None +if ssl and not ssl.supported: + ssl = None + +STREAM_AUTHD_EVENT = intern("//event/stream/authd") +INIT_FAILED_EVENT = intern("//event/xmpp/initfailed") + +NS_STREAMS = 'http://etherx.jabber.org/streams' +NS_XMPP_TLS = 'urn:ietf:params:xml:ns:xmpp-tls' + +Reset = object() + +def hashPassword(sid, password): + """ + Create a SHA1-digest string of a session identifier and password. + + @param sid: The stream session identifier. + @type sid: C{unicode}. + @param password: The password to be hashed. + @type password: C{unicode}. + """ + if not isinstance(sid, unicode): + raise TypeError("The session identifier must be a unicode object") + if not isinstance(password, unicode): + raise TypeError("The password must be a unicode object") + input = u"%s%s" % (sid, password) + return sha1(input.encode('utf-8')).hexdigest() + + + +class Authenticator: + """ + Base class for business logic of initializing an XmlStream + + Subclass this object to enable an XmlStream to initialize and authenticate + to different types of stream hosts (such as clients, components, etc.). + + Rules: + 1. The Authenticator MUST dispatch a L{STREAM_AUTHD_EVENT} when the + stream has been completely initialized. + 2. The Authenticator SHOULD reset all state information when + L{associateWithStream} is called. + 3. The Authenticator SHOULD override L{streamStarted}, and start + initialization there. + + @type xmlstream: L{XmlStream} + @ivar xmlstream: The XmlStream that needs authentication + + @note: the term authenticator is historical. 
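Looking back at DigestMD5._gen_response above, the response-value it produces can be condensed to a single function following RFC 2831, section 2.1.2.1 (Python 2 string semantics to match the module, qop fixed to 'auth', no authzid; all argument values are placeholders):

    from hashlib import md5

    def digest_md5_response(username, realm, password, nonce, cnonce,
                            nc='00000001', digest_uri='xmpp/example.com'):
        # A1 = H(username:realm:password) : nonce : cnonce
        a1 = "%s:%s:%s" % (md5("%s:%s:%s" % (username, realm, password)).digest(),
                           nonce, cnonce)
        # A2 = AUTHENTICATE : digest-uri
        a2 = "AUTHENTICATE:%s" % digest_uri
        # response = HEX(KD(HEX(H(A1)), nonce:nc:cnonce:qop:HEX(H(A2))))
        return md5("%s:%s:%s:%s:auth:%s" % (md5(a1).hexdigest(), nonce, nc,
                                            cnonce, md5(a2).hexdigest())).hexdigest()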
Authenticators perform + all steps required to prepare the stream for the exchange + of XML stanzas. + """ + + def __init__(self): + self.xmlstream = None + + + def connectionMade(self): + """ + Called by the XmlStream when the underlying socket connection is + in place. + + This allows the Authenticator to send an initial root element, if it's + connecting, or wait for an inbound root from the peer if it's accepting + the connection. + + Subclasses can use self.xmlstream.send() to send any initial data to + the peer. + """ + + + def streamStarted(self, rootElement): + """ + Called by the XmlStream when the stream has started. + + A stream is considered to have started when the start tag of the root + element has been received. + + This examines L{rootElement} to see if there is a version attribute. + If absent, C{0.0} is assumed per RFC 3920. Subsequently, the + minimum of the version from the received stream header and the + value stored in L{xmlstream} is taken and put back in {xmlstream}. + + Extensions of this method can extract more information from the + stream header and perform checks on them, optionally sending + stream errors and closing the stream. + """ + if rootElement.hasAttribute("version"): + version = rootElement["version"].split(".") + try: + version = (int(version[0]), int(version[1])) + except (IndexError, ValueError): + version = (0, 0) + else: + version = (0, 0) + + self.xmlstream.version = min(self.xmlstream.version, version) + + + def associateWithStream(self, xmlstream): + """ + Called by the XmlStreamFactory when a connection has been made + to the requested peer, and an XmlStream object has been + instantiated. + + The default implementation just saves a handle to the new + XmlStream. + + @type xmlstream: L{XmlStream} + @param xmlstream: The XmlStream that will be passing events to this + Authenticator. + + """ + self.xmlstream = xmlstream + + + +class ConnectAuthenticator(Authenticator): + """ + Authenticator for initiating entities. + """ + + namespace = None + + def __init__(self, otherHost): + self.otherHost = otherHost + + + def connectionMade(self): + self.xmlstream.namespace = self.namespace + self.xmlstream.otherEntity = jid.internJID(self.otherHost) + self.xmlstream.sendHeader() + + + def initializeStream(self): + """ + Perform stream initialization procedures. + + An L{XmlStream} holds a list of initializer objects in its + C{initializers} attribute. This method calls these initializers in + order and dispatches the C{STREAM_AUTHD_EVENT} event when the list has + been successfully processed. Otherwise it dispatches the + C{INIT_FAILED_EVENT} event with the failure. + + Initializers may return the special L{Reset} object to halt the + initialization processing. It signals that the current initializer was + successfully processed, but that the XML Stream has been reset. An + example is the TLSInitiatingInitializer. + """ + + def remove_first(result): + self.xmlstream.initializers.pop(0) + + return result + + def do_next(result): + """ + Take the first initializer and process it. + + On success, the initializer is removed from the list and + then next initializer will be tried. 
+ """ + + if result is Reset: + return None + + try: + init = self.xmlstream.initializers[0] + except IndexError: + self.xmlstream.dispatch(self.xmlstream, STREAM_AUTHD_EVENT) + return None + else: + d = defer.maybeDeferred(init.initialize) + d.addCallback(remove_first) + d.addCallback(do_next) + return d + + d = defer.succeed(None) + d.addCallback(do_next) + d.addErrback(self.xmlstream.dispatch, INIT_FAILED_EVENT) + + + def streamStarted(self, rootElement): + """ + Called by the XmlStream when the stream has started. + + This extends L{Authenticator.streamStarted} to extract further stream + headers from L{rootElement}, optionally wait for stream features being + received and then call C{initializeStream}. + """ + + Authenticator.streamStarted(self, rootElement) + + self.xmlstream.sid = rootElement.getAttribute("id") + + if rootElement.hasAttribute("from"): + self.xmlstream.otherEntity = jid.internJID(rootElement["from"]) + + # Setup observer for stream features, if applicable + if self.xmlstream.version >= (1, 0): + def onFeatures(element): + features = {} + for feature in element.elements(): + features[(feature.uri, feature.name)] = feature + + self.xmlstream.features = features + self.initializeStream() + + self.xmlstream.addOnetimeObserver('/features[@xmlns="%s"]' % + NS_STREAMS, + onFeatures) + else: + self.initializeStream() + + + +class ListenAuthenticator(Authenticator): + """ + Authenticator for receiving entities. + """ + + namespace = None + + def associateWithStream(self, xmlstream): + """ + Called by the XmlStreamFactory when a connection has been made. + + Extend L{Authenticator.associateWithStream} to set the L{XmlStream} + to be non-initiating. + """ + Authenticator.associateWithStream(self, xmlstream) + self.xmlstream.initiating = False + + + def streamStarted(self, rootElement): + """ + Called by the XmlStream when the stream has started. + + This extends L{Authenticator.streamStarted} to extract further + information from the stream headers from L{rootElement}. + """ + Authenticator.streamStarted(self, rootElement) + + self.xmlstream.namespace = rootElement.defaultUri + + if rootElement.hasAttribute("to"): + self.xmlstream.thisEntity = jid.internJID(rootElement["to"]) + + self.xmlstream.prefixes = {} + for prefix, uri in rootElement.localPrefixes.iteritems(): + self.xmlstream.prefixes[uri] = prefix + + self.xmlstream.sid = randbytes.secureRandom(8).encode('hex') + + + +class FeatureNotAdvertized(Exception): + """ + Exception indicating a stream feature was not advertized, while required by + the initiating entity. + """ + + + +class BaseFeatureInitiatingInitializer(object): + """ + Base class for initializers with a stream feature. + + This assumes the associated XmlStream represents the initiating entity + of the connection. + + @cvar feature: tuple of (uri, name) of the stream feature root element. + @type feature: tuple of (L{str}, L{str}) + @ivar required: whether the stream feature is required to be advertized + by the receiving entity. + @type required: L{bool} + """ + + implements(ijabber.IInitiatingInitializer) + + feature = None + required = False + + def __init__(self, xs): + self.xmlstream = xs + + + def initialize(self): + """ + Initiate the initialization. + + Checks if the receiving entity advertizes the stream feature. If it + does, the initialization is started. If it is not advertized, and the + C{required} instance variable is L{True}, it raises + L{FeatureNotAdvertized}. Otherwise, the initialization silently + succeeds. 
+ """ + + if self.feature in self.xmlstream.features: + return self.start() + elif self.required: + raise FeatureNotAdvertized + else: + return None + + + def start(self): + """ + Start the actual initialization. + + May return a deferred for asynchronous initialization. + """ + + + +class TLSError(Exception): + """ + TLS base exception. + """ + + + +class TLSFailed(TLSError): + """ + Exception indicating failed TLS negotiation + """ + + + +class TLSRequired(TLSError): + """ + Exception indicating required TLS negotiation. + + This exception is raised when the receiving entity requires TLS + negotiation and the initiating does not desire to negotiate TLS. + """ + + + +class TLSNotSupported(TLSError): + """ + Exception indicating missing TLS support. + + This exception is raised when the initiating entity wants and requires to + negotiate TLS when the OpenSSL library is not available. + """ + + + +class TLSInitiatingInitializer(BaseFeatureInitiatingInitializer): + """ + TLS stream initializer for the initiating entity. + + It is strongly required to include this initializer in the list of + initializers for an XMPP stream. By default it will try to negotiate TLS. + An XMPP server may indicate that TLS is required. If TLS is not desired, + set the C{wanted} attribute to False instead of removing it from the list + of initializers, so a proper exception L{TLSRequired} can be raised. + + @cvar wanted: indicates if TLS negotiation is wanted. + @type wanted: L{bool} + """ + + feature = (NS_XMPP_TLS, 'starttls') + wanted = True + _deferred = None + + def onProceed(self, obj): + """ + Proceed with TLS negotiation and reset the XML stream. + """ + + self.xmlstream.removeObserver('/failure', self.onFailure) + ctx = ssl.CertificateOptions() + self.xmlstream.transport.startTLS(ctx) + self.xmlstream.reset() + self.xmlstream.sendHeader() + self._deferred.callback(Reset) + + + def onFailure(self, obj): + self.xmlstream.removeObserver('/proceed', self.onProceed) + self._deferred.errback(TLSFailed()) + + + def start(self): + """ + Start TLS negotiation. + + This checks if the receiving entity requires TLS, the SSL library is + available and uses the C{required} and C{wanted} instance variables to + determine what to do in the various different cases. + + For example, if the SSL library is not available, and wanted and + required by the user, it raises an exception. However if it is not + required by both parties, initialization silently succeeds, moving + on to the next step. + """ + if self.wanted: + if ssl is None: + if self.required: + return defer.fail(TLSNotSupported()) + else: + return defer.succeed(None) + else: + pass + elif self.xmlstream.features[self.feature].required: + return defer.fail(TLSRequired()) + else: + return defer.succeed(None) + + self._deferred = defer.Deferred() + self.xmlstream.addOnetimeObserver("/proceed", self.onProceed) + self.xmlstream.addOnetimeObserver("/failure", self.onFailure) + self.xmlstream.send(domish.Element((NS_XMPP_TLS, "starttls"))) + return self._deferred + + + +class XmlStream(xmlstream.XmlStream): + """ + XMPP XML Stream protocol handler. + + @ivar version: XML stream version as a tuple (major, minor). Initially, + this is set to the minimally supported version. Upon + receiving the stream header of the peer, it is set to the + minimum of that value and the version on the received + header. 
+ @type version: (L{int}, L{int}) + @ivar namespace: default namespace URI for stream + @type namespace: L{str} + @ivar thisEntity: JID of this entity + @type thisEntity: L{JID} + @ivar otherEntity: JID of the peer entity + @type otherEntity: L{JID} + @ivar sid: session identifier + @type sid: L{str} + @ivar initiating: True if this is the initiating stream + @type initiating: L{bool} + @ivar features: map of (uri, name) to stream features element received from + the receiving entity. + @type features: L{dict} of (L{str}, L{str}) to L{domish.Element}. + @ivar prefixes: map of URI to prefixes that are to appear on stream + header. + @type prefixes: L{dict} of L{str} to L{str} + @ivar initializers: list of stream initializer objects + @type initializers: L{list} of objects that provide L{IInitializer} + @ivar authenticator: associated authenticator that uses C{initializers} to + initialize the XML stream. + """ + + version = (1, 0) + namespace = 'invalid' + thisEntity = None + otherEntity = None + sid = None + initiating = True + + _headerSent = False # True if the stream header has been sent + + def __init__(self, authenticator): + xmlstream.XmlStream.__init__(self) + + self.prefixes = {NS_STREAMS: 'stream'} + self.authenticator = authenticator + self.initializers = [] + self.features = {} + + # Reset the authenticator + authenticator.associateWithStream(self) + + + def _callLater(self, *args, **kwargs): + from twisted.internet import reactor + return reactor.callLater(*args, **kwargs) + + + def reset(self): + """ + Reset XML Stream. + + Resets the XML Parser for incoming data. This is to be used after + successfully negotiating a new layer, e.g. TLS and SASL. Note that + registered event observers will continue to be in place. + """ + self._headerSent = False + self._initializeStream() + + + def onStreamError(self, errelem): + """ + Called when a stream:error element has been received. + + Dispatches a L{STREAM_ERROR_EVENT} event with the error element to + allow for cleanup actions and drops the connection. + + @param errelem: The received error element. + @type errelem: L{domish.Element} + """ + self.dispatch(failure.Failure(error.exceptionFromStreamError(errelem)), + STREAM_ERROR_EVENT) + self.transport.loseConnection() + + + def sendHeader(self): + """ + Send stream header. + """ + # set up optional extra namespaces + localPrefixes = {} + for uri, prefix in self.prefixes.iteritems(): + if uri != NS_STREAMS: + localPrefixes[prefix] = uri + + rootElement = domish.Element((NS_STREAMS, 'stream'), self.namespace, + localPrefixes=localPrefixes) + + if self.otherEntity: + rootElement['to'] = self.otherEntity.userhost() + + if self.thisEntity: + rootElement['from'] = self.thisEntity.userhost() + + if not self.initiating and self.sid: + rootElement['id'] = self.sid + + if self.version >= (1, 0): + rootElement['version'] = "%d.%d" % self.version + + self.send(rootElement.toXml(prefixes=self.prefixes, closeElement=0)) + self._headerSent = True + + + def sendFooter(self): + """ + Send stream footer. + """ + self.send('') + + + def sendStreamError(self, streamError): + """ + Send stream level error. + + If we are the receiving entity, and haven't sent the header yet, + we sent one first. + + After sending the stream error, the stream is closed and the transport + connection dropped. 
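As an illustration only (editor's sketch, not part of the module; 'xs' stands for an established XmlStream), a receiving entity that does not serve the addressed host might do:

# error.StreamError is the stream-level error wrapper used elsewhere in this
# package; 'host-unknown' is one of the defined stream error conditions.
xs.sendStreamError(error.StreamError('host-unknown'))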
+ + @param streamError: stream error instance + @type streamError: L{error.StreamError} + """ + if not self._headerSent and not self.initiating: + self.sendHeader() + + if self._headerSent: + self.send(streamError.getElement()) + self.sendFooter() + + self.transport.loseConnection() + + + def send(self, obj): + """ + Send data over the stream. + + This overrides L{xmlstream.Xmlstream.send} to use the default namespace + of the stream header when serializing L{domish.IElement}s. It is + assumed that if you pass an object that provides L{domish.IElement}, + it represents a direct child of the stream's root element. + """ + if domish.IElement.providedBy(obj): + obj = obj.toXml(prefixes=self.prefixes, + defaultUri=self.namespace, + prefixesInScope=self.prefixes.values()) + + xmlstream.XmlStream.send(self, obj) + + + def connectionMade(self): + """ + Called when a connection is made. + + Notifies the authenticator when a connection has been made. + """ + xmlstream.XmlStream.connectionMade(self) + self.authenticator.connectionMade() + + + def onDocumentStart(self, rootElement): + """ + Called when the stream header has been received. + + Extracts the header's C{id} and C{version} attributes from the root + element. The C{id} attribute is stored in our C{sid} attribute and the + C{version} attribute is parsed and the minimum of the version we sent + and the parsed C{version} attribute is stored as a tuple (major, minor) + in this class' C{version} attribute. If no C{version} attribute was + present, we assume version 0.0. + + If appropriate (we are the initiating stream and the minimum of our and + the other party's version is at least 1.0), a one-time observer is + registered for getting the stream features. The registered function is + C{onFeatures}. + + Ultimately, the authenticator's C{streamStarted} method will be called. + + @param rootElement: The root element. + @type rootElement: L{domish.Element} + """ + xmlstream.XmlStream.onDocumentStart(self, rootElement) + + # Setup observer for stream errors + self.addOnetimeObserver("/error[@xmlns='%s']" % NS_STREAMS, + self.onStreamError) + + self.authenticator.streamStarted(rootElement) + + + +class XmlStreamFactory(xmlstream.XmlStreamFactory): + """ + Factory for Jabber XmlStream objects as a reconnecting client. + + Note that this differs from L{xmlstream.XmlStreamFactory} in that + it generates Jabber specific L{XmlStream} instances that have + authenticators. + """ + + protocol = XmlStream + + def __init__(self, authenticator): + xmlstream.XmlStreamFactory.__init__(self, authenticator) + self.authenticator = authenticator + + + +class XmlStreamServerFactory(xmlstream.BootstrapMixin, + protocol.ServerFactory): + """ + Factory for Jabber XmlStream objects as a server. + + @since: 8.2. + @ivar authenticatorFactory: Factory callable that takes no arguments, to + create a fresh authenticator to be associated + with the XmlStream. + """ + + protocol = XmlStream + + def __init__(self, authenticatorFactory): + xmlstream.BootstrapMixin.__init__(self) + self.authenticatorFactory = authenticatorFactory + + + def buildProtocol(self, addr): + """ + Create an instance of XmlStream. + + A new authenticator instance will be created and passed to the new + XmlStream. Registered bootstrap event observers are installed as well. 
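A brief wiring sketch (editor's example with hypothetical names; addBootstrap is inherited from xmlstream.BootstrapMixin):

def makeAuthenticator():
    # One fresh authenticator per incoming connection.
    return ListenAuthenticator()

factory = XmlStreamServerFactory(makeAuthenticator)
factory.addBootstrap(STREAM_CONNECTED_EVENT,
                     lambda xs: log.msg("incoming XML stream"))
# reactor.listenTCP(5222, factory) would then accept client streams.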
+ """ + authenticator = self.authenticatorFactory() + xs = self.protocol(authenticator) + xs.factory = self + self.installBootstraps(xs) + return xs + + + +class TimeoutError(Exception): + """ + Exception raised when no IQ response has been received before the + configured timeout. + """ + + + +def upgradeWithIQResponseTracker(xs): + """ + Enhances an XmlStream for iq response tracking. + + This makes an L{XmlStream} object provide L{IIQResponseTracker}. When a + response is an error iq stanza, the deferred has its errback invoked with a + failure that holds a L{StanzaException} that is + easier to examine. + """ + def callback(iq): + """ + Handle iq response by firing associated deferred. + """ + if getattr(iq, 'handled', False): + return + + try: + d = xs.iqDeferreds[iq["id"]] + except KeyError: + pass + else: + del xs.iqDeferreds[iq["id"]] + iq.handled = True + if iq['type'] == 'error': + d.errback(error.exceptionFromStanza(iq)) + else: + d.callback(iq) + + + def disconnected(_): + """ + Make sure deferreds do not linger on after disconnect. + + This errbacks all deferreds of iq's for which no response has been + received with a L{ConnectionLost} failure. Otherwise, the deferreds + will never be fired. + """ + iqDeferreds = xs.iqDeferreds + xs.iqDeferreds = {} + for d in iqDeferreds.itervalues(): + d.errback(ConnectionLost()) + + xs.iqDeferreds = {} + xs.iqDefaultTimeout = getattr(xs, 'iqDefaultTimeout', None) + xs.addObserver(xmlstream.STREAM_END_EVENT, disconnected) + xs.addObserver('/iq[@type="result"]', callback) + xs.addObserver('/iq[@type="error"]', callback) + directlyProvides(xs, ijabber.IIQResponseTracker) + + + +class IQ(domish.Element): + """ + Wrapper for an iq stanza. + + Iq stanzas are used for communications with a request-response behaviour. + Each iq request is associated with an XML stream and has its own unique id + to be able to track the response. + + @ivar timeout: if set, a timeout period after which the deferred returned + by C{send} will have its errback called with a + L{TimeoutError} failure. + @type timeout: C{float} + """ + + timeout = None + + def __init__(self, xmlstream, stanzaType="set"): + """ + @type xmlstream: L{xmlstream.XmlStream} + @param xmlstream: XmlStream to use for transmission of this IQ + + @type stanzaType: L{str} + @param stanzaType: IQ type identifier ('get' or 'set') + """ + domish.Element.__init__(self, (None, "iq")) + self.addUniqueId() + self["type"] = stanzaType + self._xmlstream = xmlstream + + + def send(self, to=None): + """ + Send out this iq. + + Returns a deferred that is fired when an iq response with the same id + is received. Result responses will be passed to the deferred callback. + Error responses will be transformed into a + L{StanzaError} and result in the errback of the + deferred being invoked. 
+ + @rtype: L{defer.Deferred} + """ + if to is not None: + self["to"] = to + + if not ijabber.IIQResponseTracker.providedBy(self._xmlstream): + upgradeWithIQResponseTracker(self._xmlstream) + + d = defer.Deferred() + self._xmlstream.iqDeferreds[self['id']] = d + + timeout = self.timeout or self._xmlstream.iqDefaultTimeout + if timeout is not None: + def onTimeout(): + del self._xmlstream.iqDeferreds[self['id']] + d.errback(TimeoutError("IQ timed out")) + + call = self._xmlstream._callLater(timeout, onTimeout) + + def cancelTimeout(result): + if call.active(): + call.cancel() + + return result + + d.addBoth(cancelTimeout) + + self._xmlstream.send(self) + return d + + + +def toResponse(stanza, stanzaType=None): + """ + Create a response stanza from another stanza. + + This takes the addressing and id attributes from a stanza to create a (new, + empty) response stanza. The addressing attributes are swapped and the id + copied. Optionally, the stanza type of the response can be specified. + + @param stanza: the original stanza + @type stanza: L{domish.Element} + @param stanzaType: optional response stanza type + @type stanzaType: C{str} + @return: the response stanza. + @rtype: L{domish.Element} + """ + + toAddr = stanza.getAttribute('from') + fromAddr = stanza.getAttribute('to') + stanzaID = stanza.getAttribute('id') + + response = domish.Element((None, stanza.name)) + if toAddr: + response['to'] = toAddr + if fromAddr: + response['from'] = fromAddr + if stanzaID: + response['id'] = stanzaID + if stanzaType: + response['type'] = stanzaType + + return response + + + +class XMPPHandler(object): + """ + XMPP protocol handler. + + Classes derived from this class implement (part of) one or more XMPP + extension protocols, and are referred to as a subprotocol implementation. + """ + + implements(ijabber.IXMPPHandler) + + def __init__(self): + self.parent = None + self.xmlstream = None + + + def setHandlerParent(self, parent): + self.parent = parent + self.parent.addHandler(self) + + + def disownHandlerParent(self, parent): + self.parent.removeHandler(self) + self.parent = None + + + def makeConnection(self, xs): + self.xmlstream = xs + self.connectionMade() + + + def connectionMade(self): + """ + Called after a connection has been established. + + Can be overridden to perform work before stream initialization. + """ + + + def connectionInitialized(self): + """ + The XML stream has been initialized. + + Can be overridden to perform work after stream initialization, e.g. to + set up observers and start exchanging XML stanzas. + """ + + + def connectionLost(self, reason): + """ + The XML stream has been closed. + + This method can be extended to inspect the C{reason} argument and + act on it. + """ + self.xmlstream = None + + + def send(self, obj): + """ + Send data over the managed XML stream. + + @note: The stream manager maintains a queue for data sent using this + method when there is no current initialized XML stream. This + data is then sent as soon as a new stream has been established + and initialized. Subsequently, L{connectionInitialized} will be + called again. If this queueing is not desired, use C{send} on + C{self.xmlstream}. + + @param obj: data to be sent over the XML stream. This is usually an + object providing L{domish.IElement}, or serialized XML. See + L{xmlstream.XmlStream} for details. + """ + self.parent.send(obj) + + + +class XMPPHandlerCollection(object): + """ + Collection of XMPP subprotocol handlers. 
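As a rough illustration of the handler pattern above (a hypothetical subclass, not part of the patch):

class MessageLogger(XMPPHandler):
    """Hypothetical handler that logs incoming message stanzas."""

    def connectionInitialized(self):
        self.xmlstream.addObserver('/message', self.onMessage)

    def onMessage(self, message):
        log.msg("message from %s" % (message.getAttribute('from'),))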
+ + This allows for grouping of subprotocol handlers, but is not an + L{XMPPHandler} itself, so this is not recursive. + + @ivar handlers: List of protocol handlers. + @type handlers: L{list} of objects providing + L{IXMPPHandler} + """ + + implements(ijabber.IXMPPHandlerCollection) + + def __init__(self): + self.handlers = [] + + + def __iter__(self): + """ + Act as a container for handlers. + """ + return iter(self.handlers) + + + def addHandler(self, handler): + """ + Add protocol handler. + + Protocol handlers are expected to provide L{ijabber.IXMPPHandler}. + """ + self.handlers.append(handler) + + + def removeHandler(self, handler): + """ + Remove protocol handler. + """ + self.handlers.remove(handler) + + + +class StreamManager(XMPPHandlerCollection): + """ + Business logic representing a managed XMPP connection. + + This maintains a single XMPP connection and provides facilities for packet + routing and transmission. Business logic modules are objects providing + L{ijabber.IXMPPHandler} (like subclasses of L{XMPPHandler}), and added + using L{addHandler}. + + @ivar xmlstream: currently managed XML stream + @type xmlstream: L{XmlStream} + @ivar logTraffic: if true, log all traffic. + @type logTraffic: L{bool} + @ivar _initialized: Whether the stream represented by L{xmlstream} has + been initialized. This is used when caching outgoing + stanzas. + @type _initialized: C{bool} + @ivar _packetQueue: internal buffer of unsent data. See L{send} for details. + @type _packetQueue: L{list} + """ + + logTraffic = False + + def __init__(self, factory): + XMPPHandlerCollection.__init__(self) + self.xmlstream = None + self._packetQueue = [] + self._initialized = False + + factory.addBootstrap(STREAM_CONNECTED_EVENT, self._connected) + factory.addBootstrap(STREAM_AUTHD_EVENT, self._authd) + factory.addBootstrap(INIT_FAILED_EVENT, self.initializationFailed) + factory.addBootstrap(STREAM_END_EVENT, self._disconnected) + self.factory = factory + + + def addHandler(self, handler): + """ + Add protocol handler. + + When an XML stream has already been established, the handler's + C{connectionInitialized} will be called to get it up to speed. + """ + XMPPHandlerCollection.addHandler(self, handler) + + # get protocol handler up to speed when a connection has already + # been established + if self.xmlstream and self._initialized: + handler.makeConnection(self.xmlstream) + handler.connectionInitialized() + + + def _connected(self, xs): + """ + Called when the transport connection has been established. + + Here we optionally set up traffic logging (depending on L{logTraffic}) + and call each handler's C{makeConnection} method with the L{XmlStream} + instance. + """ + def logDataIn(buf): + log.msg("RECV: %r" % buf) + + def logDataOut(buf): + log.msg("SEND: %r" % buf) + + if self.logTraffic: + xs.rawDataInFn = logDataIn + xs.rawDataOutFn = logDataOut + + self.xmlstream = xs + + for e in self: + e.makeConnection(xs) + + + def _authd(self, xs): + """ + Called when the stream has been initialized. + + Send out cached stanzas and call each handler's + C{connectionInitialized} method. + """ + # Flush all pending packets + for p in self._packetQueue: + xs.send(p) + self._packetQueue = [] + self._initialized = True + + # Notify all child services which implement + # the IService interface + for e in self: + e.connectionInitialized() + + + def initializationFailed(self, reason): + """ + Called when stream initialization has failed. + + Stream initialization has halted, with the reason indicated by + C{reason}. 
It may be retried by calling the authenticator's + C{initializeStream}. See the respective authenticators for details. + + @param reason: A failure instance indicating why stream initialization + failed. + @type reason: L{failure.Failure} + """ + + + def _disconnected(self, _): + """ + Called when the stream has been closed. + + From this point on, the manager doesn't interact with the + L{XmlStream} anymore and notifies each handler that the connection + was lost by calling its C{connectionLost} method. + """ + self.xmlstream = None + self._initialized = False + + # Notify all child services which implement + # the IService interface + for e in self: + e.connectionLost(None) + + + def send(self, obj): + """ + Send data over the XML stream. + + When there is no established XML stream, the data is queued and sent + out when a new XML stream has been established and initialized. + + @param obj: data to be sent over the XML stream. See + L{xmlstream.XmlStream.send} for details. + """ + if self._initialized: + self.xmlstream.send(obj) + else: + self._packetQueue.append(obj) + + + +__all__ = ['Authenticator', 'BaseFeatureInitiatingInitializer', + 'ConnectAuthenticator', 'ConnectionLost', 'FeatureNotAdvertized', + 'INIT_FAILED_EVENT', 'IQ', 'ListenAuthenticator', 'NS_STREAMS', + 'NS_XMPP_TLS', 'Reset', 'STREAM_AUTHD_EVENT', + 'STREAM_CONNECTED_EVENT', 'STREAM_END_EVENT', 'STREAM_ERROR_EVENT', + 'STREAM_START_EVENT', 'StreamManager', 'TLSError', 'TLSFailed', + 'TLSInitiatingInitializer', 'TLSNotSupported', 'TLSRequired', + 'TimeoutError', 'XMPPHandler', 'XMPPHandlerCollection', 'XmlStream', + 'XmlStreamFactory', 'XmlStreamServerFactory', 'hashPassword', + 'toResponse', 'upgradeWithIQResponseTracker'] diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/xmpp_stringprep.py b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/xmpp_stringprep.py new file mode 100644 index 000000000000..87025fb2efa3 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/jabber/xmpp_stringprep.py @@ -0,0 +1,248 @@ +# -*- test-case-name: twisted.words.test.test_jabberxmppstringprep -*- +# +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. + +import sys, warnings +from zope.interface import Interface, implements + +if sys.version_info < (2,3,2): + import re + + class IDNA: + dots = re.compile(u"[\u002E\u3002\uFF0E\uFF61]") + def nameprep(self, label): + return label.lower() + + idna = IDNA() + + crippled = True + + warnings.warn("Accented and non-Western Jabber IDs will not be properly " + "case-folded with this version of Python, resulting in " + "incorrect protocol-level behavior. It is strongly " + "recommended you upgrade to Python 2.3.2 or newer if you " + "intend to use Twisted's Jabber support.") + +else: + import stringprep + import unicodedata + from encodings import idna + + crippled = False + +del sys, warnings + +class ILookupTable(Interface): + """ Interface for character lookup classes. """ + + def lookup(c): + """ Return whether character is in this table. """ + +class IMappingTable(Interface): + """ Interface for character mapping classes. """ + + def map(c): + """ Return mapping for character. 
""" + +class LookupTableFromFunction: + + implements(ILookupTable) + + def __init__(self, in_table_function): + self.lookup = in_table_function + +class LookupTable: + + implements(ILookupTable) + + def __init__(self, table): + self._table = table + + def lookup(self, c): + return c in self._table + +class MappingTableFromFunction: + + implements(IMappingTable) + + def __init__(self, map_table_function): + self.map = map_table_function + +class EmptyMappingTable: + + implements(IMappingTable) + + def __init__(self, in_table_function): + self._in_table_function = in_table_function + + def map(self, c): + if self._in_table_function(c): + return None + else: + return c + +class Profile: + def __init__(self, mappings=[], normalize=True, prohibiteds=[], + check_unassigneds=True, check_bidi=True): + self.mappings = mappings + self.normalize = normalize + self.prohibiteds = prohibiteds + self.do_check_unassigneds = check_unassigneds + self.do_check_bidi = check_bidi + + def prepare(self, string): + result = self.map(string) + if self.normalize: + result = unicodedata.normalize("NFKC", result) + self.check_prohibiteds(result) + if self.do_check_unassigneds: + self.check_unassigneds(result) + if self.do_check_bidi: + self.check_bidirectionals(result) + return result + + def map(self, string): + result = [] + + for c in string: + result_c = c + + for mapping in self.mappings: + result_c = mapping.map(c) + if result_c != c: + break + + if result_c is not None: + result.append(result_c) + + return u"".join(result) + + def check_prohibiteds(self, string): + for c in string: + for table in self.prohibiteds: + if table.lookup(c): + raise UnicodeError, "Invalid character %s" % repr(c) + + def check_unassigneds(self, string): + for c in string: + if stringprep.in_table_a1(c): + raise UnicodeError, "Unassigned code point %s" % repr(c) + + def check_bidirectionals(self, string): + found_LCat = False + found_RandALCat = False + + for c in string: + if stringprep.in_table_d1(c): + found_RandALCat = True + if stringprep.in_table_d2(c): + found_LCat = True + + if found_LCat and found_RandALCat: + raise UnicodeError, "Violation of BIDI Requirement 2" + + if found_RandALCat and not (stringprep.in_table_d1(string[0]) and + stringprep.in_table_d1(string[-1])): + raise UnicodeError, "Violation of BIDI Requirement 3" + + +class NamePrep: + """ Implements preparation of internationalized domain names. + + This class implements preparing internationalized domain names using the + rules defined in RFC 3491, section 4 (Conversion operations). + + We do not perform step 4 since we deal with unicode representations of + domain names and do not convert from or to ASCII representations using + punycode encoding. When such a conversion is needed, the L{idna} standard + library provides the C{ToUnicode()} and C{ToASCII()} functions. Note that + L{idna} itself assumes UseSTD3ASCIIRules to be false. + + The following steps are performed by C{prepare()}: + + - Split the domain name in labels at the dots (RFC 3490, 3.1) + - Apply nameprep proper on each label (RFC 3491) + - Enforce the restrictions on ASCII characters in host names by + assuming STD3ASCIIRules to be true. (STD 3) + - Rejoin the labels using the label separator U+002E (full stop). + + """ + + # Prohibited characters. 
+ prohibiteds = [unichr(n) for n in range(0x00, 0x2c + 1) + + range(0x2e, 0x2f + 1) + + range(0x3a, 0x40 + 1) + + range(0x5b, 0x60 + 1) + + range(0x7b, 0x7f + 1) ] + + def prepare(self, string): + result = [] + + labels = idna.dots.split(string) + + if labels and len(labels[-1]) == 0: + trailing_dot = '.' + del labels[-1] + else: + trailing_dot = '' + + for label in labels: + result.append(self.nameprep(label)) + + return ".".join(result) + trailing_dot + + def check_prohibiteds(self, string): + for c in string: + if c in self.prohibiteds: + raise UnicodeError, "Invalid character %s" % repr(c) + + def nameprep(self, label): + label = idna.nameprep(label) + self.check_prohibiteds(label) + if label[0] == '-': + raise UnicodeError, "Invalid leading hyphen-minus" + if label[-1] == '-': + raise UnicodeError, "Invalid trailing hyphen-minus" + return label + +if crippled: + case_map = MappingTableFromFunction(lambda c: c.lower()) + nodeprep = Profile(mappings=[case_map], + normalize=False, + prohibiteds=[LookupTable([u' ', u'"', u'&', u"'", u'/', + u':', u'<', u'>', u'@'])], + check_unassigneds=False, + check_bidi=False) + + resourceprep = Profile(normalize=False, + check_unassigneds=False, + check_bidi=False) + +else: + C_11 = LookupTableFromFunction(stringprep.in_table_c11) + C_12 = LookupTableFromFunction(stringprep.in_table_c12) + C_21 = LookupTableFromFunction(stringprep.in_table_c21) + C_22 = LookupTableFromFunction(stringprep.in_table_c22) + C_3 = LookupTableFromFunction(stringprep.in_table_c3) + C_4 = LookupTableFromFunction(stringprep.in_table_c4) + C_5 = LookupTableFromFunction(stringprep.in_table_c5) + C_6 = LookupTableFromFunction(stringprep.in_table_c6) + C_7 = LookupTableFromFunction(stringprep.in_table_c7) + C_8 = LookupTableFromFunction(stringprep.in_table_c8) + C_9 = LookupTableFromFunction(stringprep.in_table_c9) + + B_1 = EmptyMappingTable(stringprep.in_table_b1) + B_2 = MappingTableFromFunction(stringprep.map_table_b2) + + nodeprep = Profile(mappings=[B_1, B_2], + prohibiteds=[C_11, C_12, C_21, C_22, + C_3, C_4, C_5, C_6, C_7, C_8, C_9, + LookupTable([u'"', u'&', u"'", u'/', + u':', u'<', u'>', u'@'])]) + + resourceprep = Profile(mappings=[B_1,], + prohibiteds=[C_12, C_21, C_22, + C_3, C_4, C_5, C_6, C_7, C_8, C_9]) + +nameprep = NamePrep() diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/msn.py b/vendor/Twisted-10.0.0/twisted/words/protocols/msn.py new file mode 100644 index 000000000000..cdc5c3c1bd4b --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/msn.py @@ -0,0 +1,2449 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +MSNP8 Protocol (client only) - semi-experimental + +This module provides support for clients using the MSN Protocol (MSNP8). +There are basically 3 servers involved in any MSN session: + +I{Dispatch server} + +The DispatchClient class handles connections to the +dispatch server, which basically delegates users to a +suitable notification server. + +You will want to subclass this and handle the gotNotificationReferral +method appropriately. + +I{Notification Server} + +The NotificationClient class handles connections to the +notification server, which acts as a session server +(state updates, message negotiation etc...) + +I{Switcboard Server} + +The SwitchboardClient handles connections to switchboard +servers which are used to conduct conversations with other users. 
+ +There are also two classes (FileSend and FileReceive) used +for file transfers. + +Clients handle events in two ways. + + - each client request requiring a response will return a Deferred, + the callback for same will be fired when the server sends the + required response + - Events which are not in response to any client request have + respective methods which should be overridden and handled in + an adequate manner + +Most client request callbacks require more than one argument, +and since Deferreds can only pass the callback one result, +most of the time the callback argument will be a tuple of +values (documented in the respective request method). +To make reading/writing code easier, callbacks can be defined in +a number of ways to handle this 'cleanly'. One way would be to +define methods like: def callBack(self, (arg1, arg2, arg)): ... +another way would be to do something like: +d.addCallback(lambda result: myCallback(*result)). + +If the server sends an error response to a client request, +the errback of the corresponding Deferred will be called, +the argument being the corresponding error code. + +B{NOTE}: +Due to the lack of an official spec for MSNP8, extra checking +than may be deemed necessary often takes place considering the +server is never 'wrong'. Thus, if gotBadLine (in any of the 3 +main clients) is called, or an MSNProtocolError is raised, it's +probably a good idea to submit a bug report. ;) +Use of this module requires that PyOpenSSL is installed. + +TODO +==== +- check message hooks with invalid x-msgsinvite messages. +- font handling +- switchboard factory + +@author: Sam Jordan +""" + +import types, operator, os +from random import randint +from urllib import quote, unquote + +from twisted.python import failure, log +from twisted.python.hashlib import md5 +from twisted.internet import reactor +from twisted.internet.defer import Deferred +from twisted.internet.protocol import ClientFactory +try: + from twisted.internet.ssl import ClientContextFactory +except ImportError: + ClientContextFactory = None +from twisted.protocols.basic import LineReceiver +from twisted.web.http import HTTPClient + + +MSN_PROTOCOL_VERSION = "MSNP8 CVR0" # protocol version +MSN_PORT = 1863 # default dispatch server port +MSN_MAX_MESSAGE = 1664 # max message length +MSN_CHALLENGE_STR = "Q1P7W2E4J9R8U3S5" # used for server challenges +MSN_CVR_STR = "0x0409 win 4.10 i386 MSNMSGR 5.0.0544 MSMSGS" # :( + +# auth constants +LOGIN_SUCCESS = 1 +LOGIN_FAILURE = 2 +LOGIN_REDIRECT = 3 + +# list constants +FORWARD_LIST = 1 +ALLOW_LIST = 2 +BLOCK_LIST = 4 +REVERSE_LIST = 8 + +# phone constants +HOME_PHONE = "PHH" +WORK_PHONE = "PHW" +MOBILE_PHONE = "PHM" +HAS_PAGER = "MOB" + +# status constants +STATUS_ONLINE = 'NLN' +STATUS_OFFLINE = 'FLN' +STATUS_HIDDEN = 'HDN' +STATUS_IDLE = 'IDL' +STATUS_AWAY = 'AWY' +STATUS_BUSY = 'BSY' +STATUS_BRB = 'BRB' +STATUS_PHONE = 'PHN' +STATUS_LUNCH = 'LUN' + +CR = "\r" +LF = "\n" + +def checkParamLen(num, expected, cmd, error=None): + if error == None: + error = "Invalid Number of Parameters for %s" % cmd + if num != expected: + raise MSNProtocolError, error + +def _parseHeader(h, v): + """ + Split a certin number of known + header values with the format: + field1=val,field2=val,field3=val into + a dict mapping fields to values. 
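For example (editor's illustration of the mapping just described; the ticket value is made up):

fields = _parseHeader('authentication-info',
                      "Passport1.4 da-status=success,from-PP='t=fake-ticket'")
# fields == {'da-status': 'success', 'from-pp': "'t=fake-ticket'"}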
+ @param h: the header's key + @param v: the header's value as a string + """ + + if h in ('passporturls','authentication-info','www-authenticate'): + v = v.replace('Passport1.4','').lstrip() + fields = {} + for fieldPair in v.split(','): + try: + field,value = fieldPair.split('=',1) + fields[field.lower()] = value + except ValueError: + fields[field.lower()] = '' + return fields + else: + return v + +def _parsePrimitiveHost(host): + # Ho Ho Ho + h,p = host.replace('https://','').split('/',1) + p = '/' + p + return h,p + +def _login(userHandle, passwd, nexusServer, cached=0, authData=''): + """ + This function is used internally and should not ever be called + directly. + """ + cb = Deferred() + def _cb(server, auth): + loginFac = ClientFactory() + loginFac.protocol = lambda : PassportLogin(cb, userHandle, passwd, server, auth) + reactor.connectSSL(_parsePrimitiveHost(server)[0], 443, loginFac, ClientContextFactory()) + + if cached: + _cb(nexusServer, authData) + else: + fac = ClientFactory() + d = Deferred() + d.addCallbacks(_cb, callbackArgs=(authData,)) + d.addErrback(lambda f: cb.errback(f)) + fac.protocol = lambda : PassportNexus(d, nexusServer) + reactor.connectSSL(_parsePrimitiveHost(nexusServer)[0], 443, fac, ClientContextFactory()) + return cb + + +class PassportNexus(HTTPClient): + + """ + Used to obtain the URL of a valid passport + login HTTPS server. + + This class is used internally and should + not be instantiated directly -- that is, + The passport logging in process is handled + transparantly by NotificationClient. + """ + + def __init__(self, deferred, host): + self.deferred = deferred + self.host, self.path = _parsePrimitiveHost(host) + + def connectionMade(self): + HTTPClient.connectionMade(self) + self.sendCommand('GET', self.path) + self.sendHeader('Host', self.host) + self.endHeaders() + self.headers = {} + + def handleHeader(self, header, value): + h = header.lower() + self.headers[h] = _parseHeader(h, value) + + def handleEndHeaders(self): + if self.connected: + self.transport.loseConnection() + if not self.headers.has_key('passporturls') or not self.headers['passporturls'].has_key('dalogin'): + self.deferred.errback(failure.Failure(failure.DefaultException("Invalid Nexus Reply"))) + self.deferred.callback('https://' + self.headers['passporturls']['dalogin']) + + def handleResponse(self, r): + pass + +class PassportLogin(HTTPClient): + """ + This class is used internally to obtain + a login ticket from a passport HTTPS + server -- it should not be used directly. 
+ """ + + _finished = 0 + + def __init__(self, deferred, userHandle, passwd, host, authData): + self.deferred = deferred + self.userHandle = userHandle + self.passwd = passwd + self.authData = authData + self.host, self.path = _parsePrimitiveHost(host) + + def connectionMade(self): + self.sendCommand('GET', self.path) + self.sendHeader('Authorization', 'Passport1.4 OrgVerb=GET,OrgURL=http://messenger.msn.com,' + + 'sign-in=%s,pwd=%s,%s' % (quote(self.userHandle), self.passwd,self.authData)) + self.sendHeader('Host', self.host) + self.endHeaders() + self.headers = {} + + def handleHeader(self, header, value): + h = header.lower() + self.headers[h] = _parseHeader(h, value) + + def handleEndHeaders(self): + if self._finished: + return + self._finished = 1 # I think we need this because of HTTPClient + if self.connected: + self.transport.loseConnection() + authHeader = 'authentication-info' + _interHeader = 'www-authenticate' + if self.headers.has_key(_interHeader): + authHeader = _interHeader + try: + info = self.headers[authHeader] + status = info['da-status'] + handler = getattr(self, 'login_%s' % (status,), None) + if handler: + handler(info) + else: + raise Exception() + except Exception, e: + self.deferred.errback(failure.Failure(e)) + + def handleResponse(self, r): + pass + + def login_success(self, info): + ticket = info['from-pp'] + ticket = ticket[1:len(ticket)-1] + self.deferred.callback((LOGIN_SUCCESS, ticket)) + + def login_failed(self, info): + self.deferred.callback((LOGIN_FAILURE, unquote(info['cbtxt']))) + + def login_redir(self, info): + self.deferred.callback((LOGIN_REDIRECT, self.headers['location'], self.authData)) + + +class MSNProtocolError(Exception): + """ + This Exception is basically used for debugging + purposes, as the official MSN server should never + send anything _wrong_ and nobody in their right + mind would run their B{own} MSN server. + If it is raised by default command handlers + (handle_BLAH) the error will be logged. + """ + pass + + +class MSNCommandFailed(Exception): + """ + The server said that the command failed. + """ + + def __init__(self, errorCode): + self.errorCode = errorCode + + def __str__(self): + return ("Command failed: %s (error code %d)" + % (errorCodes[self.errorCode], self.errorCode)) + + +class MSNMessage: + """ + I am the class used to represent an 'instant' message. + + @ivar userHandle: The user handle (passport) of the sender + (this is only used when receiving a message) + @ivar screenName: The screen name of the sender (this is only used + when receiving a message) + @ivar message: The message + @ivar headers: The message headers + @type headers: dict + @ivar length: The message length (including headers and line endings) + @ivar ack: This variable is used to tell the server how to respond + once the message has been sent. If set to MESSAGE_ACK + (default) the server will respond with an ACK upon receiving + the message, if set to MESSAGE_NACK the server will respond + with a NACK upon failure to receive the message. + If set to MESSAGE_ACK_NONE the server will do nothing. + This is relevant for the return value of + SwitchboardClient.sendMessage (which will return + a Deferred if ack is set to either MESSAGE_ACK or MESSAGE_NACK + and will fire when the respective ACK or NACK is received). + If set to MESSAGE_ACK_NONE sendMessage will return None. 
+ """ + MESSAGE_ACK = 'A' + MESSAGE_NACK = 'N' + MESSAGE_ACK_NONE = 'U' + + ack = MESSAGE_ACK + + def __init__(self, length=0, userHandle="", screenName="", message=""): + self.userHandle = userHandle + self.screenName = screenName + self.message = message + self.headers = {'MIME-Version' : '1.0', 'Content-Type' : 'text/plain'} + self.length = length + self.readPos = 0 + + def _calcMessageLen(self): + """ + used to calculte the number to send + as the message length when sending a message. + """ + return reduce(operator.add, [len(x[0]) + len(x[1]) + 4 for x in self.headers.items()]) + len(self.message) + 2 + + def setHeader(self, header, value): + """ set the desired header """ + self.headers[header] = value + + def getHeader(self, header): + """ + get the desired header value + @raise KeyError: if no such header exists. + """ + return self.headers[header] + + def hasHeader(self, header): + """ check to see if the desired header exists """ + return self.headers.has_key(header) + + def getMessage(self): + """ return the message - not including headers """ + return self.message + + def setMessage(self, message): + """ set the message text """ + self.message = message + +class MSNContact: + + """ + This class represents a contact (user). + + @ivar userHandle: The contact's user handle (passport). + @ivar screenName: The contact's screen name. + @ivar groups: A list of all the group IDs which this + contact belongs to. + @ivar lists: An integer representing the sum of all lists + that this contact belongs to. + @ivar status: The contact's status code. + @type status: str if contact's status is known, None otherwise. + + @ivar homePhone: The contact's home phone number. + @type homePhone: str if known, otherwise None. + @ivar workPhone: The contact's work phone number. + @type workPhone: str if known, otherwise None. + @ivar mobilePhone: The contact's mobile phone number. + @type mobilePhone: str if known, otherwise None. + @ivar hasPager: Whether or not this user has a mobile pager + (true=yes, false=no) + """ + + def __init__(self, userHandle="", screenName="", lists=0, groups=[], status=None): + self.userHandle = userHandle + self.screenName = screenName + self.lists = lists + self.groups = [] # if applicable + self.status = status # current status + + # phone details + self.homePhone = None + self.workPhone = None + self.mobilePhone = None + self.hasPager = None + + def setPhone(self, phoneType, value): + """ + set phone numbers/values for this specific user. + for phoneType check the *_PHONE constants and HAS_PAGER + """ + + t = phoneType.upper() + if t == HOME_PHONE: + self.homePhone = value + elif t == WORK_PHONE: + self.workPhone = value + elif t == MOBILE_PHONE: + self.mobilePhone = value + elif t == HAS_PAGER: + self.hasPager = value + else: + raise ValueError, "Invalid Phone Type" + + def addToList(self, listType): + """ + Update the lists attribute to + reflect being part of the + given list. + """ + self.lists |= listType + + def removeFromList(self, listType): + """ + Update the lists attribute to + reflect being removed from the + given list. + """ + self.lists ^= listType + +class MSNContactList: + """ + This class represents a basic MSN contact list. 
+ + @ivar contacts: All contacts on my various lists + @type contacts: dict (mapping user handles to MSNContact objects) + @ivar version: The current contact list version (used for list syncing) + @ivar groups: a mapping of group ids to group names + (groups can only exist on the forward list) + @type groups: dict + + B{Note}: + This is used only for storage and doesn't effect the + server's contact list. + """ + + def __init__(self): + self.contacts = {} + self.version = 0 + self.groups = {} + self.autoAdd = 0 + self.privacy = 0 + + def _getContactsFromList(self, listType): + """ + Obtain all contacts which belong + to the given list type. + """ + return dict([(uH,obj) for uH,obj in self.contacts.items() if obj.lists & listType]) + + def addContact(self, contact): + """ + Add a contact + """ + self.contacts[contact.userHandle] = contact + + def remContact(self, userHandle): + """ + Remove a contact + """ + try: + del self.contacts[userHandle] + except KeyError: + pass + + def getContact(self, userHandle): + """ + Obtain the MSNContact object + associated with the given + userHandle. + @return: the MSNContact object if + the user exists, or None. + """ + try: + return self.contacts[userHandle] + except KeyError: + return None + + def getBlockedContacts(self): + """ + Obtain all the contacts on my block list + """ + return self._getContactsFromList(BLOCK_LIST) + + def getAuthorizedContacts(self): + """ + Obtain all the contacts on my auth list. + (These are contacts which I have verified + can view my state changes). + """ + return self._getContactsFromList(ALLOW_LIST) + + def getReverseContacts(self): + """ + Get all contacts on my reverse list. + (These are contacts which have added me + to their forward list). + """ + return self._getContactsFromList(REVERSE_LIST) + + def getContacts(self): + """ + Get all contacts on my forward list. + (These are the contacts which I have added + to my list). + """ + return self._getContactsFromList(FORWARD_LIST) + + def setGroup(self, id, name): + """ + Keep a mapping from the given id + to the given name. + """ + self.groups[id] = name + + def remGroup(self, id): + """ + Removed the stored group + mapping for the given id. + """ + try: + del self.groups[id] + except KeyError: + pass + for c in self.contacts: + if id in c.groups: + c.groups.remove(id) + + +class MSNEventBase(LineReceiver): + """ + This class provides support for handling / dispatching events and is the + base class of the three main client protocols (DispatchClient, + NotificationClient, SwitchboardClient) + """ + + def __init__(self): + self.ids = {} # mapping of ids to Deferreds + self.currentID = 0 + self.connected = 0 + self.setLineMode() + self.currentMessage = None + + def connectionLost(self, reason): + self.ids = {} + self.connected = 0 + + def connectionMade(self): + self.connected = 1 + + def _fireCallback(self, id, *args): + """ + Fire the callback for the given id + if one exists and return 1, else return false + """ + if self.ids.has_key(id): + self.ids[id][0].callback(args) + del self.ids[id] + return 1 + return 0 + + def _nextTransactionID(self): + """ return a usable transaction ID """ + self.currentID += 1 + if self.currentID > 1000: + self.currentID = 1 + return self.currentID + + def _createIDMapping(self, data=None): + """ + return a unique transaction ID that is mapped internally to a + deferred .. 
also store arbitrary data if it is needed + """ + id = self._nextTransactionID() + d = Deferred() + self.ids[id] = (d, data) + return (id, d) + + def checkMessage(self, message): + """ + process received messages to check for file invitations and + typing notifications and other control type messages + """ + raise NotImplementedError + + def lineReceived(self, line): + if self.currentMessage: + self.currentMessage.readPos += len(line+CR+LF) + if line == "": + self.setRawMode() + if self.currentMessage.readPos == self.currentMessage.length: + self.rawDataReceived("") # :( + return + try: + header, value = line.split(':') + except ValueError: + raise MSNProtocolError, "Invalid Message Header" + self.currentMessage.setHeader(header, unquote(value).lstrip()) + return + try: + cmd, params = line.split(' ', 1) + except ValueError: + raise MSNProtocolError, "Invalid Message, %s" % repr(line) + + if len(cmd) != 3: + raise MSNProtocolError, "Invalid Command, %s" % repr(cmd) + if cmd.isdigit(): + errorCode = int(cmd) + id = int(params.split()[0]) + if id in self.ids: + self.ids[id][0].errback(MSNCommandFailed(errorCode)) + del self.ids[id] + return + else: # we received an error which doesn't map to a sent command + self.gotError(errorCode) + return + + handler = getattr(self, "handle_%s" % cmd.upper(), None) + if handler: + try: + handler(params.split()) + except MSNProtocolError, why: + self.gotBadLine(line, why) + else: + self.handle_UNKNOWN(cmd, params.split()) + + def rawDataReceived(self, data): + extra = "" + self.currentMessage.readPos += len(data) + diff = self.currentMessage.readPos - self.currentMessage.length + if diff > 0: + self.currentMessage.message += data[:-diff] + extra = data[-diff:] + elif diff == 0: + self.currentMessage.message += data + else: + self.currentMessage += data + return + del self.currentMessage.readPos + m = self.currentMessage + self.currentMessage = None + self.setLineMode(extra) + if not self.checkMessage(m): + return + self.gotMessage(m) + + ### protocol command handlers - no need to override these. + + def handle_MSG(self, params): + checkParamLen(len(params), 3, 'MSG') + try: + messageLen = int(params[2]) + except ValueError: + raise MSNProtocolError, "Invalid Parameter for MSG length argument" + self.currentMessage = MSNMessage(length=messageLen, userHandle=params[0], screenName=unquote(params[1])) + + def handle_UNKNOWN(self, cmd, params): + """ implement me in subclasses if you want to handle unknown events """ + log.msg("Received unknown command (%s), params: %s" % (cmd, params)) + + ### callbacks + + def gotMessage(self, message): + """ + called when we receive a message - override in notification + and switchboard clients + """ + raise NotImplementedError + + def gotBadLine(self, line, why): + """ called when a handler notifies me that this line is broken """ + log.msg('Error in line: %s (%s)' % (line, why)) + + def gotError(self, errorCode): + """ + called when the server sends an error which is not in + response to a sent command (ie. it has no matching transaction ID) + """ + log.msg('Error %s' % (errorCodes[errorCode])) + + + +class DispatchClient(MSNEventBase): + """ + This class provides support for clients connecting to the dispatch server + @ivar userHandle: your user handle (passport) needed before connecting. + """ + + # eventually this may become an attribute of the + # factory. 
+ userHandle = "" + + def connectionMade(self): + MSNEventBase.connectionMade(self) + self.sendLine('VER %s %s' % (self._nextTransactionID(), MSN_PROTOCOL_VERSION)) + + ### protocol command handlers ( there is no need to override these ) + + def handle_VER(self, params): + id = self._nextTransactionID() + self.sendLine("CVR %s %s %s" % (id, MSN_CVR_STR, self.userHandle)) + + def handle_CVR(self, params): + self.sendLine("USR %s TWN I %s" % (self._nextTransactionID(), self.userHandle)) + + def handle_XFR(self, params): + if len(params) < 4: + raise MSNProtocolError, "Invalid number of parameters for XFR" + id, refType, addr = params[:3] + # was addr a host:port pair? + try: + host, port = addr.split(':') + except ValueError: + host = addr + port = MSN_PORT + if refType == "NS": + self.gotNotificationReferral(host, int(port)) + + ### callbacks + + def gotNotificationReferral(self, host, port): + """ + called when we get a referral to the notification server. + + @param host: the notification server's hostname + @param port: the port to connect to + """ + pass + + +class NotificationClient(MSNEventBase): + """ + This class provides support for clients connecting + to the notification server. + """ + + factory = None # sssh pychecker + + def __init__(self, currentID=0): + MSNEventBase.__init__(self) + self.currentID = currentID + self._state = ['DISCONNECTED', {}] + + def _setState(self, state): + self._state[0] = state + + def _getState(self): + return self._state[0] + + def _getStateData(self, key): + return self._state[1][key] + + def _setStateData(self, key, value): + self._state[1][key] = value + + def _remStateData(self, *args): + for key in args: + del self._state[1][key] + + def connectionMade(self): + MSNEventBase.connectionMade(self) + self._setState('CONNECTED') + self.sendLine("VER %s %s" % (self._nextTransactionID(), MSN_PROTOCOL_VERSION)) + + def connectionLost(self, reason): + self._setState('DISCONNECTED') + self._state[1] = {} + MSNEventBase.connectionLost(self, reason) + + def checkMessage(self, message): + """ hook used for detecting specific notification messages """ + cTypes = [s.lstrip() for s in message.getHeader('Content-Type').split(';')] + if 'text/x-msmsgsprofile' in cTypes: + self.gotProfile(message) + return 0 + return 1 + + ### protocol command handlers - no need to override these + + def handle_VER(self, params): + id = self._nextTransactionID() + self.sendLine("CVR %s %s %s" % (id, MSN_CVR_STR, self.factory.userHandle)) + + def handle_CVR(self, params): + self.sendLine("USR %s TWN I %s" % (self._nextTransactionID(), self.factory.userHandle)) + + def handle_USR(self, params): + if len(params) != 4 and len(params) != 6: + raise MSNProtocolError, "Invalid Number of Parameters for USR" + + mechanism = params[1] + if mechanism == "OK": + self.loggedIn(params[2], unquote(params[3]), int(params[4])) + elif params[2].upper() == "S": + # we need to obtain auth from a passport server + f = self.factory + d = _login(f.userHandle, f.password, f.passportServer, authData=params[3]) + d.addCallback(self._passportLogin) + d.addErrback(self._passportError) + + def _passportLogin(self, result): + if result[0] == LOGIN_REDIRECT: + d = _login(self.factory.userHandle, self.factory.password, + result[1], cached=1, authData=result[2]) + d.addCallback(self._passportLogin) + d.addErrback(self._passportError) + elif result[0] == LOGIN_SUCCESS: + self.sendLine("USR %s TWN S %s" % (self._nextTransactionID(), result[1])) + elif result[0] == LOGIN_FAILURE: + self.loginFailure(result[1]) + + 
def _passportError(self, failure): + self.loginFailure("Exception while authenticating: %s" % failure) + + def handle_CHG(self, params): + checkParamLen(len(params), 3, 'CHG') + id = int(params[0]) + if not self._fireCallback(id, params[1]): + self.statusChanged(params[1]) + + def handle_ILN(self, params): + checkParamLen(len(params), 5, 'ILN') + self.gotContactStatus(params[1], params[2], unquote(params[3])) + + def handle_CHL(self, params): + checkParamLen(len(params), 2, 'CHL') + self.sendLine("QRY %s msmsgs@msnmsgr.com 32" % self._nextTransactionID()) + self.transport.write(md5(params[1] + MSN_CHALLENGE_STR).hexdigest()) + + def handle_QRY(self, params): + pass + + def handle_NLN(self, params): + checkParamLen(len(params), 4, 'NLN') + self.contactStatusChanged(params[0], params[1], unquote(params[2])) + + def handle_FLN(self, params): + checkParamLen(len(params), 1, 'FLN') + self.contactOffline(params[0]) + + def handle_LST(self, params): + # support no longer exists for manually + # requesting lists - why do I feel cleaner now? + if self._getState() != 'SYNC': + return + contact = MSNContact(userHandle=params[0], screenName=unquote(params[1]), + lists=int(params[2])) + if contact.lists & FORWARD_LIST: + contact.groups.extend(map(int, params[3].split(','))) + self._getStateData('list').addContact(contact) + self._setStateData('last_contact', contact) + sofar = self._getStateData('lst_sofar') + 1 + if sofar == self._getStateData('lst_reply'): + # this is the best place to determine that + # a syn realy has finished - msn _may_ send + # BPR information for the last contact + # which is unfortunate because it means + # that the real end of a syn is non-deterministic. + # to handle this we'll keep 'last_contact' hanging + # around in the state data and update it if we need + # to later. 
+ self._setState('SESSION') + contacts = self._getStateData('list') + phone = self._getStateData('phone') + id = self._getStateData('synid') + self._remStateData('lst_reply', 'lsg_reply', 'lst_sofar', 'phone', 'synid', 'list') + self._fireCallback(id, contacts, phone) + else: + self._setStateData('lst_sofar',sofar) + + def handle_BLP(self, params): + # check to see if this is in response to a SYN + if self._getState() == 'SYNC': + self._getStateData('list').privacy = listCodeToID[params[0].lower()] + else: + id = int(params[0]) + self._fireCallback(id, int(params[1]), listCodeToID[params[2].lower()]) + + def handle_GTC(self, params): + # check to see if this is in response to a SYN + if self._getState() == 'SYNC': + if params[0].lower() == "a": + self._getStateData('list').autoAdd = 0 + elif params[0].lower() == "n": + self._getStateData('list').autoAdd = 1 + else: + raise MSNProtocolError, "Invalid Paramater for GTC" # debug + else: + id = int(params[0]) + if params[1].lower() == "a": + self._fireCallback(id, 0) + elif params[1].lower() == "n": + self._fireCallback(id, 1) + else: + raise MSNProtocolError, "Invalid Paramater for GTC" # debug + + def handle_SYN(self, params): + id = int(params[0]) + if len(params) == 2: + self._setState('SESSION') + self._fireCallback(id, None, None) + else: + contacts = MSNContactList() + contacts.version = int(params[1]) + self._setStateData('list', contacts) + self._setStateData('lst_reply', int(params[2])) + self._setStateData('lsg_reply', int(params[3])) + self._setStateData('lst_sofar', 0) + self._setStateData('phone', []) + + def handle_LSG(self, params): + if self._getState() == 'SYNC': + self._getStateData('list').groups[int(params[0])] = unquote(params[1]) + + # Please see the comment above the requestListGroups / requestList methods + # regarding support for this + # + #else: + # self._getStateData('groups').append((int(params[4]), unquote(params[5]))) + # if params[3] == params[4]: # this was the last group + # self._fireCallback(int(params[0]), self._getStateData('groups'), int(params[1])) + # self._remStateData('groups') + + def handle_PRP(self, params): + if self._getState() == 'SYNC': + self._getStateData('phone').append((params[0], unquote(params[1]))) + else: + self._fireCallback(int(params[0]), int(params[1]), unquote(params[3])) + + def handle_BPR(self, params): + numParams = len(params) + if numParams == 2: # part of a syn + self._getStateData('last_contact').setPhone(params[0], unquote(params[1])) + elif numParams == 4: + self.gotPhoneNumber(int(params[0]), params[1], params[2], unquote(params[3])) + + def handle_ADG(self, params): + checkParamLen(len(params), 5, 'ADG') + id = int(params[0]) + if not self._fireCallback(id, int(params[1]), unquote(params[2]), int(params[3])): + raise MSNProtocolError, "ADG response does not match up to a request" # debug + + def handle_RMG(self, params): + checkParamLen(len(params), 3, 'RMG') + id = int(params[0]) + if not self._fireCallback(id, int(params[1]), int(params[2])): + raise MSNProtocolError, "RMG response does not match up to a request" # debug + + def handle_REG(self, params): + checkParamLen(len(params), 5, 'REG') + id = int(params[0]) + if not self._fireCallback(id, int(params[1]), int(params[2]), unquote(params[3])): + raise MSNProtocolError, "REG response does not match up to a request" # debug + + def handle_ADD(self, params): + numParams = len(params) + if numParams < 5 or params[1].upper() not in ('AL','BL','RL','FL'): + raise MSNProtocolError, "Invalid Paramaters for ADD" # debug + 
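+ # Reply layout, as unpacked below: params[0] transaction ID, params[1]
+ # list code (AL/BL/RL/FL), params[2] list version, params[3] user handle,
+ # params[4] quoted screen name, and (forward list only) params[5] group ID.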
id = int(params[0]) + listType = params[1].lower() + listVer = int(params[2]) + userHandle = params[3] + groupID = None + if numParams == 6: # they sent a group id + if params[1].upper() != "FL": + raise MSNProtocolError, "Only forward list can contain groups" # debug + groupID = int(params[5]) + if not self._fireCallback(id, listCodeToID[listType], userHandle, listVer, groupID): + self.userAddedMe(userHandle, unquote(params[4]), listVer) + + def handle_REM(self, params): + numParams = len(params) + if numParams < 4 or params[1].upper() not in ('AL','BL','FL','RL'): + raise MSNProtocolError, "Invalid Paramaters for REM" # debug + id = int(params[0]) + listType = params[1].lower() + listVer = int(params[2]) + userHandle = params[3] + groupID = None + if numParams == 5: + if params[1] != "FL": + raise MSNProtocolError, "Only forward list can contain groups" # debug + groupID = int(params[4]) + if not self._fireCallback(id, listCodeToID[listType], userHandle, listVer, groupID): + if listType.upper() == "RL": + self.userRemovedMe(userHandle, listVer) + + def handle_REA(self, params): + checkParamLen(len(params), 4, 'REA') + id = int(params[0]) + self._fireCallback(id, int(params[1]), unquote(params[3])) + + def handle_XFR(self, params): + checkParamLen(len(params), 5, 'XFR') + id = int(params[0]) + # check to see if they sent a host/port pair + try: + host, port = params[2].split(':') + except ValueError: + host = params[2] + port = MSN_PORT + + if not self._fireCallback(id, host, int(port), params[4]): + raise MSNProtocolError, "Got XFR (referral) that I didn't ask for .. should this happen?" # debug + + def handle_RNG(self, params): + checkParamLen(len(params), 6, 'RNG') + # check for host:port pair + try: + host, port = params[1].split(":") + port = int(port) + except ValueError: + host = params[1] + port = MSN_PORT + self.gotSwitchboardInvitation(int(params[0]), host, port, params[3], params[4], + unquote(params[5])) + + def handle_OUT(self, params): + checkParamLen(len(params), 1, 'OUT') + if params[0] == "OTH": + self.multipleLogin() + elif params[0] == "SSD": + self.serverGoingDown() + else: + raise MSNProtocolError, "Invalid Parameters received for OUT" # debug + + # callbacks + + def loggedIn(self, userHandle, screenName, verified): + """ + Called when the client has logged in. + The default behaviour of this method is to + update the factory with our screenName and + to sync the contact list (factory.contacts). + When this is complete self.listSynchronized + will be called. + + @param userHandle: our userHandle + @param screenName: our screenName + @param verified: 1 if our passport has been (verified), 0 if not. + (i'm not sure of the significace of this) + @type verified: int + """ + self.factory.screenName = screenName + if not self.factory.contacts: + listVersion = 0 + else: + listVersion = self.factory.contacts.version + self.syncList(listVersion).addCallback(self.listSynchronized) + + def loginFailure(self, message): + """ + Called when the client fails to login. + + @param message: a message indicating the problem that was encountered + """ + pass + + def gotProfile(self, message): + """ + Called after logging in when the server sends an initial + message with MSN/passport specific profile information + such as country, number of kids, etc. + Check the message headers for the specific values. 
+ + @param message: The profile message + """ + pass + + def listSynchronized(self, *args): + """ + Lists are now synchronized by default upon logging in, this + method is called after the synchronization has finished + and the factory now has the up-to-date contacts. + """ + pass + + def statusChanged(self, statusCode): + """ + Called when our status changes and it isn't in response to + a client command. By default we will update the status + attribute of the factory. + + @param statusCode: 3-letter status code + """ + self.factory.status = statusCode + + def gotContactStatus(self, statusCode, userHandle, screenName): + """ + Called after loggin in when the server sends status of online contacts. + By default we will update the status attribute of the contact stored + on the factory. + + @param statusCode: 3-letter status code + @param userHandle: the contact's user handle (passport) + @param screenName: the contact's screen name + """ + self.factory.contacts.getContact(userHandle).status = statusCode + + def contactStatusChanged(self, statusCode, userHandle, screenName): + """ + Called when we're notified that a contact's status has changed. + By default we will update the status attribute of the contact + stored on the factory. + + @param statusCode: 3-letter status code + @param userHandle: the contact's user handle (passport) + @param screenName: the contact's screen name + """ + self.factory.contacts.getContact(userHandle).status = statusCode + + def contactOffline(self, userHandle): + """ + Called when a contact goes offline. By default this method + will update the status attribute of the contact stored + on the factory. + + @param userHandle: the contact's user handle + """ + self.factory.contacts.getContact(userHandle).status = STATUS_OFFLINE + + def gotPhoneNumber(self, listVersion, userHandle, phoneType, number): + """ + Called when the server sends us phone details about + a specific user (for example after a user is added + the server will send their status, phone details etc. + By default we will update the list version for the + factory's contact list and update the phone details + for the specific user. + + @param listVersion: the new list version + @param userHandle: the contact's user handle (passport) + @param phoneType: the specific phoneType + (*_PHONE constants or HAS_PAGER) + @param number: the value/phone number. + """ + self.factory.contacts.version = listVersion + self.factory.contacts.getContact(userHandle).setPhone(phoneType, number) + + def userAddedMe(self, userHandle, screenName, listVersion): + """ + Called when a user adds me to their list. (ie. they have been added to + the reverse list. By default this method will update the version of + the factory's contact list -- that is, if the contact already exists + it will update the associated lists attribute, otherwise it will create + a new MSNContact object and store it. + + @param userHandle: the userHandle of the user + @param screenName: the screen name of the user + @param listVersion: the new list version + @type listVersion: int + """ + self.factory.contacts.version = listVersion + c = self.factory.contacts.getContact(userHandle) + if not c: + c = MSNContact(userHandle=userHandle, screenName=screenName) + self.factory.contacts.addContact(c) + c.addToList(REVERSE_LIST) + + def userRemovedMe(self, userHandle, listVersion): + """ + Called when a user removes us from their contact list + (they are no longer on our reverseContacts list. 
+ By default this method will update the version of + the factory's contact list -- that is, the user will + be removed from the reverse list and if they are no longer + part of any lists they will be removed from the contact + list entirely. + + @param userHandle: the contact's user handle (passport) + @param listVersion: the new list version + """ + self.factory.contacts.version = listVersion + c = self.factory.contacts.getContact(userHandle) + c.removeFromList(REVERSE_LIST) + if c.lists == 0: + self.factory.contacts.remContact(c.userHandle) + + def gotSwitchboardInvitation(self, sessionID, host, port, + key, userHandle, screenName): + """ + Called when we get an invitation to a switchboard server. + This happens when a user requests a chat session with us. + + @param sessionID: session ID number, must be remembered for logging in + @param host: the hostname of the switchboard server + @param port: the port to connect to + @param key: used for authorization when connecting + @param userHandle: the user handle of the person who invited us + @param screenName: the screen name of the person who invited us + """ + pass + + def multipleLogin(self): + """ + Called when the server says there has been another login + under our account, the server should disconnect us right away. + """ + pass + + def serverGoingDown(self): + """ + Called when the server has notified us that it is going down for + maintenance. + """ + pass + + # api calls + + def changeStatus(self, status): + """ + Change my current status. This method will add + a default callback to the returned Deferred + which will update the status attribute of the + factory. + + @param status: 3-letter status code (as defined by + the STATUS_* constants) + @return: A Deferred, the callback of which will be + fired when the server confirms the change + of status. The callback argument will be + a tuple with the new status code as the + only element. + """ + + id, d = self._createIDMapping() + self.sendLine("CHG %s %s" % (id, status)) + def _cb(r): + self.factory.status = r[0] + return r + return d.addCallback(_cb) + + # I am no longer supporting the process of manually requesting + # lists or list groups -- as far as I can see this has no use + # if lists are synchronized and updated correctly, which they + # should be. If someone has a specific justified need for this + # then please contact me and i'll re-enable/fix support for it. + + #def requestList(self, listType): + # """ + # request the desired list type + # + # @param listType: (as defined by the *_LIST constants) + # @return: A Deferred, the callback of which will be + # fired when the list has been retrieved. + # The callback argument will be a tuple with + # the only element being a list of MSNContact + # objects. + # """ + # # this doesn't need to ever be used if syncing of the lists takes place + # # i.e. please don't use it! + # warnings.warn("Please do not use this method - use the list syncing process instead") + # id, d = self._createIDMapping() + # self.sendLine("LST %s %s" % (id, listIDToCode[listType].upper())) + # self._setStateData('list',[]) + # return d + + def setPrivacyMode(self, privLevel): + """ + Set my privacy mode on the server. + + B{Note}: + This only keeps the current privacy setting on + the server for later retrieval, it does not + effect the way the server works at all. 
+ + @param privLevel: This parameter can be true, in which + case the server will keep the state as + 'al' which the official client interprets + as -> allow messages from only users on + the allow list. Alternatively it can be + false, in which case the server will keep + the state as 'bl' which the official client + interprets as -> allow messages from all + users except those on the block list. + + @return: A Deferred, the callback of which will be fired when + the server replies with the new privacy setting. + The callback argument will be a tuple, the 2 elements + of which being the list version and either 'al' + or 'bl' (the new privacy setting). + """ + + id, d = self._createIDMapping() + if privLevel: + self.sendLine("BLP %s AL" % id) + else: + self.sendLine("BLP %s BL" % id) + return d + + def syncList(self, version): + """ + Used for keeping an up-to-date contact list. + A callback is added to the returned Deferred + that updates the contact list on the factory + and also sets my state to STATUS_ONLINE. + + B{Note}: + This is called automatically upon signing + in using the version attribute of + factory.contacts, so you may want to persist + this object accordingly. Because of this there + is no real need to ever call this method + directly. + + @param version: The current known list version + + @return: A Deferred, the callback of which will be + fired when the server sends an adequate reply. + The callback argument will be a tuple with two + elements, the new list (MSNContactList) and + your current state (a dictionary). If the version + you sent _was_ the latest list version, both elements + will be None. To just request the list send a version of 0. + """ + + self._setState('SYNC') + id, d = self._createIDMapping(data=str(version)) + self._setStateData('synid',id) + self.sendLine("SYN %s %s" % (id, version)) + def _cb(r): + self.changeStatus(STATUS_ONLINE) + if r[0] is not None: + self.factory.contacts = r[0] + return r + return d.addCallback(_cb) + + + # I am no longer supporting the process of manually requesting + # lists or list groups -- as far as I can see this has no use + # if lists are synchronized and updated correctly, which they + # should be. If someone has a specific justified need for this + # then please contact me and i'll re-enable/fix support for it. + + #def requestListGroups(self): + # """ + # Request (forward) list groups. + # + # @return: A Deferred, the callback for which will be called + # when the server responds with the list groups. + # The callback argument will be a tuple with two elements, + # a dictionary mapping group IDs to group names and the + # current list version. + # """ + # + # # this doesn't need to be used if syncing of the lists takes place (which it SHOULD!) + # # i.e. please don't use it! + # warnings.warn("Please do not use this method - use the list syncing process instead") + # id, d = self._createIDMapping() + # self.sendLine("LSG %s" % id) + # self._setStateData('groups',{}) + # return d + + def setPhoneDetails(self, phoneType, value): + """ + Set/change my phone numbers stored on the server. + + @param phoneType: phoneType can be one of the following + constants - HOME_PHONE, WORK_PHONE, + MOBILE_PHONE, HAS_PAGER. + These are pretty self-explanatory, except + maybe HAS_PAGER which refers to whether or + not you have a pager. + @param value: for all of the *_PHONE constants the value is a + phone number (str), for HAS_PAGER accepted values + are 'Y' (for yes) and 'N' (for no). 
+ + @return: A Deferred, the callback for which will be fired when + the server confirms the change has been made. The + callback argument will be a tuple with 2 elements, the + first being the new list version (int) and the second + being the new phone number value (str). + """ + # XXX: Add a default callback which updates + # factory.contacts.version and the relevant phone + # number + id, d = self._createIDMapping() + self.sendLine("PRP %s %s %s" % (id, phoneType, quote(value))) + return d + + def addListGroup(self, name): + """ + Used to create a new list group. + A default callback is added to the + returned Deferred which updates the + contacts attribute of the factory. + + @param name: The desired name of the new group. + + @return: A Deferred, the callbacck for which will be called + when the server clarifies that the new group has been + created. The callback argument will be a tuple with 3 + elements: the new list version (int), the new group name + (str) and the new group ID (int). + """ + + id, d = self._createIDMapping() + self.sendLine("ADG %s %s 0" % (id, quote(name))) + def _cb(r): + self.factory.contacts.version = r[0] + self.factory.contacts.setGroup(r[1], r[2]) + return r + return d.addCallback(_cb) + + def remListGroup(self, groupID): + """ + Used to remove a list group. + A default callback is added to the + returned Deferred which updates the + contacts attribute of the factory. + + @param groupID: the ID of the desired group to be removed. + + @return: A Deferred, the callback for which will be called when + the server clarifies the deletion of the group. + The callback argument will be a tuple with 2 elements: + the new list version (int) and the group ID (int) of + the removed group. + """ + + id, d = self._createIDMapping() + self.sendLine("RMG %s %s" % (id, groupID)) + def _cb(r): + self.factory.contacts.version = r[0] + self.factory.contacts.remGroup(r[1]) + return r + return d.addCallback(_cb) + + def renameListGroup(self, groupID, newName): + """ + Used to rename an existing list group. + A default callback is added to the returned + Deferred which updates the contacts attribute + of the factory. + + @param groupID: the ID of the desired group to rename. + @param newName: the desired new name for the group. + + @return: A Deferred, the callback for which will be called + when the server clarifies the renaming. + The callback argument will be a tuple of 3 elements, + the new list version (int), the group id (int) and + the new group name (str). + """ + + id, d = self._createIDMapping() + self.sendLine("REG %s %s %s 0" % (id, groupID, quote(newName))) + def _cb(r): + self.factory.contacts.version = r[0] + self.factory.contacts.setGroup(r[1], r[2]) + return r + return d.addCallback(_cb) + + def addContact(self, listType, userHandle, groupID=0): + """ + Used to add a contact to the desired list. + A default callback is added to the returned + Deferred which updates the contacts attribute of + the factory with the new contact information. + If you are adding a contact to the forward list + and you want to associate this contact with multiple + groups then you will need to call this method for each + group you would like to add them to, changing the groupID + parameter. The default callback will take care of updating + the group information on the factory's contact list. 
+ + @param listType: (as defined by the *_LIST constants) + @param userHandle: the user handle (passport) of the contact + that is being added + @param groupID: the group ID for which to associate this contact + with. (default 0 - default group). Groups are only + valid for FORWARD_LIST. + + @return: A Deferred, the callback for which will be called when + the server has clarified that the user has been added. + The callback argument will be a tuple with 4 elements: + the list type, the contact's user handle, the new list + version, and the group id (if relevant, otherwise it + will be None) + """ + + id, d = self._createIDMapping() + listType = listIDToCode[listType].upper() + if listType == "FL": + self.sendLine("ADD %s FL %s %s %s" % (id, userHandle, userHandle, groupID)) + else: + self.sendLine("ADD %s %s %s %s" % (id, listType, userHandle, userHandle)) + + def _cb(r): + self.factory.contacts.version = r[2] + c = self.factory.contacts.getContact(r[1]) + if not c: + c = MSNContact(userHandle=r[1]) + if r[3]: + c.groups.append(r[3]) + c.addToList(r[0]) + return r + return d.addCallback(_cb) + + def remContact(self, listType, userHandle, groupID=0): + """ + Used to remove a contact from the desired list. + A default callback is added to the returned deferred + which updates the contacts attribute of the factory + to reflect the new contact information. If you are + removing from the forward list then you will need to + supply a groupID, if the contact is in more than one + group then they will only be removed from this group + and not the entire forward list, but if this is their + only group they will be removed from the whole list. + + @param listType: (as defined by the *_LIST constants) + @param userHandle: the user handle (passport) of the + contact being removed + @param groupID: the ID of the group to which this contact + belongs (only relevant for FORWARD_LIST, + default is 0) + + @return: A Deferred, the callback for which will be called when + the server has clarified that the user has been removed. + The callback argument will be a tuple of 4 elements: + the list type, the contact's user handle, the new list + version, and the group id (if relevant, otherwise it will + be None) + """ + + id, d = self._createIDMapping() + listType = listIDToCode[listType].upper() + if listType == "FL": + self.sendLine("REM %s FL %s %s" % (id, userHandle, groupID)) + else: + self.sendLine("REM %s %s %s" % (id, listType, userHandle)) + + def _cb(r): + l = self.factory.contacts + l.version = r[2] + c = l.getContact(r[1]) + group = r[3] + shouldRemove = 1 + if group: # they may not have been removed from the list + c.groups.remove(group) + if c.groups: + shouldRemove = 0 + if shouldRemove: + c.removeFromList(r[0]) + if c.lists == 0: + l.remContact(c.userHandle) + return r + return d.addCallback(_cb) + + def changeScreenName(self, newName): + """ + Used to change your current screen name. + A default callback is added to the returned + Deferred which updates the screenName attribute + of the factory and also updates the contact list + version. + + @param newName: the new screen name + + @return: A Deferred, the callback for which will be called + when the server sends an adequate reply. + The callback argument will be a tuple of 2 elements: + the new list version and the new screen name. 
+ """ + + id, d = self._createIDMapping() + self.sendLine("REA %s %s %s" % (id, self.factory.userHandle, quote(newName))) + def _cb(r): + self.factory.contacts.version = r[0] + self.factory.screenName = r[1] + return r + return d.addCallback(_cb) + + def requestSwitchboardServer(self): + """ + Used to request a switchboard server to use for conversations. + + @return: A Deferred, the callback for which will be called when + the server responds with the switchboard information. + The callback argument will be a tuple with 3 elements: + the host of the switchboard server, the port and a key + used for logging in. + """ + + id, d = self._createIDMapping() + self.sendLine("XFR %s SB" % id) + return d + + def logOut(self): + """ + Used to log out of the notification server. + After running the method the server is expected + to close the connection. + """ + + self.sendLine("OUT") + +class NotificationFactory(ClientFactory): + """ + Factory for the NotificationClient protocol. + This is basically responsible for keeping + the state of the client and thus should be used + in a 1:1 situation with clients. + + @ivar contacts: An MSNContactList instance reflecting + the current contact list -- this is + generally kept up to date by the default + command handlers. + @ivar userHandle: The client's userHandle, this is expected + to be set by the client and is used by the + protocol (for logging in etc). + @ivar screenName: The client's current screen-name -- this is + generally kept up to date by the default + command handlers. + @ivar password: The client's password -- this is (obviously) + expected to be set by the client. + @ivar passportServer: This must point to an msn passport server + (the whole URL is required) + @ivar status: The status of the client -- this is generally kept + up to date by the default command handlers + """ + + contacts = None + userHandle = '' + screenName = '' + password = '' + passportServer = 'https://nexus.passport.com/rdr/pprdr.asp' + status = 'FLN' + protocol = NotificationClient + + +# XXX: A lot of the state currently kept in +# instances of SwitchboardClient is likely to +# be moved into a factory at some stage in the +# future + +class SwitchboardClient(MSNEventBase): + """ + This class provides support for clients connecting to a switchboard server. + + Switchboard servers are used for conversations with other people + on the MSN network. This means that the number of conversations at + any given time will be directly proportional to the number of + connections to varioius switchboard servers. + + MSN makes no distinction between single and group conversations, + so any number of users may be invited to join a specific conversation + taking place on a switchboard server. + + @ivar key: authorization key, obtained when receiving + invitation / requesting switchboard server. + @ivar userHandle: your user handle (passport) + @ivar sessionID: unique session ID, used if you are replying + to a switchboard invitation + @ivar reply: set this to 1 in connectionMade or before to signifiy + that you are replying to a switchboard invitation. 
+ """ + + key = 0 + userHandle = "" + sessionID = "" + reply = 0 + + _iCookie = 0 + + def __init__(self): + MSNEventBase.__init__(self) + self.pendingUsers = {} + self.cookies = {'iCookies' : {}, 'external' : {}} # will maybe be moved to a factory in the future + + def connectionMade(self): + MSNEventBase.connectionMade(self) + print 'sending initial stuff' + self._sendInit() + + def connectionLost(self, reason): + self.cookies['iCookies'] = {} + self.cookies['external'] = {} + MSNEventBase.connectionLost(self, reason) + + def _sendInit(self): + """ + send initial data based on whether we are replying to an invitation + or starting one. + """ + id = self._nextTransactionID() + if not self.reply: + self.sendLine("USR %s %s %s" % (id, self.userHandle, self.key)) + else: + self.sendLine("ANS %s %s %s %s" % (id, self.userHandle, self.key, self.sessionID)) + + def _newInvitationCookie(self): + self._iCookie += 1 + if self._iCookie > 1000: + self._iCookie = 1 + return self._iCookie + + def _checkTyping(self, message, cTypes): + """ helper method for checkMessage """ + if 'text/x-msmsgscontrol' in cTypes and message.hasHeader('TypingUser'): + self.userTyping(message) + return 1 + + def _checkFileInvitation(self, message, info): + """ helper method for checkMessage """ + guid = info.get('Application-GUID', '').lower() + name = info.get('Application-Name', '').lower() + + # Both fields are required, but we'll let some lazy clients get away + # with only sending a name, if it is easy for us to recognize the + # name (the name is localized, so this check might fail for lazy, + # non-english clients, but I'm not about to include "file transfer" + # in 80 different languages here). + + if name != "file transfer" and guid != classNameToGUID["file transfer"]: + return 0 + try: + cookie = int(info['Invitation-Cookie']) + fileName = info['Application-File'] + fileSize = int(info['Application-FileSize']) + except KeyError: + log.msg('Received munged file transfer request ... ignoring.') + return 0 + self.gotSendRequest(fileName, fileSize, cookie, message) + return 1 + + def _checkFileResponse(self, message, info): + """ helper method for checkMessage """ + try: + cmd = info['Invitation-Command'].upper() + cookie = int(info['Invitation-Cookie']) + except KeyError: + return 0 + accept = (cmd == 'ACCEPT') and 1 or 0 + requested = self.cookies['iCookies'].get(cookie) + if not requested: + return 1 + requested[0].callback((accept, cookie, info)) + del self.cookies['iCookies'][cookie] + return 1 + + def _checkFileInfo(self, message, info): + """ helper method for checkMessage """ + try: + ip = info['IP-Address'] + iCookie = int(info['Invitation-Cookie']) + aCookie = int(info['AuthCookie']) + cmd = info['Invitation-Command'].upper() + port = int(info['Port']) + except KeyError: + return 0 + accept = (cmd == 'ACCEPT') and 1 or 0 + requested = self.cookies['external'].get(iCookie) + if not requested: + return 1 # we didn't ask for this + requested[0].callback((accept, ip, port, aCookie, info)) + del self.cookies['external'][iCookie] + return 1 + + def checkMessage(self, message): + """ + hook for detecting any notification type messages + (e.g. file transfer) + """ + cTypes = [s.lstrip() for s in message.getHeader('Content-Type').split(';')] + if self._checkTyping(message, cTypes): + return 0 + if 'text/x-msmsgsinvite' in cTypes: + # header like info is sent as part of the message body. 
+ info = {} + for line in message.message.split('\r\n'): + try: + key, val = line.split(':') + info[key] = val.lstrip() + except ValueError: + continue + if self._checkFileInvitation(message, info) or self._checkFileInfo(message, info) or self._checkFileResponse(message, info): + return 0 + elif 'text/x-clientcaps' in cTypes: + # do something with capabilities + return 0 + return 1 + + # negotiation + def handle_USR(self, params): + checkParamLen(len(params), 4, 'USR') + if params[1] == "OK": + self.loggedIn() + + # invite a user + def handle_CAL(self, params): + checkParamLen(len(params), 3, 'CAL') + id = int(params[0]) + if params[1].upper() == "RINGING": + self._fireCallback(id, int(params[2])) # session ID as parameter + + # user joined + def handle_JOI(self, params): + checkParamLen(len(params), 2, 'JOI') + self.userJoined(params[0], unquote(params[1])) + + # users participating in the current chat + def handle_IRO(self, params): + checkParamLen(len(params), 5, 'IRO') + self.pendingUsers[params[3]] = unquote(params[4]) + if params[1] == params[2]: + self.gotChattingUsers(self.pendingUsers) + self.pendingUsers = {} + + # finished listing users + def handle_ANS(self, params): + checkParamLen(len(params), 2, 'ANS') + if params[1] == "OK": + self.loggedIn() + + def handle_ACK(self, params): + checkParamLen(len(params), 1, 'ACK') + self._fireCallback(int(params[0]), None) + + def handle_NAK(self, params): + checkParamLen(len(params), 1, 'NAK') + self._fireCallback(int(params[0]), None) + + def handle_BYE(self, params): + #checkParamLen(len(params), 1, 'BYE') # i've seen more than 1 param passed to this + self.userLeft(params[0]) + + # callbacks + + def loggedIn(self): + """ + called when all login details have been negotiated. + Messages can now be sent, or new users invited. + """ + pass + + def gotChattingUsers(self, users): + """ + called after connecting to an existing chat session. + + @param users: A dict mapping user handles to screen names + (current users taking part in the conversation) + """ + pass + + def userJoined(self, userHandle, screenName): + """ + called when a user has joined the conversation. + + @param userHandle: the user handle (passport) of the user + @param screenName: the screen name of the user + """ + pass + + def userLeft(self, userHandle): + """ + called when a user has left the conversation. + + @param userHandle: the user handle (passport) of the user. + """ + pass + + def gotMessage(self, message): + """ + called when we receive a message. + + @param message: the associated MSNMessage object + """ + pass + + def userTyping(self, message): + """ + called when we receive the special type of message notifying + us that a user is typing a message. + + @param message: the associated MSNMessage object + """ + pass + + def gotSendRequest(self, fileName, fileSize, iCookie, message): + """ + called when a contact is trying to send us a file. + To accept or reject this transfer see the + fileInvitationReply method. + + @param fileName: the name of the file + @param fileSize: the size of the file + @param iCookie: the invitation cookie, used so the client can + match up your reply with this request. + @param message: the MSNMessage object which brought about this + invitation (it may contain more information) + """ + pass + + # api calls + + def inviteUser(self, userHandle): + """ + used to invite a user to the current switchboard server. + + @param userHandle: the user handle (passport) of the desired user. 
+ + @return: A Deferred, the callback for which will be called + when the server notifies us that the user has indeed + been invited. The callback argument will be a tuple + with 1 element, the sessionID given to the invited user. + I'm not sure if this is useful or not. + """ + + id, d = self._createIDMapping() + self.sendLine("CAL %s %s" % (id, userHandle)) + return d + + def sendMessage(self, message): + """ + used to send a message. + + @param message: the corresponding MSNMessage object. + + @return: Depending on the value of message.ack. + If set to MSNMessage.MESSAGE_ACK or + MSNMessage.MESSAGE_NACK a Deferred will be returned, + the callback for which will be fired when an ACK or + NACK is received - the callback argument will be + (None,). If set to MSNMessage.MESSAGE_ACK_NONE then + the return value is None. + """ + + if message.ack not in ('A','N'): + id, d = self._nextTransactionID(), None + else: + id, d = self._createIDMapping() + if message.length == 0: + message.length = message._calcMessageLen() + self.sendLine("MSG %s %s %s" % (id, message.ack, message.length)) + # apparently order matters with at least MIME-Version and Content-Type + self.sendLine('MIME-Version: %s' % message.getHeader('MIME-Version')) + self.sendLine('Content-Type: %s' % message.getHeader('Content-Type')) + # send the rest of the headers + for header in [h for h in message.headers.items() if h[0].lower() not in ('mime-version','content-type')]: + self.sendLine("%s: %s" % (header[0], header[1])) + self.transport.write(CR+LF) + self.transport.write(message.message) + return d + + def sendTypingNotification(self): + """ + used to send a typing notification. Upon receiving this + message the official client will display a 'user is typing' + message to all other users in the chat session for 10 seconds. + The official client sends one of these every 5 seconds (I think) + as long as you continue to type. + """ + m = MSNMessage() + m.ack = m.MESSAGE_ACK_NONE + m.setHeader('Content-Type', 'text/x-msmsgscontrol') + m.setHeader('TypingUser', self.userHandle) + m.message = "\r\n" + self.sendMessage(m) + + def sendFileInvitation(self, fileName, fileSize): + """ + send an notification that we want to send a file. + + @param fileName: the file name + @param fileSize: the file size + + @return: A Deferred, the callback of which will be fired + when the user responds to this invitation with an + appropriate message. The callback argument will be + a tuple with 3 elements, the first being 1 or 0 + depending on whether they accepted the transfer + (1=yes, 0=no), the second being an invitation cookie + to identify your follow-up responses and the third being + the message 'info' which is a dict of information they + sent in their reply (this doesn't really need to be used). + If you wish to proceed with the transfer see the + sendTransferInfo method. 
+ """ + cookie = self._newInvitationCookie() + d = Deferred() + m = MSNMessage() + m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8') + m.message += 'Application-Name: File Transfer\r\n' + m.message += 'Application-GUID: %s\r\n' % (classNameToGUID["file transfer"],) + m.message += 'Invitation-Command: INVITE\r\n' + m.message += 'Invitation-Cookie: %s\r\n' % str(cookie) + m.message += 'Application-File: %s\r\n' % fileName + m.message += 'Application-FileSize: %s\r\n\r\n' % str(fileSize) + m.ack = m.MESSAGE_ACK_NONE + self.sendMessage(m) + self.cookies['iCookies'][cookie] = (d, m) + return d + + def fileInvitationReply(self, iCookie, accept=1): + """ + used to reply to a file transfer invitation. + + @param iCookie: the invitation cookie of the initial invitation + @param accept: whether or not you accept this transfer, + 1 = yes, 0 = no, default = 1. + + @return: A Deferred, the callback for which will be fired when + the user responds with the transfer information. + The callback argument will be a tuple with 5 elements, + whether or not they wish to proceed with the transfer + (1=yes, 0=no), their ip, the port, the authentication + cookie (see FileReceive/FileSend) and the message + info (dict) (in case they send extra header-like info + like Internal-IP, this doesn't necessarily need to be + used). If you wish to proceed with the transfer see + FileReceive. + """ + d = Deferred() + m = MSNMessage() + m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8') + m.message += 'Invitation-Command: %s\r\n' % (accept and 'ACCEPT' or 'CANCEL') + m.message += 'Invitation-Cookie: %s\r\n' % str(iCookie) + if not accept: + m.message += 'Cancel-Code: REJECT\r\n' + m.message += 'Launch-Application: FALSE\r\n' + m.message += 'Request-Data: IP-Address:\r\n' + m.message += '\r\n' + m.ack = m.MESSAGE_ACK_NONE + self.sendMessage(m) + self.cookies['external'][iCookie] = (d, m) + return d + + def sendTransferInfo(self, accept, iCookie, authCookie, ip, port): + """ + send information relating to a file transfer session. + + @param accept: whether or not to go ahead with the transfer + (1=yes, 0=no) + @param iCookie: the invitation cookie of previous replies + relating to this transfer + @param authCookie: the authentication cookie obtained from + an FileSend instance + @param ip: your ip + @param port: the port on which an FileSend protocol is listening. + """ + m = MSNMessage() + m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8') + m.message += 'Invitation-Command: %s\r\n' % (accept and 'ACCEPT' or 'CANCEL') + m.message += 'Invitation-Cookie: %s\r\n' % iCookie + m.message += 'IP-Address: %s\r\n' % ip + m.message += 'Port: %s\r\n' % port + m.message += 'AuthCookie: %s\r\n' % authCookie + m.message += '\r\n' + m.ack = m.MESSAGE_NACK + self.sendMessage(m) + +class FileReceive(LineReceiver): + """ + This class provides support for receiving files from contacts. + + @ivar fileSize: the size of the receiving file. (you will have to set this) + @ivar connected: true if a connection has been established. + @ivar completed: true if the transfer is complete. + @ivar bytesReceived: number of bytes (of the file) received. + This does not include header data. + """ + + def __init__(self, auth, myUserHandle, file, directory="", overwrite=0): + """ + @param auth: auth string received in the file invitation. + @param myUserHandle: your userhandle. + @param file: A string or file object represnting the file + to save data to. 
+ @param directory: optional parameter specifiying the directory. + Defaults to the current directory. + @param overwrite: if true and a file of the same name exists on + your system, it will be overwritten. (0 by default) + """ + self.auth = auth + self.myUserHandle = myUserHandle + self.fileSize = 0 + self.connected = 0 + self.completed = 0 + self.directory = directory + self.bytesReceived = 0 + self.overwrite = overwrite + + # used for handling current received state + self.state = 'CONNECTING' + self.segmentLength = 0 + self.buffer = '' + + if isinstance(file, types.StringType): + path = os.path.join(directory, file) + if os.path.exists(path) and not self.overwrite: + log.msg('File already exists...') + raise IOError, "File Exists" # is this all we should do here? + self.file = open(os.path.join(directory, file), 'wb') + else: + self.file = file + + def connectionMade(self): + self.connected = 1 + self.state = 'INHEADER' + self.sendLine('VER MSNFTP') + + def connectionLost(self, reason): + self.connected = 0 + self.file.close() + + def parseHeader(self, header): + """ parse the header of each 'message' to obtain the segment length """ + + if ord(header[0]) != 0: # they requested that we close the connection + self.transport.loseConnection() + return + try: + extra, factor = header[1:] + except ValueError: + # munged header, ending transfer + self.transport.loseConnection() + raise + extra = ord(extra) + factor = ord(factor) + return factor * 256 + extra + + def lineReceived(self, line): + temp = line.split() + if len(temp) == 1: + params = [] + else: + params = temp[1:] + cmd = temp[0] + handler = getattr(self, "handle_%s" % cmd.upper(), None) + if handler: + handler(params) # try/except + else: + self.handle_UNKNOWN(cmd, params) + + def rawDataReceived(self, data): + bufferLen = len(self.buffer) + if self.state == 'INHEADER': + delim = 3-bufferLen + self.buffer += data[:delim] + if len(self.buffer) == 3: + self.segmentLength = self.parseHeader(self.buffer) + if not self.segmentLength: + return # hrm + self.buffer = "" + self.state = 'INSEGMENT' + extra = data[delim:] + if len(extra) > 0: + self.rawDataReceived(extra) + return + + elif self.state == 'INSEGMENT': + dataSeg = data[:(self.segmentLength-bufferLen)] + self.buffer += dataSeg + self.bytesReceived += len(dataSeg) + if len(self.buffer) == self.segmentLength: + self.gotSegment(self.buffer) + self.buffer = "" + if self.bytesReceived == self.fileSize: + self.completed = 1 + self.buffer = "" + self.file.close() + self.sendLine("BYE 16777989") + return + self.state = 'INHEADER' + extra = data[(self.segmentLength-bufferLen):] + if len(extra) > 0: + self.rawDataReceived(extra) + return + + def handle_VER(self, params): + checkParamLen(len(params), 1, 'VER') + if params[0].upper() == "MSNFTP": + self.sendLine("USR %s %s" % (self.myUserHandle, self.auth)) + else: + log.msg('they sent the wrong version, time to quit this transfer') + self.transport.loseConnection() + + def handle_FIL(self, params): + checkParamLen(len(params), 1, 'FIL') + try: + self.fileSize = int(params[0]) + except ValueError: # they sent the wrong file size - probably want to log this + self.transport.loseConnection() + return + self.setRawMode() + self.sendLine("TFR") + + def handle_UNKNOWN(self, cmd, params): + log.msg('received unknown command (%s), params: %s' % (cmd, params)) + + def gotSegment(self, data): + """ called when a segment (block) of data arrives. 
""" + self.file.write(data) + +class FileSend(LineReceiver): + """ + This class provides support for sending files to other contacts. + + @ivar bytesSent: the number of bytes that have currently been sent. + @ivar completed: true if the send has completed. + @ivar connected: true if a connection has been established. + @ivar targetUser: the target user (contact). + @ivar segmentSize: the segment (block) size. + @ivar auth: the auth cookie (number) to use when sending the + transfer invitation + """ + + def __init__(self, file): + """ + @param file: A string or file object represnting the file to send. + """ + + if isinstance(file, types.StringType): + self.file = open(file, 'rb') + else: + self.file = file + + self.fileSize = 0 + self.bytesSent = 0 + self.completed = 0 + self.connected = 0 + self.targetUser = None + self.segmentSize = 2045 + self.auth = randint(0, 2**30) + self._pendingSend = None # :( + + def connectionMade(self): + self.connected = 1 + + def connectionLost(self, reason): + if self._pendingSend.active(): + self._pendingSend.cancel() + self._pendingSend = None + if self.bytesSent == self.fileSize: + self.completed = 1 + self.connected = 0 + self.file.close() + + def lineReceived(self, line): + temp = line.split() + if len(temp) == 1: + params = [] + else: + params = temp[1:] + cmd = temp[0] + handler = getattr(self, "handle_%s" % cmd.upper(), None) + if handler: + handler(params) + else: + self.handle_UNKNOWN(cmd, params) + + def handle_VER(self, params): + checkParamLen(len(params), 1, 'VER') + if params[0].upper() == "MSNFTP": + self.sendLine("VER MSNFTP") + else: # they sent some weird version during negotiation, i'm quitting. + self.transport.loseConnection() + + def handle_USR(self, params): + checkParamLen(len(params), 2, 'USR') + self.targetUser = params[0] + if self.auth == int(params[1]): + self.sendLine("FIL %s" % (self.fileSize)) + else: # they failed the auth test, disconnecting. + self.transport.loseConnection() + + def handle_TFR(self, params): + checkParamLen(len(params), 0, 'TFR') + # they are ready for me to start sending + self.sendPart() + + def handle_BYE(self, params): + self.completed = (self.bytesSent == self.fileSize) + self.transport.loseConnection() + + def handle_CCL(self, params): + self.completed = (self.bytesSent == self.fileSize) + self.transport.loseConnection() + + def handle_UNKNOWN(self, cmd, params): + log.msg('received unknown command (%s), params: %s' % (cmd, params)) + + def makeHeader(self, size): + """ make the appropriate header given a specific segment size. 
""" + quotient, remainder = divmod(size, 256) + return chr(0) + chr(remainder) + chr(quotient) + + def sendPart(self): + """ send a segment of data """ + if not self.connected: + self._pendingSend = None + return # may be buggy (if handle_CCL/BYE is called but self.connected is still 1) + data = self.file.read(self.segmentSize) + if data: + dataSize = len(data) + header = self.makeHeader(dataSize) + self.bytesSent += dataSize + self.transport.write(header + data) + self._pendingSend = reactor.callLater(0, self.sendPart) + else: + self._pendingSend = None + self.completed = 1 + +# mapping of error codes to error messages +errorCodes = { + + 200 : "Syntax error", + 201 : "Invalid parameter", + 205 : "Invalid user", + 206 : "Domain name missing", + 207 : "Already logged in", + 208 : "Invalid username", + 209 : "Invalid screen name", + 210 : "User list full", + 215 : "User already there", + 216 : "User already on list", + 217 : "User not online", + 218 : "Already in mode", + 219 : "User is in the opposite list", + 223 : "Too many groups", + 224 : "Invalid group", + 225 : "User not in group", + 229 : "Group name too long", + 230 : "Cannot remove group 0", + 231 : "Invalid group", + 280 : "Switchboard failed", + 281 : "Transfer to switchboard failed", + + 300 : "Required field missing", + 301 : "Too many FND responses", + 302 : "Not logged in", + + 500 : "Internal server error", + 501 : "Database server error", + 502 : "Command disabled", + 510 : "File operation failed", + 520 : "Memory allocation failed", + 540 : "Wrong CHL value sent to server", + + 600 : "Server is busy", + 601 : "Server is unavaliable", + 602 : "Peer nameserver is down", + 603 : "Database connection failed", + 604 : "Server is going down", + 605 : "Server unavailable", + + 707 : "Could not create connection", + 710 : "Invalid CVR parameters", + 711 : "Write is blocking", + 712 : "Session is overloaded", + 713 : "Too many active users", + 714 : "Too many sessions", + 715 : "Not expected", + 717 : "Bad friend file", + 731 : "Not expected", + + 800 : "Requests too rapid", + + 910 : "Server too busy", + 911 : "Authentication failed", + 912 : "Server too busy", + 913 : "Not allowed when offline", + 914 : "Server too busy", + 915 : "Server too busy", + 916 : "Server too busy", + 917 : "Server too busy", + 918 : "Server too busy", + 919 : "Server too busy", + 920 : "Not accepting new users", + 921 : "Server too busy", + 922 : "Server too busy", + 923 : "No parent consent", + 924 : "Passport account not yet verified" + +} + +# mapping of status codes to readable status format +statusCodes = { + + STATUS_ONLINE : "Online", + STATUS_OFFLINE : "Offline", + STATUS_HIDDEN : "Appear Offline", + STATUS_IDLE : "Idle", + STATUS_AWAY : "Away", + STATUS_BUSY : "Busy", + STATUS_BRB : "Be Right Back", + STATUS_PHONE : "On the Phone", + STATUS_LUNCH : "Out to Lunch" + +} + +# mapping of list ids to list codes +listIDToCode = { + + FORWARD_LIST : 'fl', + BLOCK_LIST : 'bl', + ALLOW_LIST : 'al', + REVERSE_LIST : 'rl' + +} + +# mapping of list codes to list ids +listCodeToID = {} +for id,code in listIDToCode.items(): + listCodeToID[code] = id + +del id, code + +# Mapping of class GUIDs to simple english names +guidToClassName = { + "{5D3E02AB-6190-11d3-BBBB-00C04F795683}": "file transfer", + } + +# Reverse of the above +classNameToGUID = {} +for guid, name in guidToClassName.iteritems(): + classNameToGUID[name] = guid diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/oscar.py b/vendor/Twisted-10.0.0/twisted/words/protocols/oscar.py new file 
mode 100644 index 000000000000..3ca4c0efe135 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/oscar.py @@ -0,0 +1,1235 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +An implementation of the OSCAR protocol, which AIM and ICQ use to communcate. + +Maintainer: Paul Swartz +""" + +import struct +import string +import socket +import random +import types +import re + +from twisted.internet import reactor, defer, protocol +from twisted.python import log +from twisted.python.hashlib import md5 + +def logPacketData(data): + lines = len(data)/16 + if lines*16 != len(data): lines=lines+1 + for i in range(lines): + d = tuple(data[16*i:16*i+16]) + hex = map(lambda x: "%02X"%ord(x),d) + text = map(lambda x: (len(repr(x))>3 and '.') or x, d) + log.msg(' '.join(hex)+ ' '*3*(16-len(d)) +''.join(text)) + log.msg('') + +def SNAC(fam,sub,id,data,flags=[0,0]): + header="!HHBBL" + head=struct.pack(header,fam,sub, + flags[0],flags[1], + id) + return head+str(data) + +def readSNAC(data): + header="!HHBBL" + head=list(struct.unpack(header,data[:10])) + return head+[data[10:]] + +def TLV(type,value): + header="!HH" + head=struct.pack(header,type,len(value)) + return head+str(value) + +def readTLVs(data,count=None): + header="!HH" + dict={} + while data and len(dict)!=count: + head=struct.unpack(header,data[:4]) + dict[head[0]]=data[4:4+head[1]] + data=data[4+head[1]:] + if not count: + return dict + return dict,data + +def encryptPasswordMD5(password,key): + m=md5() + m.update(key) + m.update(md5(password).digest()) + m.update("AOL Instant Messenger (SM)") + return m.digest() + +def encryptPasswordICQ(password): + key=[0xF3,0x26,0x81,0xC4,0x39,0x86,0xDB,0x92,0x71,0xA3,0xB9,0xE6,0x53,0x7A,0x95,0x7C] + bytes=map(ord,password) + r="" + for i in range(len(bytes)): + r=r+chr(bytes[i]^key[i%len(key)]) + return r + +def dehtml(text): + text=string.replace(text,"
                                ","\n") + text=string.replace(text,"
                                ","\n") + text=string.replace(text,"
                                ","\n") # XXX make this a regexp + text=string.replace(text,"
                                ","\n") + text=re.sub('<.*?>','',text) + text=string.replace(text,'>','>') + text=string.replace(text,'<','<') + text=string.replace(text,' ',' ') + text=string.replace(text,'"','"') + text=string.replace(text,'&','&') + return text + +def html(text): + text=string.replace(text,'"','"') + text=string.replace(text,'&','&') + text=string.replace(text,'<','<') + text=string.replace(text,'>','>') + text=string.replace(text,"\n","
                                ") + return '%s'%text + +class OSCARUser: + def __init__(self, name, warn, tlvs): + self.name = name + self.warning = warn + self.flags = [] + self.caps = [] + for k,v in tlvs.items(): + if k == 1: # user flags + v=struct.unpack('!H',v)[0] + for o, f in [(1,'trial'), + (2,'unknown bit 2'), + (4,'aol'), + (8,'unknown bit 4'), + (16,'aim'), + (32,'away'), + (1024,'activebuddy')]: + if v&o: self.flags.append(f) + elif k == 2: # member since date + self.memberSince = struct.unpack('!L',v)[0] + elif k == 3: # on-since + self.onSince = struct.unpack('!L',v)[0] + elif k == 4: # idle time + self.idleTime = struct.unpack('!H',v)[0] + elif k == 5: # unknown + pass + elif k == 6: # icq online status + if v[2] == '\x00': + self.icqStatus = 'online' + elif v[2] == '\x01': + self.icqStatus = 'away' + elif v[2] == '\x02': + self.icqStatus = 'dnd' + elif v[2] == '\x04': + self.icqStatus = 'out' + elif v[2] == '\x10': + self.icqStatus = 'busy' + else: + self.icqStatus = 'unknown' + elif k == 10: # icq ip address + self.icqIPaddy = socket.inet_ntoa(v) + elif k == 12: # icq random stuff + self.icqRandom = v + elif k == 13: # capabilities + caps=[] + while v: + c=v[:16] + if c==CAP_ICON: caps.append("icon") + elif c==CAP_IMAGE: caps.append("image") + elif c==CAP_VOICE: caps.append("voice") + elif c==CAP_CHAT: caps.append("chat") + elif c==CAP_GET_FILE: caps.append("getfile") + elif c==CAP_SEND_FILE: caps.append("sendfile") + elif c==CAP_SEND_LIST: caps.append("sendlist") + elif c==CAP_GAMES: caps.append("games") + else: caps.append(("unknown",c)) + v=v[16:] + caps.sort() + self.caps=caps + elif k == 14: pass + elif k == 15: # session length (aim) + self.sessionLength = struct.unpack('!L',v)[0] + elif k == 16: # session length (aol) + self.sessionLength = struct.unpack('!L',v)[0] + elif k == 30: # no idea + pass + else: + log.msg("unknown tlv for user %s\nt: %s\nv: %s"%(self.name,k,repr(v))) + + def __str__(self): + s = '' + return s + + +class SSIGroup: + def __init__(self, name, tlvs = {}): + self.name = name + #self.tlvs = [] + #self.userIDs = [] + self.usersToID = {} + self.users = [] + #if not tlvs.has_key(0xC8): return + #buddyIDs = tlvs[0xC8] + #while buddyIDs: + # bid = struct.unpack('!H',buddyIDs[:2])[0] + # buddyIDs = buddyIDs[2:] + # self.users.append(bid) + + def findIDFor(self, user): + return self.usersToID[user] + + def addUser(self, buddyID, user): + self.usersToID[user] = buddyID + self.users.append(user) + user.group = self + + def oscarRep(self, groupID, buddyID): + tlvData = TLV(0xc8, reduce(lambda x,y:x+y, [struct.pack('!H',self.usersToID[x]) for x in self.users])) + return struct.pack('!H', len(self.name)) + self.name + \ + struct.pack('!HH', groupID, buddyID) + '\000\001' + tlvData + + +class SSIBuddy: + def __init__(self, name, tlvs = {}): + self.name = name + self.tlvs = tlvs + for k,v in tlvs.items(): + if k == 0x013c: # buddy comment + self.buddyComment = v + elif k == 0x013d: # buddy alerts + actionFlag = ord(v[0]) + whenFlag = ord(v[1]) + self.alertActions = [] + self.alertWhen = [] + if actionFlag&1: + self.alertActions.append('popup') + if actionFlag&2: + self.alertActions.append('sound') + if whenFlag&1: + self.alertWhen.append('online') + if whenFlag&2: + self.alertWhen.append('unidle') + if whenFlag&4: + self.alertWhen.append('unaway') + elif k == 0x013e: + self.alertSound = v + + def oscarRep(self, groupID, buddyID): + tlvData = reduce(lambda x,y: x+y, map(lambda (k,v):TLV(k,v), self.tlvs.items()), '\000\000') + return struct.pack('!H', 
len(self.name)) + self.name + \ + struct.pack('!HH', groupID, buddyID) + '\000\000' + tlvData + + +class OscarConnection(protocol.Protocol): + def connectionMade(self): + self.state="" + self.seqnum=0 + self.buf='' + self.stopKeepAliveID = None + self.setKeepAlive(4*60) # 4 minutes + + def connectionLost(self, reason): + log.msg("Connection Lost! %s" % self) + self.stopKeepAlive() + +# def connectionFailed(self): +# log.msg("Connection Failed! %s" % self) +# self.stopKeepAlive() + + def sendFLAP(self,data,channel = 0x02): + header="!cBHH" + self.seqnum=(self.seqnum+1)%0xFFFF + seqnum=self.seqnum + head=struct.pack(header,'*', channel, + seqnum, len(data)) + self.transport.write(head+str(data)) +# if isinstance(self, ChatService): +# logPacketData(head+str(data)) + + def readFlap(self): + header="!cBHH" + if len(self.buf)<6: return + flap=struct.unpack(header,self.buf[:6]) + if len(self.buf)<6+flap[3]: return + data,self.buf=self.buf[6:6+flap[3]],self.buf[6+flap[3]:] + return [flap[1],data] + + def dataReceived(self,data): +# if isinstance(self, ChatService): +# logPacketData(data) + self.buf=self.buf+data + flap=self.readFlap() + while flap: + func=getattr(self,"oscar_%s"%self.state,None) + if not func: + log.msg("no func for state: %s" % self.state) + state=func(flap) + if state: + self.state=state + flap=self.readFlap() + + def setKeepAlive(self,t): + self.keepAliveDelay=t + self.stopKeepAlive() + self.stopKeepAliveID = reactor.callLater(t, self.sendKeepAlive) + + def sendKeepAlive(self): + self.sendFLAP("",0x05) + self.stopKeepAliveID = reactor.callLater(self.keepAliveDelay, self.sendKeepAlive) + + def stopKeepAlive(self): + if self.stopKeepAliveID: + self.stopKeepAliveID.cancel() + self.stopKeepAliveID = None + + def disconnect(self): + """ + send the disconnect flap, and sever the connection + """ + self.sendFLAP('', 0x04) + def f(reason): pass + self.connectionLost = f + self.transport.loseConnection() + + +class SNACBased(OscarConnection): + snacFamilies = { + # family : (version, toolID, toolVersion) + } + def __init__(self,cookie): + self.cookie=cookie + self.lastID=0 + self.supportedFamilies = () + self.requestCallbacks={} # request id:Deferred + + def sendSNAC(self,fam,sub,data,flags=[0,0]): + """ + send a snac and wait for the response by returning a Deferred. + """ + reqid=self.lastID + self.lastID=reqid+1 + d = defer.Deferred() + d.reqid = reqid + + #d.addErrback(self._ebDeferredError,fam,sub,data) # XXX for testing + + self.requestCallbacks[reqid] = d + self.sendFLAP(SNAC(fam,sub,reqid,data)) + return d + + def _ebDeferredError(self, error, fam, sub, data): + log.msg('ERROR IN DEFERRED %s' % error) + log.msg('on sending of message, family 0x%02x, subtype 0x%02x' % (fam, sub)) + log.msg('data: %s' % repr(data)) + + def sendSNACnr(self,fam,sub,data,flags=[0,0]): + """ + send a snac, but don't bother adding a deferred, we don't care. 
+ """ + self.sendFLAP(SNAC(fam,sub,0x10000*fam+sub,data)) + + def oscar_(self,data): + self.sendFLAP("\000\000\000\001"+TLV(6,self.cookie), 0x01) + return "Data" + + def oscar_Data(self,data): + snac=readSNAC(data[1]) + if self.requestCallbacks.has_key(snac[4]): + d = self.requestCallbacks[snac[4]] + del self.requestCallbacks[snac[4]] + if snac[1]!=1: + d.callback(snac) + else: + d.errback(snac) + return + func=getattr(self,'oscar_%02X_%02X'%(snac[0],snac[1]),None) + if not func: + self.oscar_unknown(snac) + else: + func(snac[2:]) + return "Data" + + def oscar_unknown(self,snac): + log.msg("unknown for %s" % self) + log.msg(snac) + + + def oscar_01_03(self, snac): + numFamilies = len(snac[3])/2 + self.supportedFamilies = struct.unpack("!"+str(numFamilies)+'H', snac[3]) + d = '' + for fam in self.supportedFamilies: + if self.snacFamilies.has_key(fam): + d=d+struct.pack('!2H',fam,self.snacFamilies[fam][0]) + self.sendSNACnr(0x01,0x17, d) + + def oscar_01_0A(self,snac): + """ + change of rate information. + """ + # this can be parsed, maybe we can even work it in + pass + + def oscar_01_18(self,snac): + """ + host versions, in the same format as we sent + """ + self.sendSNACnr(0x01,0x06,"") #pass + + def clientReady(self): + """ + called when the client is ready to be online + """ + d = '' + for fam in self.supportedFamilies: + if self.snacFamilies.has_key(fam): + version, toolID, toolVersion = self.snacFamilies[fam] + d = d + struct.pack('!4H',fam,version,toolID,toolVersion) + self.sendSNACnr(0x01,0x02,d) + +class BOSConnection(SNACBased): + snacFamilies = { + 0x01:(3, 0x0110, 0x059b), + 0x13:(3, 0x0110, 0x059b), + 0x02:(1, 0x0110, 0x059b), + 0x03:(1, 0x0110, 0x059b), + 0x04:(1, 0x0110, 0x059b), + 0x06:(1, 0x0110, 0x059b), + 0x08:(1, 0x0104, 0x0001), + 0x09:(1, 0x0110, 0x059b), + 0x0a:(1, 0x0110, 0x059b), + 0x0b:(1, 0x0104, 0x0001), + 0x0c:(1, 0x0104, 0x0001) + } + + capabilities = None + + def __init__(self,username,cookie): + SNACBased.__init__(self,cookie) + self.username=username + self.profile = None + self.awayMessage = None + self.services = {} + + if not self.capabilities: + self.capabilities = [CAP_CHAT] + + def parseUser(self,data,count=None): + l=ord(data[0]) + name=data[1:1+l] + warn,foo=struct.unpack("!HH",data[1+l:5+l]) + warn=int(warn/10) + tlvs=data[5+l:] + if count: + tlvs,rest = readTLVs(tlvs,foo) + else: + tlvs,rest = readTLVs(tlvs), None + u = OSCARUser(name, warn, tlvs) + if rest == None: + return u + else: + return u, rest + + def oscar_01_05(self, snac, d = None): + """ + data for a new service connection + d might be a deferred to be called back when the service is ready + """ + tlvs = readTLVs(snac[3][2:]) + service = struct.unpack('!H',tlvs[0x0d])[0] + ip = tlvs[5] + cookie = tlvs[6] + #c = serviceClasses[service](self, cookie, d) + c = protocol.ClientCreator(reactor, serviceClasses[service], self, cookie, d) + def addService(x): + self.services[service] = x + c.connectTCP(ip, 5190).addCallback(addService) + #self.services[service] = c + + def oscar_01_07(self,snac): + """ + rate paramaters + """ + self.sendSNACnr(0x01,0x08,"\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05") # ack + self.initDone() + self.sendSNACnr(0x13,0x02,'') # SSI rights info + self.sendSNACnr(0x02,0x02,'') # location rights info + self.sendSNACnr(0x03,0x02,'') # buddy list rights + self.sendSNACnr(0x04,0x04,'') # ICBM parms + self.sendSNACnr(0x09,0x02,'') # BOS rights + + def oscar_01_10(self,snac): + """ + we've been warned + """ + skip = struct.unpack('!H',snac[3][:2])[0] + newLevel = 
struct.unpack('!H',snac[3][2+skip:4+skip])[0]/10 + if len(snac[3])>4+skip: + by = self.parseUser(snac[3][4+skip:]) + else: + by = None + self.receiveWarning(newLevel, by) + + def oscar_01_13(self,snac): + """ + MOTD + """ + pass # we don't care for now + + def oscar_02_03(self, snac): + """ + location rights response + """ + tlvs = readTLVs(snac[3]) + self.maxProfileLength = tlvs[1] + + def oscar_03_03(self, snac): + """ + buddy list rights response + """ + tlvs = readTLVs(snac[3]) + self.maxBuddies = tlvs[1] + self.maxWatchers = tlvs[2] + + def oscar_03_0B(self, snac): + """ + buddy update + """ + self.updateBuddy(self.parseUser(snac[3])) + + def oscar_03_0C(self, snac): + """ + buddy offline + """ + self.offlineBuddy(self.parseUser(snac[3])) + +# def oscar_04_03(self, snac): + + def oscar_04_05(self, snac): + """ + ICBM parms response + """ + self.sendSNACnr(0x04,0x02,'\x00\x00\x00\x00\x00\x0b\x1f@\x03\xe7\x03\xe7\x00\x00\x00\x00') # IM rights + + def oscar_04_07(self, snac): + """ + ICBM message (instant message) + """ + data = snac[3] + cookie, data = data[:8], data[8:] + channel = struct.unpack('!H',data[:2])[0] + data = data[2:] + user, data = self.parseUser(data, 1) + tlvs = readTLVs(data) + if channel == 1: # message + flags = [] + multiparts = [] + for k, v in tlvs.items(): + if k == 2: + while v: + v = v[2:] # skip bad data + messageLength, charSet, charSubSet = struct.unpack('!3H', v[:6]) + messageLength -= 4 + message = [v[6:6+messageLength]] + if charSet == 0: + pass # don't add anything special + elif charSet == 2: + message.append('unicode') + elif charSet == 3: + message.append('iso-8859-1') + elif charSet == 0xffff: + message.append('none') + if charSubSet == 0xb: + message.append('macintosh') + if messageLength > 0: multiparts.append(tuple(message)) + v = v[6+messageLength:] + elif k == 3: + flags.append('acknowledge') + elif k == 4: + flags.append('auto') + elif k == 6: + flags.append('offline') + elif k == 8: + iconLength, foo, iconSum, iconStamp = struct.unpack('!LHHL',v) + if iconLength: + flags.append('icon') + flags.append((iconLength, iconSum, iconStamp)) + elif k == 9: + flags.append('buddyrequest') + elif k == 0xb: # unknown + pass + elif k == 0x17: + flags.append('extradata') + flags.append(v) + else: + log.msg('unknown TLV for incoming IM, %04x, %s' % (k,repr(v))) + +# unknown tlv for user SNewdorf +# t: 29 +# v: '\x00\x00\x00\x05\x02\x01\xd2\x04r\x00\x01\x01\x10/\x8c\x8b\x8a\x1e\x94*\xbc\x80}\x8d\xc4;\x1dEM' +# XXX what is this? 
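+# A hedged reading of the message loop above, based only on this parser and the
+# matching sendMessage() below (not on any external OSCAR documentation): each
+# text part is carried as two prefix bytes (written as '\x01\x01' by sendMessage
+# and skipped here as "bad data"), then struct.pack('!3H', len(text)+4, charSet,
+# charSubSet), then the text itself.  charSet 0 means plain ASCII, 2 is tagged
+# 'unicode', 3 'iso-8859-1', and 0xffff 'none'; charSubSet 0xb adds a
+# 'macintosh' tag.  multiparts therefore ends up as a list of (text, hint, ...)
+# tuples passed unchanged to receiveMessage().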
+ self.receiveMessage(user, multiparts, flags) + elif channel == 2: # rondevouz + status = struct.unpack('!H',tlvs[5][:2])[0] + requestClass = tlvs[5][10:26] + moreTLVs = readTLVs(tlvs[5][26:]) + if requestClass == CAP_CHAT: # a chat request + exchange = struct.unpack('!H',moreTLVs[10001][:2])[0] + name = moreTLVs[10001][3:-2] + instance = struct.unpack('!H',moreTLVs[10001][-2:])[0] + if not self.services.has_key(SERVICE_CHATNAV): + self.connectService(SERVICE_CHATNAV,1).addCallback(lambda x: self.services[SERVICE_CHATNAV].getChatInfo(exchange, name, instance).\ + addCallback(self._cbGetChatInfoForInvite, user, moreTLVs[12])) + else: + self.services[SERVICE_CHATNAV].getChatInfo(exchange, name, instance).\ + addCallback(self._cbGetChatInfoForInvite, user, moreTLVs[12]) + elif requestClass == CAP_SEND_FILE: + if moreTLVs.has_key(11): # cancel + log.msg('cancelled file request') + log.msg(status) + return # handle this later + name = moreTLVs[10001][9:-7] + desc = moreTLVs[12] + log.msg('file request from %s, %s, %s' % (user, name, desc)) + self.receiveSendFileRequest(user, name, desc, cookie) + else: + log.msg('unsupported rondevouz: %s' % requestClass) + log.msg(repr(moreTLVs)) + else: + log.msg('unknown channel %02x' % channel) + log.msg(tlvs) + + def _cbGetChatInfoForInvite(self, info, user, message): + apply(self.receiveChatInvite, (user,message)+info) + + def oscar_09_03(self, snac): + """ + BOS rights response + """ + tlvs = readTLVs(snac[3]) + self.maxPermitList = tlvs[1] + self.maxDenyList = tlvs[2] + + def oscar_0B_02(self, snac): + """ + stats reporting interval + """ + self.reportingInterval = struct.unpack('!H',snac[3][:2])[0] + + def oscar_13_03(self, snac): + """ + SSI rights response + """ + #tlvs = readTLVs(snac[3]) + pass # we don't know how to parse this + + # methods to be called by the client, and their support methods + def requestSelfInfo(self): + """ + ask for the OSCARUser for ourselves + """ + d = defer.Deferred() + self.sendSNAC(0x01, 0x0E, '').addCallback(self._cbRequestSelfInfo, d) + return d + + def _cbRequestSelfInfo(self, snac, d): + d.callback(self.parseUser(snac[5])) + + def initSSI(self): + """ + this sends the rate request for family 0x13 (Server Side Information) + so we can then use it + """ + return self.sendSNAC(0x13, 0x02, '').addCallback(self._cbInitSSI) + + def _cbInitSSI(self, snac, d): + return {} # don't even bother parsing this + + def requestSSI(self, timestamp = 0, revision = 0): + """ + request the server side information + if the deferred gets None, it means the SSI is the same + """ + return self.sendSNAC(0x13, 0x05, + struct.pack('!LH',timestamp,revision)).addCallback(self._cbRequestSSI) + + def _cbRequestSSI(self, snac, args = ()): + if snac[1] == 0x0f: # same SSI as we have + return + itemdata = snac[5][3:] + if args: + revision, groups, permit, deny, permitMode, visibility = args + else: + version, revision = struct.unpack('!BH', snac[5][:3]) + groups = {} + permit = [] + deny = [] + permitMode = None + visibility = None + while len(itemdata)>4: + nameLength = struct.unpack('!H', itemdata[:2])[0] + name = itemdata[2:2+nameLength] + groupID, buddyID, itemType, restLength = \ + struct.unpack('!4H', itemdata[2+nameLength:10+nameLength]) + tlvs = readTLVs(itemdata[10+nameLength:10+nameLength+restLength]) + itemdata = itemdata[10+nameLength+restLength:] + if itemType == 0: # buddies + groups[groupID].addUser(buddyID, SSIBuddy(name, tlvs)) + elif itemType == 1: # group + g = SSIGroup(name, tlvs) + if groups.has_key(0): 
groups[0].addUser(groupID, g) + groups[groupID] = g + elif itemType == 2: # permit + permit.append(name) + elif itemType == 3: # deny + deny.append(name) + elif itemType == 4: # permit deny info + if not tlvs.has_key(0xcb): + continue # this happens with ICQ + permitMode = {1:'permitall',2:'denyall',3:'permitsome',4:'denysome',5:'permitbuddies'}[ord(tlvs[0xca])] + visibility = {'\xff\xff\xff\xff':'all','\x00\x00\x00\x04':'notaim'}[tlvs[0xcb]] + elif itemType == 5: # unknown (perhaps idle data)? + pass + else: + log.msg('%s %s %s %s %s' % (name, groupID, buddyID, itemType, tlvs)) + timestamp = struct.unpack('!L',itemdata)[0] + if not timestamp: # we've got more packets coming + # which means add some deferred stuff + d = defer.Deferred() + self.requestCallbacks[snac[4]] = d + d.addCallback(self._cbRequestSSI, (revision, groups, permit, deny, permitMode, visibility)) + return d + return (groups[0].users,permit,deny,permitMode,visibility,timestamp,revision) + + def activateSSI(self): + """ + active the data stored on the server (use buddy list, permit deny settings, etc.) + """ + self.sendSNACnr(0x13,0x07,'') + + def startModifySSI(self): + """ + tell the OSCAR server to be on the lookout for SSI modifications + """ + self.sendSNACnr(0x13,0x11,'') + + def addItemSSI(self, item, groupID = None, buddyID = None): + """ + add an item to the SSI server. if buddyID == 0, then this should be a group. + this gets a callback when it's finished, but you can probably ignore it. + """ + if groupID is None: + if isinstance(item, SSIGroup): + groupID = 0 + else: + groupID = item.group.group.findIDFor(item.group) + if buddyID is None: + buddyID = item.group.findIDFor(item) + return self.sendSNAC(0x13,0x08, item.oscarRep(groupID, buddyID)) + + def modifyItemSSI(self, item, groupID = None, buddyID = None): + if groupID is None: + if isinstance(item, SSIGroup): + groupID = 0 + else: + groupID = item.group.group.findIDFor(item.group) + if buddyID is None: + buddyID = item.group.findIDFor(item) + return self.sendSNAC(0x13,0x09, item.oscarRep(groupID, buddyID)) + + def delItemSSI(self, item, groupID = None, buddyID = None): + if groupID is None: + if isinstance(item, SSIGroup): + groupID = 0 + else: + groupID = item.group.group.findIDFor(item.group) + if buddyID is None: + buddyID = item.group.findIDFor(item) + return self.sendSNAC(0x13,0x0A, item.oscarRep(groupID, buddyID)) + + def endModifySSI(self): + self.sendSNACnr(0x13,0x12,'') + + def setProfile(self, profile): + """ + set the profile. + send None to not set a profile (different from '' for a blank one) + """ + self.profile = profile + tlvs = '' + if self.profile is not None: + tlvs = TLV(1,'text/aolrtf; charset="us-ascii"') + \ + TLV(2,self.profile) + + tlvs = tlvs + TLV(5, ''.join(self.capabilities)) + self.sendSNACnr(0x02, 0x04, tlvs) + + def setAway(self, away = None): + """ + set the away message, or return (if away == None) + """ + self.awayMessage = away + tlvs = TLV(3,'text/aolrtf; charset="us-ascii"') + \ + TLV(4,away or '') + self.sendSNACnr(0x02, 0x04, tlvs) + + def setIdleTime(self, idleTime): + """ + set our idle time. don't call more than once with a non-0 idle time. + """ + self.sendSNACnr(0x01, 0x11, struct.pack('!L',idleTime)) + + def sendMessage(self, user, message, wantAck = 0, autoResponse = 0, offline = 0 ): \ + #haveIcon = 0, ): + """ + send a message to user (not an OSCARUseR). + message can be a string, or a multipart tuple. + if wantAck, we return a Deferred that gets a callback when the message is sent. 
+ if autoResponse, this message is an autoResponse, as if from an away message. + if offline, this is an offline message (ICQ only, I think) + """ + data = ''.join([chr(random.randrange(0, 127)) for i in range(8)]) # cookie + data = data + '\x00\x01' + chr(len(user)) + user + if not type(message) in (types.TupleType, types.ListType): + message = [[message,]] + if type(message[0][0]) == types.UnicodeType: + message[0].append('unicode') + messageData = '' + for part in message: + charSet = 0 + if 'unicode' in part[1:]: + charSet = 2 + part[0] = part[0].encode('utf-8') + elif 'iso-8859-1' in part[1:]: + charSet = 3 + part[0] = part[0].encode('iso-8859-1') + elif 'none' in part[1:]: + charSet = 0xffff + if 'macintosh' in part[1:]: + charSubSet = 0xb + else: + charSubSet = 0 + messageData = messageData + '\x01\x01' + \ + struct.pack('!3H',len(part[0])+4,charSet,charSubSet) + messageData = messageData + part[0] + data = data + TLV(2, '\x05\x01\x00\x03\x01\x01\x02'+messageData) + if wantAck: + data = data + TLV(3,'') + if autoResponse: + data = data + TLV(4,'') + if offline: + data = data + TLV(6,'') + if wantAck: + return self.sendSNAC(0x04, 0x06, data).addCallback(self._cbSendMessageAck, user, message) + self.sendSNACnr(0x04, 0x06, data) + + def _cbSendMessageAck(self, snac, user, message): + return user, message + + def connectService(self, service, wantCallback = 0, extraData = ''): + """ + connect to another service + if wantCallback, we return a Deferred that gets called back when the service is online. + if extraData, append that to our request. + """ + if wantCallback: + d = defer.Deferred() + self.sendSNAC(0x01,0x04,struct.pack('!H',service) + extraData).addCallback(self._cbConnectService, d) + return d + else: + self.sendSNACnr(0x01,0x04,struct.pack('!H',service)) + + def _cbConnectService(self, snac, d): + self.oscar_01_05(snac[2:], d) + + def createChat(self, shortName): + """ + create a chat room + """ + if self.services.has_key(SERVICE_CHATNAV): + return self.services[SERVICE_CHATNAV].createChat(shortName) + else: + return self.connectService(SERVICE_CHATNAV,1).addCallback(lambda s: s.createChat(shortName)) + + + def joinChat(self, exchange, fullName, instance): + """ + join a chat room + """ + #d = defer.Deferred() + return self.connectService(0x0e, 1, TLV(0x01, struct.pack('!HB',exchange, len(fullName)) + fullName + + struct.pack('!H', instance))).addCallback(self._cbJoinChat) #, d) + #return d + + def _cbJoinChat(self, chat): + del self.services[SERVICE_CHAT] + return chat + + def warnUser(self, user, anon = 0): + return self.sendSNAC(0x04, 0x08, '\x00'+chr(anon)+chr(len(user))+user).addCallback(self._cbWarnUser) + + def _cbWarnUser(self, snac): + oldLevel, newLevel = struct.unpack('!2H', snac[5]) + return oldLevel, newLevel + + def getInfo(self, user): + #if user. + return self.sendSNAC(0x02, 0x05, '\x00\x01'+chr(len(user))+user).addCallback(self._cbGetInfo) + + def _cbGetInfo(self, snac): + user, rest = self.parseUser(snac[5],1) + tlvs = readTLVs(rest) + return tlvs.get(0x02,None) + + def getAway(self, user): + return self.sendSNAC(0x02, 0x05, '\x00\x03'+chr(len(user))+user).addCallback(self._cbGetAway) + + def _cbGetAway(self, snac): + user, rest = self.parseUser(snac[5],1) + tlvs = readTLVs(rest) + return tlvs.get(0x04,None) # return None if there is no away message + + #def acceptSendFileRequest(self, + + # methods to be overriden by the client + def initDone(self): + """ + called when we get the rate information, which means we should do other init. stuff. 
+ """ + log.msg('%s initDone' % self) + pass + + def updateBuddy(self, user): + """ + called when a buddy changes status, with the OSCARUser for that buddy. + """ + log.msg('%s updateBuddy %s' % (self, user)) + pass + + def offlineBuddy(self, user): + """ + called when a buddy goes offline + """ + log.msg('%s offlineBuddy %s' % (self, user)) + pass + + def receiveMessage(self, user, multiparts, flags): + """ + called when someone sends us a message + """ + pass + + def receiveWarning(self, newLevel, user): + """ + called when someone warns us. + user is either None (if it was anonymous) or an OSCARUser + """ + pass + + def receiveChatInvite(self, user, message, exchange, fullName, instance, shortName, inviteTime): + """ + called when someone invites us to a chat room + """ + pass + + def chatReceiveMessage(self, chat, user, message): + """ + called when someone in a chatroom sends us a message in the chat + """ + pass + + def chatMemberJoined(self, chat, member): + """ + called when a member joins the chat + """ + pass + + def chatMemberLeft(self, chat, member): + """ + called when a member leaves the chat + """ + pass + + def receiveSendFileRequest(self, user, file, description, cookie): + """ + called when someone tries to send a file to us + """ + pass + +class OSCARService(SNACBased): + def __init__(self, bos, cookie, d = None): + SNACBased.__init__(self, cookie) + self.bos = bos + self.d = d + + def connectionLost(self, reason): + for k,v in self.bos.services.items(): + if v == self: + del self.bos.services[k] + return + + def clientReady(self): + SNACBased.clientReady(self) + if self.d: + self.d.callback(self) + self.d = None + +class ChatNavService(OSCARService): + snacFamilies = { + 0x01:(3, 0x0010, 0x059b), + 0x0d:(1, 0x0010, 0x059b) + } + def oscar_01_07(self, snac): + # rate info + self.sendSNACnr(0x01, 0x08, '\000\001\000\002\000\003\000\004\000\005') + self.sendSNACnr(0x0d, 0x02, '') + + def oscar_0D_09(self, snac): + self.clientReady() + + def getChatInfo(self, exchange, name, instance): + d = defer.Deferred() + self.sendSNAC(0x0d,0x04,struct.pack('!HB',exchange,len(name)) + \ + name + struct.pack('!HB',instance,2)). 
\ + addCallback(self._cbGetChatInfo, d) + return d + + def _cbGetChatInfo(self, snac, d): + data = snac[5][4:] + exchange, length = struct.unpack('!HB',data[:3]) + fullName = data[3:3+length] + instance = struct.unpack('!H',data[3+length:5+length])[0] + tlvs = readTLVs(data[8+length:]) + shortName = tlvs[0x6a] + inviteTime = struct.unpack('!L',tlvs[0xca])[0] + info = (exchange,fullName,instance,shortName,inviteTime) + d.callback(info) + + def createChat(self, shortName): + #d = defer.Deferred() + data = '\x00\x04\x06create\xff\xff\x01\x00\x03' + data = data + TLV(0xd7, 'en') + data = data + TLV(0xd6, 'us-ascii') + data = data + TLV(0xd3, shortName) + return self.sendSNAC(0x0d, 0x08, data).addCallback(self._cbCreateChat) + #return d + + def _cbCreateChat(self, snac): #d): + exchange, length = struct.unpack('!HB',snac[5][4:7]) + fullName = snac[5][7:7+length] + instance = struct.unpack('!H',snac[5][7+length:9+length])[0] + #d.callback((exchange, fullName, instance)) + return exchange, fullName, instance + +class ChatService(OSCARService): + snacFamilies = { + 0x01:(3, 0x0010, 0x059b), + 0x0E:(1, 0x0010, 0x059b) + } + def __init__(self,bos,cookie, d = None): + OSCARService.__init__(self,bos,cookie,d) + self.exchange = None + self.fullName = None + self.instance = None + self.name = None + self.members = None + + clientReady = SNACBased.clientReady # we'll do our own callback + + def oscar_01_07(self,snac): + self.sendSNAC(0x01,0x08,"\000\001\000\002\000\003\000\004\000\005") + self.clientReady() + + def oscar_0E_02(self, snac): +# try: # this is EVIL +# data = snac[3][4:] +# self.exchange, length = struct.unpack('!HB',data[:3]) +# self.fullName = data[3:3+length] +# self.instance = struct.unpack('!H',data[3+length:5+length])[0] +# tlvs = readTLVs(data[8+length:]) +# self.name = tlvs[0xd3] +# self.d.callback(self) +# except KeyError: + data = snac[3] + self.exchange, length = struct.unpack('!HB',data[:3]) + self.fullName = data[3:3+length] + self.instance = struct.unpack('!H',data[3+length:5+length])[0] + tlvs = readTLVs(data[8+length:]) + self.name = tlvs[0xd3] + self.d.callback(self) + + def oscar_0E_03(self,snac): + users=[] + rest=snac[3] + while rest: + user, rest = self.bos.parseUser(rest, 1) + users.append(user) + if not self.fullName: + self.members = users + else: + self.members.append(users[0]) + self.bos.chatMemberJoined(self,users[0]) + + def oscar_0E_04(self,snac): + user=self.bos.parseUser(snac[3]) + for u in self.members: + if u.name == user.name: # same person! 
+ self.members.remove(u) + self.bos.chatMemberLeft(self,user) + + def oscar_0E_06(self,snac): + data = snac[3] + user,rest=self.bos.parseUser(snac[3][14:],1) + tlvs = readTLVs(rest[8:]) + message=tlvs[1] + self.bos.chatReceiveMessage(self,user,message) + + def sendMessage(self,message): + tlvs=TLV(0x02,"us-ascii")+TLV(0x03,"en")+TLV(0x01,message) + self.sendSNAC(0x0e,0x05, + "\x46\x30\x38\x30\x44\x00\x63\x00\x00\x03\x00\x01\x00\x00\x00\x06\x00\x00\x00\x05"+ + struct.pack("!H",len(tlvs))+ + tlvs) + + def leaveChat(self): + self.disconnect() + +class OscarAuthenticator(OscarConnection): + BOSClass = BOSConnection + def __init__(self,username,password,deferred=None,icq=0): + self.username=username + self.password=password + self.deferred=deferred + self.icq=icq # icq mode is disabled + #if icq and self.BOSClass==BOSConnection: + # self.BOSClass=ICQConnection + + def oscar_(self,flap): + if not self.icq: + self.sendFLAP("\000\000\000\001", 0x01) + self.sendFLAP(SNAC(0x17,0x06,0, + TLV(TLV_USERNAME,self.username)+ + TLV(0x004B,''))) + self.state="Key" + else: + encpass=encryptPasswordICQ(self.password) + self.sendFLAP('\000\000\000\001'+ + TLV(0x01,self.username)+ + TLV(0x02,encpass)+ + TLV(0x03,'ICQ Inc. - Product of ICQ (TM).2001b.5.18.1.3659.85')+ + TLV(0x16,"\x01\x0a")+ + TLV(0x17,"\x00\x05")+ + TLV(0x18,"\x00\x12")+ + TLV(0x19,"\000\001")+ + TLV(0x1a,"\x0eK")+ + TLV(0x14,"\x00\x00\x00U")+ + TLV(0x0f,"en")+ + TLV(0x0e,"us"),0x01) + self.state="Cookie" + + def oscar_Key(self,data): + snac=readSNAC(data[1]) + key=snac[5][2:] + encpass=encryptPasswordMD5(self.password,key) + self.sendFLAP(SNAC(0x17,0x02,0, + TLV(TLV_USERNAME,self.username)+ + TLV(TLV_PASSWORD,encpass)+ + TLV(0x004C, '')+ # unknown + TLV(TLV_CLIENTNAME,"AOL Instant Messenger (SM), version 4.8.2790/WIN32")+ + TLV(0x0016,"\x01\x09")+ + TLV(TLV_CLIENTMAJOR,"\000\004")+ + TLV(TLV_CLIENTMINOR,"\000\010")+ + TLV(0x0019,"\000\000")+ + TLV(TLV_CLIENTSUB,"\x0A\xE6")+ + TLV(0x0014,"\x00\x00\x00\xBB")+ + TLV(TLV_LANG,"en")+ + TLV(TLV_COUNTRY,"us")+ + TLV(TLV_USESSI,"\001"))) + return "Cookie" + + def oscar_Cookie(self,data): + snac=readSNAC(data[1]) + if self.icq: + i=snac[5].find("\000") + snac[5]=snac[5][i:] + tlvs=readTLVs(snac[5]) + if tlvs.has_key(6): + self.cookie=tlvs[6] + server,port=string.split(tlvs[5],":") + d = self.connectToBOS(server, int(port)) + d.addErrback(lambda x: log.msg("Connection Failed! Reason: %s" % x)) + if self.deferred: + d.chainDeferred(self.deferred) + self.disconnect() + elif tlvs.has_key(8): + errorcode=tlvs[8] + errorurl=tlvs[4] + if errorcode=='\000\030': + error="You are attempting to sign on again too soon. Please try again later." + elif errorcode=='\000\005': + error="Invalid Username or Password." + else: error=repr(errorcode) + self.error(error,errorurl) + else: + log.msg('hmm, weird tlvs for %s cookie packet' % str(self)) + log.msg(tlvs) + log.msg('snac') + log.msg(str(snac)) + return "None" + + def oscar_None(self,data): pass + + def connectToBOS(self, server, port): + c = protocol.ClientCreator(reactor, self.BOSClass, self.username, self.cookie) + return c.connectTCP(server, int(port)) + + def error(self,error,url): + log.msg("ERROR! 
%s %s" % (error,url)) + if self.deferred: self.deferred.errback((error,url)) + self.transport.loseConnection() + +FLAP_CHANNEL_NEW_CONNECTION = 0x01 +FLAP_CHANNEL_DATA = 0x02 +FLAP_CHANNEL_ERROR = 0x03 +FLAP_CHANNEL_CLOSE_CONNECTION = 0x04 + +SERVICE_CHATNAV = 0x0d +SERVICE_CHAT = 0x0e +serviceClasses = { + SERVICE_CHATNAV:ChatNavService, + SERVICE_CHAT:ChatService +} +TLV_USERNAME = 0x0001 +TLV_CLIENTNAME = 0x0003 +TLV_COUNTRY = 0x000E +TLV_LANG = 0x000F +TLV_CLIENTMAJOR = 0x0017 +TLV_CLIENTMINOR = 0x0018 +TLV_CLIENTSUB = 0x001A +TLV_PASSWORD = 0x0025 +TLV_USESSI = 0x004A + +CAP_ICON = '\011F\023FL\177\021\321\202"DEST\000\000' +CAP_VOICE = '\011F\023AL\177\021\321\202"DEST\000\000' +CAP_IMAGE = '\011F\023EL\177\021\321\202"DEST\000\000' +CAP_CHAT = 't\217$ b\207\021\321\202"DEST\000\000' +CAP_GET_FILE = '\011F\023HL\177\021\321\202"DEST\000\000' +CAP_SEND_FILE = '\011F\023CL\177\021\321\202"DEST\000\000' +CAP_GAMES = '\011F\023GL\177\021\321\202"DEST\000\000' +CAP_SEND_LIST = '\011F\023KL\177\021\321\202"DEST\000\000' +CAP_SERV_REL = '\011F\023IL\177\021\321\202"DEST\000\000' diff --git a/vendor/Twisted-10.0.0/twisted/words/protocols/toc.py b/vendor/Twisted-10.0.0/twisted/words/protocols/toc.py new file mode 100644 index 000000000000..4612e835c8f7 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/protocols/toc.py @@ -0,0 +1,1622 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Implements a AOL Instant Messenger TOC server and client, using the Twisted +framework. + +TODO: +info,dir: see how gaim connects for this...it may never work if it tries to +connect to the aim server automatically + +This module is deprecated. + +Maintainer: Paul Swartz +""" + +import warnings +warnings.warn( + "twisted.words.protocols.toc is deprecated since Twisted 9.0. 
" + "Use twisted.words.protocols.oscar instead.", + category=DeprecationWarning, + stacklevel=2) + + +# twisted imports +from twisted.internet import reactor, protocol +from twisted.python import log + +# base imports +import struct +import string +import time +import base64 +import os +import StringIO + +SIGNON,DATA,ERROR,SIGNOFF,KEEP_ALIVE=range(1,6) +PERMITALL,DENYALL,PERMITSOME,DENYSOME=range(1,5) + +DUMMY_CHECKSUM = -559038737 # 0xdeadbeef + +def quote(s): + rep=['\\','$','{','}','[',']','(',')','"'] + for r in rep: + s=string.replace(s,r,"\\"+r) + return "\""+s+"\"" + +def unquote(s): + if s=="": return "" + if s[0]!='"': return s + r=string.replace + s=s[1:-1] + s=r(s,"\\\\","\\") + s=r(s,"\\$","$") + s=r(s,"\\{","{") + s=r(s,"\\}","}") + s=r(s,"\\[","[") + s=r(s,"\\]","]") + s=r(s,"\\(","(") + s=r(s,"\\)",")") + s=r(s,"\\\"","\"") + return s + +def unquotebeg(s): + for i in range(1,len(s)): + if s[i]=='"' and s[i-1]!='\\': + q=unquote(s[:i+1]) + return [q,s[i+2:]] + +def unroast(pw): + roaststring="Tic/Toc" + pw=string.lower(pw[2:]) + r="" + count=0 + hex=["0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f"] + while pw: + st,pw=pw[:2],pw[2:] + value=(16*hex.index(st[0]))+hex.index(st[1]) + xor=ord(roaststring[count]) + count=(count+1)%len(roaststring) + r=r+chr(value^xor) + return r + +def roast(pw): + # contributed by jemfinch on #python + key="Tic/Toc" + ro="0x" + i=0 + ascii=map(ord,pw) + for c in ascii: + ro=ro+'%02x'%(c^ord(key[i%len(key)])) + i=i+1 + return string.lower(ro) + +def checksum(b): + return DUMMY_CHECKSUM # do it like gaim does, since the checksum + # formula doesn't work +## # used in file transfers +## check0 = check1 = 0x00ff +## for i in range(len(b)): +## if i%2: +## if ord(b[i])>check1: +## check1=check1+0x100 # wrap +## if check0==0: +## check0=0x00ff +## if check1==0x100: +## check1=check1-1 +## else: +## check0=check0-1 +## check1=check1-ord(b[i]) +## else: +## if ord(b[i])>check0: # wrap +## check0=check0+0x100 +## if check1==0: +## check1=0x00ff +## if check0==0x100: +## check0=check0-1 +## else: +## check1=check1-1 +## check0=check0-ord(b[i]) +## check0=check0 & 0xff +## check1=check1 & 0xff +## checksum=(long(check0)*0x1000000)+(long(check1)*0x10000) +## return checksum + +def checksum_file(f): + return DUMMY_CHECKSUM # do it like gaim does, since the checksum + # formula doesn't work +## check0=check1=0x00ff +## i=0 +## while 1: +## b=f.read() +## if not b: break +## for char in b: +## i=not i +## if i: +## if ord(char)>check1: +## check1=check1+0x100 # wrap +## if check0==0: +## check0=0x00ff +## if check1==0x100: +## check1=check1-1 +## else: +## check0=check0-1 +## check1=check1-ord(char) +## else: +## if ord(char)>check0: # wrap +## check0=check0+0x100 +## if check1==0: +## check1=0x00ff +## if check0==0x100: +## check0=check0-1 +## else: +## check1=check1-1 +## check0=check0-ord(char) +## check0=check0 & 0xff +## check1=check1 & 0xff +## checksum=(long(check0)*0x1000000)+(long(check1)*0x10000) +## return checksum + +def normalize(s): + s=string.lower(s) + s=string.replace(s," ","") + return s + + +class TOCParseError(ValueError): + pass + + +class TOC(protocol.Protocol): + users={} + + def connectionMade(self): + # initialization of protocol + self._buf="" + self._ourseqnum=0L + self._theirseqnum=0L + self._mode="Flapon" + self._onlyflaps=0 + self._laststatus={} # the last status for a user + self.username=None + self.permitmode=PERMITALL + self.permitlist=[] + self.denylist=[] + self.buddylist=[] + self.signontime=0 + 
self.idletime=0 + self.userinfo="
                                " + self.userclass=" O" + self.away="" + self.saved=None + + def _debug(self,data): + log.msg(data) + + def connectionLost(self, reason): + self._debug("dropped connection from %s" % self.username) + try: + del self.factory.users[self.username] + except: + pass + for k in self.factory.chatroom.keys(): + try: + self.factory.chatroom[k].leave(self) + except TOCParseError: + pass + if self.saved: + self.factory.savedusers[self.username]=self.saved + self.updateUsers() + + def sendFlap(self,type,data): + """ + send a FLAP to the client + """ + send="*" + self._debug(data) + if type==DATA: + data=data+"\000" + length=len(data) + send=send+struct.pack("!BHH",type,self._ourseqnum,length) + send=send+data + self._ourseqnum=self._ourseqnum+1 + if self._ourseqnum>(256L**4): + self._ourseqnum=0 + self.transport.write(send) + + def dataReceived(self,data): + self._buf=self._buf+data + try: + func=getattr(self,"mode%s"%self._mode) + except: + return + self._mode=func() + if self._onlyflaps and self.isFlap(): self.dataReceived("") + + def isFlap(self): + """ + tests to see if a flap is actually on the buffer + """ + if self._buf=='': return 0 + if self._buf[0]!="*": return 0 + if len(self._buf)<6: return 0 + foo,type,seqnum,length=struct.unpack("!BBHH",self._buf[:6]) + if type not in range(1,6): return 0 + if len(self._buf)<6+length: return 0 + return 1 + + def readFlap(self): + """ + read the first FLAP off self._buf, raising errors if it isn't in the right form. + the FLAP is the basic TOC message format, and is logically equivilant to a packet in TCP + """ + if self._buf=='': return None + if self._buf[0]!="*": + raise TOCParseError + if len(self._buf)<6: return None + foo,type,seqnum,length=struct.unpack("!BBHH",self._buf[:6]) + if len(self._buf)<6+length: return None + data=self._buf[6:6+length] + self._buf=self._buf[6+length:] + if data and data[-1]=="\000": + data=data[:-1] + self._debug([type,data]) + return [type,data] + + #def modeWeb(self): + # try: + # line,rest=string.split(self._buf,"\n",1) + # get,username,http=string.split(line," ",2) + # except: + # return "Web" # not enough data + # foo,type,username=string.split(username,"/") + # if type=="info": + # user=self.factory.users[username] + # text="User Information for %sUsername: %s
\nWarning Level: %s%\n Online Since: %s\nIdle Minutes: %s\n\n%s\n
                                \n"%(user.saved.nick, user.saved.nick, user.saved.evilness, time.asctime(user.signontime), int((time.time()-user.idletime)/60), user.userinfo) + # self.transport.write("HTTP/1.1 200 OK\n") + # self.transport.write("Content-Type: text/html\n") + # self.transport.write("Content-Length: %s\n\n"%len(text)) + # self.transport.write(text) + # self.loseConnection() + + def modeFlapon(self): + #if self._buf[:3]=="GET": self.modeWeb() # TODO: get this working + if len(self._buf)<10: return "Flapon" # not enough bytes + flapon,self._buf=self._buf[:10],self._buf[10:] + if flapon!="FLAPON\r\n\r\n": + raise TOCParseError + self.sendFlap(SIGNON,"\000\000\000\001") + self._onlyflaps=1 + return "Signon" + + def modeSignon(self): + flap=self.readFlap() + if flap==None: + return "Signon" + if flap[0]!=SIGNON: raise TOCParseError + version,tlv,unlength=struct.unpack("!LHH",flap[1][:8]) + if version!=1 or tlv!=1 or unlength+8!=len(flap[1]): + raise TOCParseError + self.username=normalize(flap[1][8:]) + if self.username in self.factory.savedusers.keys(): + self.saved=self.factory.savedusers[self.username] + else: + self.saved=SavedUser() + self.saved.nick=self.username + return "TocSignon" + + def modeTocSignon(self): + flap=self.readFlap() + if flap==None: + return "TocSignon" + if flap[0]!=DATA: raise TOCParseError + data=string.split(flap[1]," ") + if data[0]!="toc_signon": raise TOCParseError + for i in data: + if not i:data.remove(i) + password=unroast(data[4]) + if not(self.authorize(data[1],int(data[2]),data[3],password)): + self.sendError(BAD_NICKNAME) + self.transport.loseConnection() + return + self.sendFlap(DATA,"SIGN_ON:TOC1.0") + self.sendFlap(DATA,"NICK:%s"%self.saved.nick) + self.sendFlap(DATA,"CONFIG:%s"%self.saved.config) + # sending user configuration goes here + return "Connected" + + def authorize(self,server,port,username,password): + if self.saved.password=="": + self.saved.password=password + return 1 + else: + return self.saved.password==password + + def modeConnected(self): + flap=self.readFlap() + while flap!=None: + if flap[0] not in [DATA,KEEP_ALIVE]: raise TOCParseError + flapdata=string.split(flap[1]," ",1) + tocname=flapdata[0][4:] + if len(flapdata)==2: + data=flapdata[1] + else: + data="" + func=getattr(self,"toc_"+tocname,None) + if func!=None: + func(data) + else: + self.toc_unknown(tocname,data) + flap=self.readFlap() + return "Connected" + + def toc_unknown(self,tocname,data): + self._debug("unknown! %s %s" % (tocname,data)) + + def toc_init_done(self,data): + """ + called when all the setup is done. + + toc_init_done + """ + self.signontime=int(time.time()) + self.factory.users[self.username]=self + self.updateUsers() + + def toc_add_permit(self,data): + """ + adds users to the permit list. if the list is null, then set the mode to DENYALL + """ + if data=="": + self.permitmode=DENYALL + self.permitlist=[] + self.denylist=[] + else: + self.permitmode=PERMITSOME + self.denylist=[] + users=string.split(data," ") + map(self.permitlist.append,users) + self.updateUsers() + + def toc_add_deny(self,data): + """ + adds users to the deny list. if the list is null, then set the mode to PERMITALL + """ + if data=="": + self.permitmode=PERMITALL + self.permitlist=[] + self.denylist=[] + else: + self.permitmode=DENYSOME + self.permitlist=[] + users=string.split(data," ") + map(self.denylist.append,users) + self.updateUsers() + + def toc_evil(self,data): + """ + warns a user. 
+ + toc_evil + """ + username,nora=string.split(data," ") + if nora=="anon": + user="" + else: + user=self.saved.nick + if not(self.factory.users.has_key(username)): + self.sendError(CANT_WARN,username) + return + if self.factory.users[username].saved.evilness>=100: + self.sendError(CANT_WARN,username) + return + self.factory.users[username].evilFrom(user) + + def toc_add_buddy(self,data): + """ + adds users to the buddy list + + toc_add_buddy [] []... + """ + buddies=map(normalize,string.split(data," ")) + for b in buddies: + if b not in self.buddylist: + self.buddylist.append(b) + for buddy in buddies: + try: + buddy=self.factory.users[buddy] + except: + pass + else: + self.buddyUpdate(buddy) + + def toc_remove_buddy(self,data): + """ + removes users from the buddy list + + toc_remove_buddy [] []... + """ + buddies=string.split(data," ") + for buddy in buddies: + try: + self.buddylist.remove(normalize(buddy)) + except: pass + + def toc_send_im(self,data): + """ + incoming instant message + + toc_send_im [auto] + """ + username,data=string.split(data," ",1) + auto=0 + if data[-4:]=="auto": + auto=1 + data=data[:-5] + data=unquote(data) + if not(self.factory.users.has_key(username)): + self.sendError(NOT_AVAILABLE,username) + return + user=self.factory.users[username] + if not(self.canContact(user)): + self.sendError(NOT_AVAILABLE,username) + return + user.hearWhisper(self,data,auto) + + def toc_set_info(self,data): + """ + set the users information, retrivable with toc_get_info + + toc_set_info + """ + info=unquote(data) + self._userinfo=info + + def toc_set_idle(self,data): + """ + set/unset idle + + toc_set_idle + """ + seconds=int(data) + self.idletime=time.time()-seconds # time when they started being idle + self.updateUsers() + + def toc_set_away(self,data): + """ + set/unset away message + + toc_set_away [] + """ + away=unquote(data) + if not self.away and away: # setting an away message + self.away=away + self.userclass=self.userclass+'U' + self.updateUsers() + elif self.away and not away: # coming back + self.away="" + self.userclass=self.userclass[:2] + self.updateUsers() + else: + raise TOCParseError + + def toc_chat_join(self,data): + """ + joins the chat room. + + toc_chat_join + """ + exchange,name=string.split(data," ",1) + self.factory.getChatroom(int(exchange),unquote(name)).join(self) + + def toc_chat_invite(self,data): + """ + invite others to the room. + + toc_chat_invite []... + """ + id,data=string.split(data," ",1) + id=int(id) + message,data=unquotebeg(data) + buddies=string.split(data," ") + for b in buddies: + room=self.factory.chatroom[id] + bud=self.factory.users[b] + bud.chatInvite(room,self,message) + + def toc_chat_accept(self,data): + """ + accept an invitation. + + toc_chat_accept + """ + id=int(data) + self.factory.chatroom[id].join(self) + + def toc_chat_send(self,data): + """ + send a message to the chat room. + + toc_chat_send + """ + id,message=string.split(data," ",1) + id=int(id) + message=unquote(message) + self.factory.chatroom[id].say(self,message) + + def toc_chat_whisper(self,data): + id,user,message=string.split(data," ",2) + id=int(id) + room=self.factory.chatroom[id] + message=unquote(message) + self.factory.users[user].chatWhisper(room,self,message) + + def toc_chat_leave(self,data): + """ + leave the room. + + toc_chat_leave + """ + id=int(data) + self.factory.chatroom[id].leave(self) + + def toc_set_config(self,data): + """ + set the saved config. this gets send when you log in. 
+ + toc_set_config + """ + self.saved.config=unquote(data) + + def toc_get_info(self,data): + """ + get the user info for a user + + toc_get_info + """ + if not self.factory.users.has_key(data): + self.sendError(901,data) + return + self.sendFlap(2,"GOTO_URL:TIC:info/%s"%data) + + def toc_format_nickname(self,data): + """ + change the format of your nickname. + + toc_format_nickname + """ + # XXX may not work + nick=unquote(data) + if normalize(nick)==self.username: + self.saved.nick=nick + self.sendFlap(2,"ADMIN_NICK_STATUS:0") + else: + self.sendError(BAD_INPUT) + + def toc_change_passwd(self,data): + orig,data=unquotebeg(data) + new=unquote(data) + if orig==self.saved.password: + self.saved.password=new + self.sendFlap(2,"ADMIN_PASSWD_STATUS:0") + else: + self.sendError(BAD_INPUT) + + def sendError(self,code,*varargs): + """ + send an error to the user. listing of error messages is below. + """ + send="ERROR:%s"%code + for v in varargs: + send=send+":"+v + self.sendFlap(DATA,send) + + def updateUsers(self): + """ + Update the users who have us on their buddylist. + Called when the user changes anything (idle,away) so people can get updates. + """ + for user in self.factory.users.values(): + if self.username in user.buddylist and self.canContact(user): + user.buddyUpdate(self) + + def getStatus(self,user): + if self.canContact(user): + if self in self.factory.users.values():ol='T' + else: ol='F' + idle=0 + if self.idletime: + idle=int((time.time()-self.idletime)/60) + return (self.saved.nick,ol,self.saved.evilness,self.signontime,idle,self.userclass) + else: + return (self.saved.nick,'F',0,0,0,self.userclass) + + def canContact(self,user): + if self.permitmode==PERMITALL: return 1 + elif self.permitmode==DENYALL: return 0 + elif self.permitmode==PERMITSOME: + if user.username in self.permitlist: return 1 + else: return 0 + elif self.permitmode==DENYSOME: + if user.username in self.denylist: return 0 + else: return 1 + else: + assert 0,"bad permitmode %s" % self.permitmode + + def buddyUpdate(self,user): + """ + Update the buddy. Called from updateUsers() + """ + if not self.canContact(user): return + status=user.getStatus(self) + if not self._laststatus.has_key(user): + self._laststatus[user]=() + if self._laststatus[user]!=status: + send="UPDATE_BUDDY:%s:%s:%s:%s:%s:%s"%status + self.sendFlap(DATA,send) + self._laststatus[user]=status + + def hearWhisper(self,user,data,auto=0): + """ + Called when you get an IM. If auto=1, it's an autoreply from an away message. 
+ """ + if not self.canContact(user): return + if auto: auto='T' + else: auto='F' + send="IM_IN:%s:%s:%s"%(user.saved.nick,auto,data) + self.sendFlap(DATA,send) + + def evilFrom(self,user): + if user=="": + percent=0.03 + else: + percent=0.1 + self.saved.evilness=self.saved.evilness+int((100-self.saved.evilness)*percent) + self.sendFlap(2,"EVILED:%s:%s"%(self.saved.evilness,user)) + self.updateUsers() + + def chatJoin(self,room): + self.sendFlap(2,"CHAT_JOIN:%s:%s"%(room.id,room.name)) + f="CHAT_UPDATE_BUDDY:%s:T"%room.id + for u in room.users: + if u!=self: + u.chatUserUpdate(room,self) + f=f+":"+u.saved.nick + self.sendFlap(2,f) + + def chatInvite(self,room,user,message): + if not self.canContact(user): return + self.sendFlap(2,"CHAT_INVITE:%s:%s:%s:%s"%(room.name,room.id,user.saved.nick,message)) + + def chatUserUpdate(self,room,user): + if user in room.users: + inroom='T' + else: + inroom='F' + self.sendFlap(2,"CHAT_UPDATE_BUDDY:%s:%s:%s"%(room.id,inroom,user.saved.nick)) + + def chatMessage(self,room,user,message): + if not self.canContact(user): return + self.sendFlap(2,"CHAT_IN:%s:%s:F:%s"%(room.id,user.saved.nick,message)) + + def chatWhisper(self,room,user,message): + if not self.canContact(user): return + self.sendFlap(2,"CHAT_IN:%s:%s:T:%s"%(room.id,user.saved.nick,message)) + + def chatLeave(self,room): + self.sendFlap(2,"CHAT_LEFT:%s"%(room.id)) + + +class Chatroom: + def __init__(self,fac,exchange,name,id): + self.exchange=exchange + self.name=name + self.id=id + self.factory=fac + self.users=[] + + def join(self,user): + if user in self.users: + return + self.users.append(user) + user.chatJoin(self) + + def leave(self,user): + if user not in self.users: + raise TOCParseError + self.users.remove(user) + user.chatLeave(self) + for u in self.users: + u.chatUserUpdate(self,user) + if len(self.users)==0: + self.factory.remChatroom(self) + + def say(self,user,message): + for u in self.users: + u.chatMessage(self,user,message) + + +class SavedUser: + def __init__(self): + self.config="" + self.nick="" + self.password="" + self.evilness=0 + + +class TOCFactory(protocol.Factory): + def __init__(self): + self.users={} + self.savedusers={} + self.chatroom={} + self.chatroomid=0 + + def buildProtocol(self,addr): + p=TOC() + p.factory=self + return p + + def getChatroom(self,exchange,name): + for i in self.chatroom.values(): + if normalize(i.name)==normalize(name): + return i + self.chatroom[self.chatroomid]=Chatroom(self,exchange,name,self.chatroomid) + self.chatroomid=self.chatroomid+1 + return self.chatroom[self.chatroomid-1] + + def remChatroom(self,room): + id=room.id + del self.chatroom[id] + +MAXARGS={} +MAXARGS["CONFIG"]=0 +MAXARGS["NICK"]=0 +MAXARGS["IM_IN"]=2 +MAXARGS["UPDATE_BUDDY"]=5 +MAXARGS["ERROR"]=-1 +MAXARGS["EVILED"]=1 +MAXARGS["CHAT_JOIN"]=1 +MAXARGS["CHAT_IN"]=3 +MAXARGS["CHAT_UPDATE_BUDDY"]=-1 +MAXARGS["CHAT_INVITE"]=3 +MAXARGS["CHAT_LEFT"]=0 +MAXARGS["ADMIN_NICK_STATUS"]=0 +MAXARGS["ADMIN_PASSWD_STATUS"]=0 + + +class TOCClient(protocol.Protocol): + def __init__(self,username,password,authhost="login.oscar.aol.com",authport=5190): + + self.username=normalize(username) # our username + self._password=password # our password + self._mode="SendNick" # current mode + self._ourseqnum=19071 # current sequence number (for sendFlap) + self._authhost=authhost # authorization host + self._authport=authport # authorization port + self._online=0 # are we online? 
+ self._buddies=[] # the current buddy list + self._privacymode=PERMITALL # current privacy mode + self._permitlist=[] # list of users on the permit list + self._roomnames={} # the names for each of the rooms we're in + self._receivedchatmembers={} # have we gotten who's in our room yet? + self._denylist=[] + self._cookies={} # for file transfers + self._buf='' # current data buffer + self._awaymessage='' + + def _debug(self,data): + log.msg(data) + + def sendFlap(self,type,data): + if type==DATA: + data=data+"\000" + length=len(data) + s="*" + s=s+struct.pack("!BHH",type,self._ourseqnum,length) + s=s+data + self._ourseqnum=self._ourseqnum+1 + if self._ourseqnum>(256*256+256): + self._ourseqnum=0 + self._debug(data) + self.transport.write(s) + + def isFlap(self): + """ + tests to see if a flap is actually on the buffer + """ + if self._buf=='': return 0 + if self._buf[0]!="*": return 0 + if len(self._buf)<6: return 0 + foo,type,seqnum,length=struct.unpack("!BBHH",self._buf[:6]) + if type not in range(1,6): return 0 + if len(self._buf)<6+length: return 0 + return 1 + + def readFlap(self): + if self._buf=='': return None + if self._buf[0]!="*": + raise TOCParseError + if len(self._buf)<6: return None + foo,type,seqnum,length=struct.unpack("!BBHH",self._buf[:6]) + if len(self._buf)<6+length: return None + data=self._buf[6:6+length] + self._buf=self._buf[6+length:] + if data and data[-1]=="\000": + data=data[:-1] + return [type,data] + + def connectionMade(self): + self._debug("connection made! %s" % self.transport) + self.transport.write("FLAPON\r\n\r\n") + + def connectionLost(self, reason): + self._debug("connection lost!") + self._online=0 + + def dataReceived(self,data): + self._buf=self._buf+data + while self.isFlap(): + flap=self.readFlap() + func=getattr(self,"mode%s"%self._mode) + func(flap) + + def modeSendNick(self,flap): + if flap!=[1,"\000\000\000\001"]: raise TOCParseError + s="\000\000\000\001\000\001"+struct.pack("!H",len(self.username))+self.username + self.sendFlap(1,s) + s="toc_signon %s %s %s %s english \"penguin\""%(self._authhost,\ + self._authport,self.username,roast(self._password)) + self.sendFlap(2,s) + self._mode="Data" + + def modeData(self,flap): + if not flap[1]: + return + if not ':' in flap[1]: + self._debug("bad SNAC:%s"%(flap[1])) + return + command,rest=string.split(flap[1],":",1) + if MAXARGS.has_key(command): + maxsplit=MAXARGS[command] + else: + maxsplit=-1 + if maxsplit==-1: + l=tuple(string.split(rest,":")) + elif maxsplit==0: + l=(rest,) + else: + l=tuple(string.split(rest,":",maxsplit)) + self._debug("%s %s"%(command,l)) + try: + func=getattr(self,"toc%s"%command) + self._debug("calling %s"%func) + except: + self._debug("calling %s"%self.tocUNKNOWN) + self.tocUNKNOWN(command,l) + return + func(l) + + def tocUNKNOWN(self,command,data): + pass + + def tocSIGN_ON(self,data): + if data!=("TOC1.0",): raise TOCParseError + self._debug("Whee, signed on!") + if self._buddies: self.add_buddy(self._buddies) + self._online=1 + self.onLine() + + def tocNICK(self,data): + """ + Handle a message that looks like:: + + NICK: + """ + self.username=data[0] + + def tocCONFIG(self,data): + """ + Handle a message that looks like:: + + CONFIG: + + Format of config data: + + - g: group. 
all users until next g or end of config are in this group + - b: buddy + - p: person on the permit list + - d: person on the deny list + - m: permit/deny mode (1: permit all, 2: deny all, 3: permit some, 4: deny some) + """ + data=data[0] + if data and data[0]=="{":data=data[1:-1] + lines=string.split(data,"\n") + buddylist={} + currentgroup="" + permit=[] + deny=[] + mode=1 + for l in lines: + if l: + code,data=l[0],l[2:] + if code=='g': # group + currentgroup=data + buddylist[currentgroup]=[] + elif code=='b': + buddylist[currentgroup].append(data) + elif code=='p': + permit.append(data) + elif code=='d': + deny.append(data) + elif code=='m': + mode=int(data) + self.gotConfig(mode,buddylist,permit,deny) + + def tocIM_IN(self,data): + """ + Handle a message that looks like:: + + IM_IN:::message + """ + user=data[0] + autoreply=(data[1]=='T') + message=data[2] + self.hearMessage(user,message,autoreply) + + def tocUPDATE_BUDDY(self,data): + """ + Handle a message that looks like:: + + UPDATE_BUDDY:::::: + """ + data=list(data) + online=(data[1]=='T') + if len(data[5])==2: + data[5]=data[5]+" " + away=(data[5][-1]=='U') + if data[5][-1]=='U': + data[5]=data[5][:-1] + self.updateBuddy(data[0],online,int(data[2]),int(data[3]),int(data[4]),data[5],away) + + def tocERROR(self,data): + """ + Handle a message that looks like:: + + ERROR:: + """ + code,args=data[0],data[1:] + self.hearError(int(code),args) + + def tocEVILED(self,data): + """ + Handle a message that looks like:: + + EVILED:: + """ + self.hearWarning(data[0],data[1]) + + def tocCHAT_JOIN(self,data): + """ + Handle a message that looks like:: + + CHAT_JOIN:: + """ + #self.chatJoined(int(data[0]),data[1]) + self._roomnames[int(data[0])]=data[1] + self._receivedchatmembers[int(data[0])]=0 + + def tocCHAT_UPDATE_BUDDY(self,data): + """ + Handle a message that looks like:: + + CHAT_UPDATE_BUDDY::::... 
+ """ + roomid=int(data[0]) + inroom=(data[1]=='T') + if self._receivedchatmembers[roomid]: + for u in data[2:]: + self.chatUpdate(roomid,u,inroom) + else: + self._receivedchatmembers[roomid]=1 + self.chatJoined(roomid,self._roomnames[roomid],list(data[2:])) + + def tocCHAT_IN(self,data): + """ + Handle a message that looks like:: + + CHAT_IN:::: + + whisper isn't used + """ + whisper=(data[2]=='T') + if whisper: + self.chatHearWhisper(int(data[0]),data[1],data[3]) + else: + self.chatHearMessage(int(data[0]),data[1],data[3]) + + def tocCHAT_INVITE(self,data): + """ + Handle a message that looks like:: + + CHAT_INVITE:::: + """ + self.chatInvited(int(data[1]),data[0],data[2],data[3]) + + def tocCHAT_LEFT(self,data): + """ + Handle a message that looks like:: + + CHAT_LEFT: + """ + self.chatLeft(int(data[0])) + del self._receivedchatmembers[int(data[0])] + del self._roomnames[int(data[0])] + + def tocRVOUS_PROPOSE(self,data): + """ + Handle a message that looks like:: + + RVOUS_PROPOSE:::::::: + [:tlv tag1:tlv value1[:tlv tag2:tlv value2[:...]]] + """ + user,uid,cookie,seq,rip,pip,vip,port=data[:8] + cookie=base64.decodestring(cookie) + port=int(port) + tlvs={} + for i in range(8,len(data),2): + key=data[i] + value=base64.decodestring(data[i+1]) + tlvs[key]=value + name=UUIDS[uid] + try: + func=getattr(self,"toc%s"%name) + except: + self._debug("no function for UID %s" % uid) + return + func(user,cookie,seq,pip,vip,port,tlvs) + + def tocSEND_FILE(self,user,cookie,seq,pip,vip,port,tlvs): + if tlvs.has_key('12'): + description=tlvs['12'] + else: + description="" + subtype,numfiles,size=struct.unpack("!HHI",tlvs['10001'][:8]) + name=tlvs['10001'][8:-4] + while name[-1]=='\000': + name=name[:-1] + self._cookies[cookie]=[user,SEND_FILE_UID,pip,port,{'name':name}] + self.rvousProposal("send",cookie,user,vip,port,description=description, + name=name,files=numfiles,size=size) + + def tocGET_FILE(self,user,cookie,seq,pip,vip,port,tlvs): + return + # XXX add this back in + #reactor.clientTCP(pip,port,GetFileTransfer(self,cookie,os.path.expanduser("~"))) + #self.rvous_accept(user,cookie,GET_FILE_UID) + + def onLine(self): + """ + called when we are first online + """ + pass + + def gotConfig(self,mode,buddylist,permit,deny): + """ + called when we get a configuration from the server + mode := permit/deny mode + buddylist := current buddylist + permit := permit list + deny := deny list + """ + pass + + def hearError(self,code,args): + """ + called when an error is received + code := error code + args := misc. arguments (username, etc.) 
+ """ + pass + + def hearWarning(self,newamount,username): + """ + called when we get warned + newamount := the current warning level + username := the user who warned us, or '' if it's anonymous + """ + pass + + def hearMessage(self,username,message,autoreply): + """ + called when you receive an IM + username := the user who the IM is from + message := the message + autoreply := true if the message is an autoreply from an away message + """ + pass + + def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away): + """ + called when a buddy changes state + username := the user whos state changed + online := true if the user is online + evilness := the users current warning level + signontime := the time the user signed on (UNIX epoch) + idletime := the time the user has been idle (minutes) + away := true if the user is away + userclass := the class of the user (generally " O") + """ + pass + + def chatJoined(self,roomid,roomname,users): + """ + we just joined a chat room + roomid := the AIM id for the room + roomname := the name for the room + users := a list of the users already in the room + """ + pass + + def chatUpdate(self,roomid,username,inroom): + """ + a user has joined the room + roomid := the AIM id for the room + username := the username + inroom := true if the user is in the room + """ + pass + + def chatHearMessage(self,roomid,username,message): + """ + a message was sent to the room + roomid := the AIM id for the room + username := the user who sent the message + message := the message + """ + pass + + def chatHearWhisper(self,roomid,username,message): + """ + someone whispered to us in a chatroom + roomid := the AIM for the room + username := the user who whispered to us + message := the message + """ + pass + + def chatInvited(self,roomid,roomname,username,message): + """ + we were invited to a chat room + roomid := the AIM id for the room + roomname := the name of the room + username := the user who invited us + message := the invite message + """ + pass + + def chatLeft(self,roomid): + """ + we left the room + roomid := the AIM id for the room + """ + pass + + def rvousProposal(self,type,cookie,user,vip,port,**kw): + """ + we were asked for a rondevouz + type := the type of rondevous. currently, one of ["send"] + cookie := the cookie. pass this to rvous_accept() + user := the user who asked us + vip := their verified_ip + port := the port they want us to conenct to + kw := misc. 
args + """ + pass #self.rvous_accept(cookie) + + def receiveBytes(self,user,file,chunk,sofar,total): + """ + we received part of a file from a file transfer + file := the name of the file + chunk := the chunk of data + sofar := how much data we've gotten so far + total := the total amount of data + """ + pass #print user,file,sofar,total + + def isaway(self): + """ + return our away status + """ + return len(self._awaymessage)>0 + + def set_config(self,mode,buddylist,permit,deny): + """ + set the server configuration + mode := permit mode + buddylist := buddy list + permit := permit list + deny := deny list + """ + s="m %s\n"%mode + for g in buddylist.keys(): + s=s+"g %s\n"%g + for u in buddylist[g]: + s=s+"b %s\n"%u + for p in permit: + s=s+"p %s\n"%p + for d in deny: + s=s+"d %s\n"%d + #s="{\n"+s+"\n}" + self.sendFlap(2,"toc_set_config %s"%quote(s)) + + def add_buddy(self,buddies): + s="" + if type(buddies)==type(""): buddies=[buddies] + for b in buddies: + s=s+" "+normalize(b) + self.sendFlap(2,"toc_add_buddy%s"%s) + + def del_buddy(self,buddies): + s="" + if type(buddies)==type(""): buddies=[buddies] + for b in buddies: + s=s+" "+b + self.sendFlap(2,"toc_remove_buddy%s"%s) + + def add_permit(self,users): + if type(users)==type(""): users=[users] + s="" + if self._privacymode!=PERMITSOME: + self._privacymode=PERMITSOME + self._permitlist=[] + for u in users: + u=normalize(u) + if u not in self._permitlist:self._permitlist.append(u) + s=s+" "+u + if not s: + self._privacymode=DENYALL + self._permitlist=[] + self._denylist=[] + self.sendFlap(2,"toc_add_permit"+s) + + def del_permit(self,users): + if type(users)==type(""): users=[users] + p=self._permitlist[:] + for u in users: + u=normalize(u) + if u in p: + p.remove(u) + self.add_permit([]) + self.add_permit(p) + + def add_deny(self,users): + if type(users)==type(""): users=[users] + s="" + if self._privacymode!=DENYSOME: + self._privacymode=DENYSOME + self._denylist=[] + for u in users: + u=normalize(u) + if u not in self._denylist:self._denylist.append(u) + s=s+" "+u + if not s: + self._privacymode=PERMITALL + self._permitlist=[] + self._denylist=[] + self.sendFlap(2,"toc_add_deny"+s) + + def del_deny(self,users): + if type(users)==type(""): users=[users] + d=self._denylist[:] + for u in users: + u=normalize(u) + if u in d: + d.remove(u) + self.add_deny([]) + if d: + self.add_deny(d) + + def signon(self): + """ + called to finish the setup, and signon to the network + """ + self.sendFlap(2,"toc_init_done") + self.sendFlap(2,"toc_set_caps %s" % (SEND_FILE_UID,)) # GET_FILE_UID) + + def say(self,user,message,autoreply=0): + """ + send a message + user := the user to send to + message := the message + autoreply := true if the message is an autoreply (good for away messages) + """ + if autoreply: a=" auto" + else: a='' + self.sendFlap(2,"toc_send_im %s %s%s"%(normalize(user),quote(message),a)) + + def idle(self,idletime=0): + """ + change idle state + idletime := the seconds that the user has been away, or 0 if they're back + """ + self.sendFlap(2,"toc_set_idle %s" % int(idletime)) + + def evil(self,user,anon=0): + """ + warn a user + user := the user to warn + anon := if true, an anonymous warning + """ + self.sendFlap(2,"toc_evil %s %s"%(normalize(user), (not anon and "anon") or "norm")) + + def away(self,message=''): + """ + change away state + message := the message, or '' to come back from awayness + """ + self._awaymessage=message + if message: + message=' '+quote(message) + self.sendFlap(2,"toc_set_away%s"%message) + + def 
chat_join(self,exchange,roomname): + """ + join a chat room + exchange := should almost always be 4 + roomname := room name + """ + roomname=string.replace(roomname," ","") + self.sendFlap(2,"toc_chat_join %s %s"%(int(exchange),roomname)) + + def chat_say(self,roomid,message): + """ + send a message to a chatroom + roomid := the AIM id for the room + message := the message to send + """ + self.sendFlap(2,"toc_chat_send %s %s"%(int(roomid),quote(message))) + + def chat_whisper(self,roomid,user,message): + """ + whisper to another user in a chatroom + roomid := the AIM id for the room + user := the user to whisper to + message := the message to send + """ + self.sendFlap(2,"toc_chat_whisper %s %s %s"%(int(roomid),normalize(user),quote(message))) + + def chat_leave(self,roomid): + """ + leave a chat room. + roomid := the AIM id for the room + """ + self.sendFlap(2,"toc_chat_leave %s" % int(roomid)) + + def chat_invite(self,roomid,usernames,message): + """ + invite a user[s] to the chat room + roomid := the AIM id for the room + usernames := either a string (one username) or a list (more than one) + message := the message to invite them with + """ + if type(usernames)==type(""): # a string, one username + users=usernames + else: + users="" + for u in usernames: + users=users+u+" " + users=users[:-1] + self.sendFlap(2,"toc_chat_invite %s %s %s" % (int(roomid),quote(message),users)) + + def chat_accept(self,roomid): + """ + accept an invite to a chat room + roomid := the AIM id for the room + """ + self.sendFlap(2,"toc_chat_accept %s"%int(roomid)) + + def rvous_accept(self,cookie): + user,uuid,pip,port,d=self._cookies[cookie] + self.sendFlap(2,"toc_rvous_accept %s %s %s" % (normalize(user), + cookie,uuid)) + if uuid==SEND_FILE_UID: + protocol.ClientCreator(reactor, SendFileTransfer,self,cookie,user,d["name"]).connectTCP(pip,port) + + def rvous_cancel(self,cookie): + user,uuid,pip,port,d=self._cookies[cookie] + self.sendFlap(2,"toc_rvous_accept %s %s %s" % (normalize(user), + cookie,uuid)) + del self._cookies[cookie] + + +class SendFileTransfer(protocol.Protocol): + header_fmt="!4s2H8s6H10I32s3c69s16s2H64s" + + def __init__(self,client,cookie,user,filename): + self.client=client + self.cookie=cookie + self.user=user + self.filename=filename + self.hdr=[0,0,0] + self.sofar=0 + + def dataReceived(self,data): + if not self.hdr[2]==0x202: + self.hdr=list(struct.unpack(self.header_fmt,data[:256])) + self.hdr[2]=0x202 + self.hdr[3]=self.cookie + self.hdr[4]=0 + self.hdr[5]=0 + self.transport.write(apply(struct.pack,[self.header_fmt]+self.hdr)) + data=data[256:] + if self.hdr[6]==1: + self.name=self.filename + else: + self.name=self.filename+self.hdr[-1] + while self.name[-1]=="\000": + self.name=self.name[:-1] + if not data: return + self.sofar=self.sofar+len(data) + self.client.receiveBytes(self.user,self.name,data,self.sofar,self.hdr[11]) + if self.sofar==self.hdr[11]: # end of this file + self.hdr[2]=0x204 + self.hdr[7]=self.hdr[7]-1 + self.hdr[9]=self.hdr[9]-1 + self.hdr[19]=DUMMY_CHECKSUM # XXX really calculate this + self.hdr[18]=self.hdr[18]+1 + self.hdr[21]="\000" + self.transport.write(apply(struct.pack,[self.header_fmt]+self.hdr)) + self.sofar=0 + if self.hdr[7]==0: + self.transport.loseConnection() + + +class GetFileTransfer(protocol.Protocol): + header_fmt="!4s 2H 8s 6H 10I 32s 3c 69s 16s 2H 64s" + def __init__(self,client,cookie,dir): + self.client=client + self.cookie=cookie + self.dir=dir + self.buf="" + + def connectionMade(self): + def func(f,path,names): + names.sort(lambda 
x,y:cmp(string.lower(x),string.lower(y))) + for n in names: + name=os.path.join(path,n) + lt=time.localtime(os.path.getmtime(name)) + size=os.path.getsize(name) + f[1]=f[1]+size + f.append("%02d/%02d/%4d %02d:%02d %8d %s" % + (lt[1],lt[2],lt[0],lt[3],lt[4],size,name[f[0]:])) + f=[len(self.dir)+1,0] + os.path.walk(self.dir,func,f) + size=f[1] + self.listing=string.join(f[2:],"\r\n")+"\r\n" + open("\\listing.txt","w").write(self.listing) + hdr=["OFT2",256,0x1108,self.cookie,0,0,len(f)-2,len(f)-2,1,1,size, + len(self.listing),os.path.getmtime(self.dir), + checksum(self.listing),0,0,0,0,0,0,"OFT_Windows ICBMFT V1.1 32", + "\002",chr(0x1a),chr(0x10),"","",0,0,""] + self.transport.write(apply(struct.pack,[self.header_fmt]+hdr)) + + def dataReceived(self,data): + self.buf=self.buf+data + while len(self.buf)>=256: + hdr=list(struct.unpack(self.header_fmt,self.buf[:256])) + self.buf=self.buf[256:] + if hdr[2]==0x1209: + self.file=StringIO.StringIO(self.listing) + self.transport.registerProducer(self,0) + elif hdr[2]==0x120b: pass + elif hdr[2]==0x120c: # file request + file=hdr[-1] + for k,v in [["\000",""],["\001",os.sep]]: + file=string.replace(file,k,v) + self.name=os.path.join(self.dir,file) + self.file=open(self.name,'rb') + hdr[2]=0x0101 + hdr[6]=hdr[7]=1 + hdr[10]=hdr[11]=os.path.getsize(self.name) + hdr[12]=os.path.getmtime(self.name) + hdr[13]=checksum_file(self.file) + self.file.seek(0) + hdr[18]=hdr[19]=0 + hdr[21]=chr(0x20) + self.transport.write(apply(struct.pack,[self.header_fmt]+hdr)) + log.msg("got file request for %s"%file,hex(hdr[13])) + elif hdr[2]==0x0202: + log.msg("sending file") + self.transport.registerProducer(self,0) + elif hdr[2]==0x0204: + log.msg("real checksum: %s"%hex(hdr[19])) + del self.file + elif hdr[2]==0x0205: # resume + already=hdr[18] + if already: + data=self.file.read(already) + else: + data="" + log.msg("restarting at %s"%already) + hdr[2]=0x0106 + hdr[19]=checksum(data) + self.transport.write(apply(struct.pack,[self.header_fmt]+hdr)) + elif hdr[2]==0x0207: + self.transport.registerProducer(self,0) + else: + log.msg("don't understand 0x%04x"%hdr[2]) + log.msg(hdr) + + def resumeProducing(self): + data=self.file.read(4096) + log.msg(len(data)) + if not data: + self.transport.unregisterProducer() + self.transport.write(data) + + def pauseProducing(self): pass + + def stopProducing(self): del self.file + +# UUIDs +SEND_FILE_UID = "09461343-4C7F-11D1-8222-444553540000" +GET_FILE_UID = "09461348-4C7F-11D1-8222-444553540000" +UUIDS={ + SEND_FILE_UID:"SEND_FILE", + GET_FILE_UID:"GET_FILE" +} + +# ERRORS +# general +NOT_AVAILABLE=901 +CANT_WARN=902 +MESSAGES_TOO_FAST=903 +# admin +BAD_INPUT=911 +BAD_ACCOUNT=912 +REQUEST_ERROR=913 +SERVICE_UNAVAILABLE=914 +# chat +NO_CHAT_IN=950 +# im and info +SEND_TOO_FAST=960 +MISSED_BIG_IM=961 +MISSED_FAST_IM=962 +# directory +DIR_FAILURE=970 +TOO_MANY_MATCHES=971 +NEED_MORE_QUALIFIERS=972 +DIR_UNAVAILABLE=973 +NO_EMAIL_LOOKUP=974 +KEYWORD_IGNORED=975 +NO_KEYWORDS=976 +BAD_LANGUAGE=977 +BAD_COUNTRY=978 +DIR_FAIL_UNKNOWN=979 +# authorization +BAD_NICKNAME=980 +SERVICE_TEMP_UNAVAILABLE=981 +WARNING_TOO_HIGH=982 +CONNECTING_TOO_QUICK=983 +UNKNOWN_SIGNON=989 + +STD_MESSAGE={} +STD_MESSAGE[NOT_AVAILABLE]="%s not currently available" +STD_MESSAGE[CANT_WARN]="Warning of %s not currently available" +STD_MESSAGE[MESSAGES_TOO_FAST]="A message has been dropped, you are exceeding the server speed limit" +STD_MESSAGE[BAD_INPUT]="Error validating input" +STD_MESSAGE[BAD_ACCOUNT]="Invalid account" +STD_MESSAGE[REQUEST_ERROR]="Error encountered 
while processing request" +STD_MESSAGE[SERVICE_UNAVAILABLE]="Service unavailable" +STD_MESSAGE[NO_CHAT_IN]="Chat in %s is unavailable" +STD_MESSAGE[SEND_TOO_FAST]="You are sending messages too fast to %s" +STD_MESSAGE[MISSED_BIG_IM]="You missed an IM from %s because it was too big" +STD_MESSAGE[MISSED_FAST_IM]="You missed an IM from %s because it was sent too fast" +# skipping directory for now +STD_MESSAGE[BAD_NICKNAME]="Incorrect nickname or password" +STD_MESSAGE[SERVICE_TEMP_UNAVAILABLE]="The service is temporarily unavailable" +STD_MESSAGE[WARNING_TOO_HIGH]="Your warning level is currently too high to sign on" +STD_MESSAGE[CONNECTING_TOO_QUICK]="You have been connecting and disconnecting too frequently. Wait 10 minutes and try again. If you continue to try, you will need to wait even longer." +STD_MESSAGE[UNKNOWN_SIGNON]="An unknown signon error has occurred %s" diff --git a/vendor/Twisted-10.0.0/twisted/words/service.py b/vendor/Twisted-10.0.0/twisted/words/service.py new file mode 100644 index 000000000000..d7f29ab2f4f3 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/service.py @@ -0,0 +1,1223 @@ +# -*- test-case-name: twisted.words.test.test_service -*- +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +A module that needs a better name. + +Implements new cred things for words. + +How does this thing work? + + - Network connection on some port expecting to speak some protocol + + - Protocol-specific authentication, resulting in some kind of credentials object + + - twisted.cred.portal login using those credentials for the interface + IUser and with something implementing IChatClient as the mind + + - successful login results in an IUser avatar the protocol can call + methods on, and state added to the realm such that the mind will have + methods called on it as is necessary + + - protocol specific actions lead to calls onto the avatar; remote events + lead to calls onto the mind + + - protocol specific hangup, realm is notified, user is removed from active + play, the end. 
+""" + +from time import time, ctime + +from zope.interface import implements + +from twisted.words import iwords, ewords + +from twisted.python.components import registerAdapter +from twisted.cred import portal, credentials, error as ecred +from twisted.spread import pb +from twisted.words.protocols import irc +from twisted.internet import defer, protocol +from twisted.python import log, failure, reflect +from twisted import copyright + + +class Group(object): + implements(iwords.IGroup) + + def __init__(self, name): + self.name = name + self.users = {} + self.meta = { + "topic": "", + "topic_author": "", + } + + + def _ebUserCall(self, err, p): + return failure.Failure(Exception(p, err)) + + + def _cbUserCall(self, results): + for (success, result) in results: + if not success: + user, err = result.value # XXX + self.remove(user, err.getErrorMessage()) + + + def add(self, user): + assert iwords.IChatClient.providedBy(user), "%r is not a chat client" % (user,) + if user.name not in self.users: + additions = [] + self.users[user.name] = user + for p in self.users.itervalues(): + if p is not user: + d = defer.maybeDeferred(p.userJoined, self, user) + d.addErrback(self._ebUserCall, p=p) + additions.append(d) + defer.DeferredList(additions).addCallback(self._cbUserCall) + return defer.succeed(None) + + + def remove(self, user, reason=None): + assert reason is None or isinstance(reason, unicode) + try: + del self.users[user.name] + except KeyError: + pass + else: + removals = [] + for p in self.users.itervalues(): + if p is not user: + d = defer.maybeDeferred(p.userLeft, self, user, reason) + d.addErrback(self._ebUserCall, p=p) + removals.append(d) + defer.DeferredList(removals).addCallback(self._cbUserCall) + return defer.succeed(None) + + + def size(self): + return defer.succeed(len(self.users)) + + + def receive(self, sender, recipient, message): + assert recipient is self + receives = [] + for p in self.users.itervalues(): + if p is not sender: + d = defer.maybeDeferred(p.receive, sender, self, message) + d.addErrback(self._ebUserCall, p=p) + receives.append(d) + defer.DeferredList(receives).addCallback(self._cbUserCall) + return defer.succeed(None) + + + def setMetadata(self, meta): + self.meta = meta + sets = [] + for p in self.users.itervalues(): + d = defer.maybeDeferred(p.groupMetaUpdate, self, meta) + d.addErrback(self._ebUserCall, p=p) + sets.append(d) + defer.DeferredList(sets).addCallback(self._cbUserCall) + return defer.succeed(None) + + + def iterusers(self): + # XXX Deferred? 
+ return iter(self.users.values()) + + +class User(object): + implements(iwords.IUser) + + realm = None + mind = None + + def __init__(self, name): + self.name = name + self.groups = [] + self.lastMessage = time() + + + def loggedIn(self, realm, mind): + self.realm = realm + self.mind = mind + self.signOn = time() + + + def join(self, group): + def cbJoin(result): + self.groups.append(group) + return result + return group.add(self.mind).addCallback(cbJoin) + + + def leave(self, group, reason=None): + def cbLeave(result): + self.groups.remove(group) + return result + return group.remove(self.mind, reason).addCallback(cbLeave) + + + def send(self, recipient, message): + self.lastMessage = time() + return recipient.receive(self.mind, recipient, message) + + + def itergroups(self): + return iter(self.groups) + + + def logout(self): + for g in self.groups[:]: + self.leave(g) + + +NICKSERV = 'NickServ!NickServ@services' + + +class IRCUser(irc.IRC): + """ + Protocol instance representing an IRC user connected to the server. + """ + implements(iwords.IChatClient) + + # A list of IGroups in which I am participating + groups = None + + # A no-argument callable I should invoke when I go away + logout = None + + # An IUser we use to interact with the chat service + avatar = None + + # To whence I belong + realm = None + + # How to handle unicode (TODO: Make this customizable on a per-user basis) + encoding = 'utf-8' + + # Twisted callbacks + def connectionMade(self): + self.irc_PRIVMSG = self.irc_NICKSERV_PRIVMSG + self.realm = self.factory.realm + self.hostname = self.realm.name + + + def connectionLost(self, reason): + if self.logout is not None: + self.logout() + self.avatar = None + + + # Make sendMessage a bit more useful to us + def sendMessage(self, command, *parameter_list, **kw): + if not kw.has_key('prefix'): + kw['prefix'] = self.hostname + if not kw.has_key('to'): + kw['to'] = self.name.encode(self.encoding) + + arglist = [self, command, kw['to']] + list(parameter_list) + irc.IRC.sendMessage(*arglist, **kw) + + + # IChatClient implementation + def userJoined(self, group, user): + self.join( + "%s!%s@%s" % (user.name, user.name, self.hostname), + '#' + group.name) + + + def userLeft(self, group, user, reason=None): + assert reason is None or isinstance(reason, unicode) + self.part( + "%s!%s@%s" % (user.name, user.name, self.hostname), + '#' + group.name, + (reason or u"leaving").encode(self.encoding, 'replace')) + + + def receive(self, sender, recipient, message): + #>> :glyph!glyph@adsl-64-123-27-108.dsl.austtx.swbell.net PRIVMSG glyph_ :hello + + # omg??????????? + if iwords.IGroup.providedBy(recipient): + recipientName = '#' + recipient.name + else: + recipientName = recipient.name + + text = message.get('text', '') + for L in text.splitlines(): + self.privmsg( + '%s!%s@%s' % (sender.name, sender.name, self.hostname), + recipientName, + L) + + + def groupMetaUpdate(self, group, meta): + if 'topic' in meta: + topic = meta['topic'] + author = meta.get('topic_author', '') + self.topic( + self.name, + '#' + group.name, + topic, + '%s!%s@%s' % (author, author, self.hostname) + ) + + # irc.IRC callbacks - starting with login related stuff. + nickname = None + password = None + + def irc_PASS(self, prefix, params): + """Password message -- Register a password. + + Parameters: + + [REQUIRED] + + Note that IRC requires the client send this *before* NICK + and USER. + """ + self.password = params[-1] + + + def irc_NICK(self, prefix, params): + """Nick message -- Set your nickname. 
+ + Parameters: + + [REQUIRED] + """ + try: + nickname = params[0].decode(self.encoding) + except UnicodeDecodeError: + self.privmsg( + NICKSERV, + nickname, + 'Your nickname is cannot be decoded. Please use ASCII or UTF-8.') + self.transport.loseConnection() + return + + self.nickname = nickname + self.name = nickname + + for code, text in self._motdMessages: + self.sendMessage(code, text % self.factory._serverInfo) + + if self.password is None: + self.privmsg( + NICKSERV, + nickname, + 'Password?') + else: + password = self.password + self.password = None + self.logInAs(nickname, password) + + + def irc_USER(self, prefix, params): + """User message -- Set your realname. + + Parameters: + """ + # Note: who gives a crap about this? The IUser has the real + # information we care about. Save it anyway, I guess, just + # for fun. + self.realname = params[-1] + + + def irc_NICKSERV_PRIVMSG(self, prefix, params): + """Send a (private) message. + + Parameters: + """ + target = params[0] + password = params[-1] + + if self.nickname is None: + # XXX Send an error response here + self.transport.loseConnection() + elif target.lower() != "nickserv": + self.privmsg( + NICKSERV, + self.nickname, + "Denied. Please send me (NickServ) your password.") + else: + nickname = self.nickname + self.nickname = None + self.logInAs(nickname, password) + + + def logInAs(self, nickname, password): + d = self.factory.portal.login( + credentials.UsernamePassword(nickname, password), + self, + iwords.IUser) + d.addCallbacks(self._cbLogin, self._ebLogin, errbackArgs=(nickname,)) + + + _welcomeMessages = [ + (irc.RPL_WELCOME, + ":connected to Twisted IRC"), + (irc.RPL_YOURHOST, + ":Your host is %(serviceName)s, running version %(serviceVersion)s"), + (irc.RPL_CREATED, + ":This server was created on %(creationDate)s"), + + # "Bummer. This server returned a worthless 004 numeric. + # I'll have to guess at all the values" + # -- epic + (irc.RPL_MYINFO, + # w and n are the currently supported channel and user modes + # -- specify this better + "%(serviceName)s %(serviceVersion)s w n") + ] + + _motdMessages = [ + (irc.RPL_MOTDSTART, + ":- %(serviceName)s Message of the Day - "), + (irc.RPL_ENDOFMOTD, + ":End of /MOTD command.") + ] + + def _cbLogin(self, (iface, avatar, logout)): + assert iface is iwords.IUser, "Realm is buggy, got %r" % (iface,) + + # Let them send messages to the world + del self.irc_PRIVMSG + + self.avatar = avatar + self.logout = logout + for code, text in self._welcomeMessages: + self.sendMessage(code, text % self.factory._serverInfo) + + + def _ebLogin(self, err, nickname): + if err.check(ewords.AlreadyLoggedIn): + self.privmsg( + NICKSERV, + nickname, + "Already logged in. No pod people allowed!") + elif err.check(ecred.UnauthorizedLogin): + self.privmsg( + NICKSERV, + nickname, + "Login failed. Goodbye.") + else: + log.msg("Unhandled error during login:") + log.err(err) + self.privmsg( + NICKSERV, + nickname, + "Server error during login. 
Sorry.") + self.transport.loseConnection() + + + # Great, now that's out of the way, here's some of the interesting + # bits + def irc_PING(self, prefix, params): + """Ping message + + Parameters: [ ] + """ + if self.realm is not None: + self.sendMessage('PONG', self.hostname) + + + def irc_QUIT(self, prefix, params): + """Quit + + Parameters: [ ] + """ + self.transport.loseConnection() + + + def _channelMode(self, group, modes=None, *args): + if modes: + self.sendMessage( + irc.ERR_UNKNOWNMODE, + ":Unknown MODE flag.") + else: + self.channelMode(self.name, '#' + group.name, '+') + + + def _userMode(self, user, modes=None): + if modes: + self.sendMessage( + irc.ERR_UNKNOWNMODE, + ":Unknown MODE flag.") + elif user is self.avatar: + self.sendMessage( + irc.RPL_UMODEIS, + "+") + else: + self.sendMessage( + irc.ERR_USERSDONTMATCH, + ":You can't look at someone else's modes.") + + + def irc_MODE(self, prefix, params): + """User mode message + + Parameters: + *( ( "+" / "-" ) *( "i" / "w" / "o" / "O" / "r" ) ) + + """ + try: + channelOrUser = params[0].decode(self.encoding) + except UnicodeDecodeError: + self.sendMessage( + irc.ERR_NOSUCHNICK, params[0], + ":No such nickname (could not decode your unicode!)") + return + + if channelOrUser.startswith('#'): + def ebGroup(err): + err.trap(ewords.NoSuchGroup) + self.sendMessage( + irc.ERR_NOSUCHCHANNEL, params[0], + ":That channel doesn't exist.") + d = self.realm.lookupGroup(channelOrUser[1:]) + d.addCallbacks( + self._channelMode, + ebGroup, + callbackArgs=tuple(params[1:])) + else: + def ebUser(err): + self.sendMessage( + irc.ERR_NOSUCHNICK, + ":No such nickname.") + + d = self.realm.lookupUser(channelOrUser) + d.addCallbacks( + self._userMode, + ebUser, + callbackArgs=tuple(params[1:])) + + + def irc_USERHOST(self, prefix, params): + """Userhost message + + Parameters: *( SPACE ) + + [Optional] + """ + pass + + + def irc_PRIVMSG(self, prefix, params): + """Send a (private) message. 
+ + Parameters: + """ + try: + targetName = params[0].decode(self.encoding) + except UnicodeDecodeError: + self.sendMessage( + irc.ERR_NOSUCHNICK, targetName, + ":No such nick/channel (could not decode your unicode!)") + return + + messageText = params[-1] + if targetName.startswith('#'): + target = self.realm.lookupGroup(targetName[1:]) + else: + target = self.realm.lookupUser(targetName).addCallback(lambda user: user.mind) + + def cbTarget(targ): + if targ is not None: + return self.avatar.send(targ, {"text": messageText}) + + def ebTarget(err): + self.sendMessage( + irc.ERR_NOSUCHNICK, targetName, + ":No such nick/channel.") + + target.addCallbacks(cbTarget, ebTarget) + + + def irc_JOIN(self, prefix, params): + """Join message + + Parameters: ( *( "," ) [ *( "," ) ] ) + """ + try: + groupName = params[0].decode(self.encoding) + except UnicodeDecodeError: + self.sendMessage( + irc.IRC_NOSUCHCHANNEL, params[0], + ":No such channel (could not decode your unicode!)") + return + + if groupName.startswith('#'): + groupName = groupName[1:] + + def cbGroup(group): + def cbJoin(ign): + self.userJoined(group, self) + self.names( + self.name, + '#' + group.name, + [user.name for user in group.iterusers()]) + self._sendTopic(group) + return self.avatar.join(group).addCallback(cbJoin) + + def ebGroup(err): + self.sendMessage( + irc.ERR_NOSUCHCHANNEL, '#' + groupName, + ":No such channel.") + + self.realm.getGroup(groupName).addCallbacks(cbGroup, ebGroup) + + + def irc_PART(self, prefix, params): + """Part message + + Parameters: *( "," ) [ ] + """ + try: + groupName = params[0].decode(self.encoding) + except UnicodeDecodeError: + self.sendMessage( + irc.ERR_NOTONCHANNEL, params[0], + ":Could not decode your unicode!") + return + + if groupName.startswith('#'): + groupName = groupName[1:] + + if len(params) > 1: + reason = params[1].decode('utf-8') + else: + reason = None + + def cbGroup(group): + def cbLeave(result): + self.userLeft(group, self, reason) + return self.avatar.leave(group, reason).addCallback(cbLeave) + + def ebGroup(err): + err.trap(ewords.NoSuchGroup) + self.sendMessage( + irc.ERR_NOTONCHANNEL, + '#' + groupName, + ":" + err.getErrorMessage()) + + self.realm.lookupGroup(groupName).addCallbacks(cbGroup, ebGroup) + + + def irc_NAMES(self, prefix, params): + """Names message + + Parameters: [ *( "," ) [ ] ] + """ + #<< NAMES #python + #>> :benford.openprojects.net 353 glyph = #python :Orban ... @glyph ... Zymurgy skreech + #>> :benford.openprojects.net 366 glyph #python :End of /NAMES list. + try: + channel = params[-1].decode(self.encoding) + except UnicodeDecodeError: + self.sendMessage( + irc.ERR_NOSUCHCHANNEL, params[-1], + ":No such channel (could not decode your unicode!)") + return + + if channel.startswith('#'): + channel = channel[1:] + + def cbGroup(group): + self.names( + self.name, + '#' + group.name, + [user.name for user in group.iterusers()]) + + def ebGroup(err): + err.trap(ewords.NoSuchGroup) + # No group? Fine, no names! 
+ self.names( + self.name, + '#' + channel, + []) + + self.realm.lookupGroup(channel).addCallbacks(cbGroup, ebGroup) + + + def irc_TOPIC(self, prefix, params): + """Topic message + + Parameters: [ ] + """ + try: + channel = params[0].decode(self.encoding) + except UnicodeDecodeError: + self.sendMessage( + irc.ERR_NOSUCHCHANNEL, + ":That channel doesn't exist (could not decode your unicode!)") + return + + if channel.startswith('#'): + channel = channel[1:] + + if len(params) > 1: + self._setTopic(channel, params[1]) + else: + self._getTopic(channel) + + + def _sendTopic(self, group): + """ + Send the topic of the given group to this user, if it has one. + """ + topic = group.meta.get("topic") + if topic: + author = group.meta.get("topic_author") or "" + date = group.meta.get("topic_date", 0) + self.topic(self.name, '#' + group.name, topic) + self.topicAuthor(self.name, '#' + group.name, author, date) + + + def _getTopic(self, channel): + #<< TOPIC #python + #>> :benford.openprojects.net 332 glyph #python : I really did. I sprained all my toes. + #>> :benford.openprojects.net 333 glyph #python itamar|nyc 994713482 + def ebGroup(err): + err.trap(ewords.NoSuchGroup) + self.sendMessage( + irc.ERR_NOSUCHCHANNEL, '=', channel, + ":That channel doesn't exist.") + + self.realm.lookupGroup(channel).addCallbacks(self._sendTopic, ebGroup) + + + def _setTopic(self, channel, topic): + #<< TOPIC #divunal :foo + #>> :glyph!glyph@adsl-64-123-27-108.dsl.austtx.swbell.net TOPIC #divunal :foo + + def cbGroup(group): + newMeta = group.meta.copy() + newMeta['topic'] = topic + newMeta['topic_author'] = self.name + newMeta['topic_date'] = int(time()) + + def ebSet(err): + self.sendMessage( + irc.ERR_CHANOPRIVSNEEDED, + "#" + group.name, + ":You need to be a channel operator to do that.") + + return group.setMetadata(newMeta).addErrback(ebSet) + + def ebGroup(err): + err.trap(ewords.NoSuchGroup) + self.sendMessage( + irc.ERR_NOSUCHCHANNEL, '=', channel, + ":That channel doesn't exist.") + + self.realm.lookupGroup(channel).addCallbacks(cbGroup, ebGroup) + + + def list(self, channels): + """Send a group of LIST response lines + + @type channel: C{list} of C{(str, int, str)} + @param channel: Information about the channels being sent: + their name, the number of participants, and their topic. + """ + for (name, size, topic) in channels: + self.sendMessage(irc.RPL_LIST, name, str(size), ":" + topic) + self.sendMessage(irc.RPL_LISTEND, ":End of /LIST") + + + def irc_LIST(self, prefix, params): + """List query + + Return information about the indicated channels, or about all + channels if none are specified. 
+ + Parameters: [ *( "," ) [ ] ] + """ + #<< list #python + #>> :orwell.freenode.net 321 exarkun Channel :Users Name + #>> :orwell.freenode.net 322 exarkun #python 358 :The Python programming language + #>> :orwell.freenode.net 323 exarkun :End of /LIST + if params: + # Return information about indicated channels + try: + channels = params[0].decode(self.encoding).split(',') + except UnicodeDecodeError: + self.sendMessage( + irc.ERR_NOSUCHCHANNEL, params[0], + ":No such channel (could not decode your unicode!)") + return + + groups = [] + for ch in channels: + if ch.startswith('#'): + ch = ch[1:] + groups.append(self.realm.lookupGroup(ch)) + + groups = defer.DeferredList(groups, consumeErrors=True) + groups.addCallback(lambda gs: [r for (s, r) in gs if s]) + else: + # Return information about all channels + groups = self.realm.itergroups() + + def cbGroups(groups): + def gotSize(size, group): + return group.name, size, group.meta.get('topic') + d = defer.DeferredList([ + group.size().addCallback(gotSize, group) for group in groups]) + d.addCallback(lambda results: self.list([r for (s, r) in results if s])) + return d + groups.addCallback(cbGroups) + + + def _channelWho(self, group): + self.who(self.name, '#' + group.name, + [(m.name, self.hostname, self.realm.name, m.name, "H", 0, m.name) for m in group.iterusers()]) + + + def _userWho(self, user): + self.sendMessage(irc.RPL_ENDOFWHO, + ":User /WHO not implemented") + + + def irc_WHO(self, prefix, params): + """Who query + + Parameters: [ [ "o" ] ] + """ + #<< who #python + #>> :x.opn 352 glyph #python aquarius pc-62-31-193-114-du.blueyonder.co.uk y.opn Aquarius H :3 Aquarius + # ... + #>> :x.opn 352 glyph #python foobar europa.tranquility.net z.opn skreech H :0 skreech + #>> :x.opn 315 glyph #python :End of /WHO list. + ### also + #<< who glyph + #>> :x.opn 352 glyph #python glyph adsl-64-123-27-108.dsl.austtx.swbell.net x.opn glyph H :0 glyph + #>> :x.opn 315 glyph glyph :End of /WHO list. 
+ if not params: + self.sendMessage(irc.RPL_ENDOFWHO, ":/WHO not supported.") + return + + try: + channelOrUser = params[0].decode(self.encoding) + except UnicodeDecodeError: + self.sendMessage( + irc.RPL_ENDOFWHO, params[0], + ":End of /WHO list (could not decode your unicode!)") + return + + if channelOrUser.startswith('#'): + def ebGroup(err): + err.trap(ewords.NoSuchGroup) + self.sendMessage( + irc.RPL_ENDOFWHO, channelOrUser, + ":End of /WHO list.") + d = self.realm.lookupGroup(channelOrUser[1:]) + d.addCallbacks(self._channelWho, ebGroup) + else: + def ebUser(err): + err.trap(ewords.NoSuchUser) + self.sendMessage( + irc.RPL_ENDOFWHO, channelOrUser, + ":End of /WHO list.") + d = self.realm.lookupUser(channelOrUser) + d.addCallbacks(self._userWho, ebUser) + + + + def irc_WHOIS(self, prefix, params): + """Whois query + + Parameters: [ ] *( "," ) + """ + def cbUser(user): + self.whois( + self.name, + user.name, user.name, self.realm.name, + user.name, self.realm.name, 'Hi mom!', False, + int(time() - user.lastMessage), user.signOn, + ['#' + group.name for group in user.itergroups()]) + + def ebUser(err): + err.trap(ewords.NoSuchUser) + self.sendMessage( + irc.ERR_NOSUCHNICK, + params[0], + ":No such nick/channel") + + try: + user = params[0].decode(self.encoding) + except UnicodeDecodeError: + self.sendMessage( + irc.ERR_NOSUCHNICK, + params[0], + ":No such nick/channel") + return + + self.realm.lookupUser(user).addCallbacks(cbUser, ebUser) + + + # Unsupported commands, here for legacy compatibility + def irc_OPER(self, prefix, params): + """Oper message + + Parameters: + """ + self.sendMessage(irc.ERR_NOOPERHOST, ":O-lines not applicable") + + +class IRCFactory(protocol.ServerFactory): + """ + IRC server that creates instances of the L{IRCUser} protocol. + + @ivar _serverInfo: A dictionary mapping: + "serviceName" to the name of the server, + "serviceVersion" to the copyright version, + "creationDate" to the time that the server was started. 
+ """ + protocol = IRCUser + + def __init__(self, realm, portal): + self.realm = realm + self.portal = portal + self._serverInfo = { + "serviceName": self.realm.name, + "serviceVersion": copyright.version, + "creationDate": ctime() + } + + + +class PBMind(pb.Referenceable): + def __init__(self): + pass + + def jellyFor(self, jellier): + return reflect.qual(PBMind), jellier.invoker.registerReference(self) + + def remote_userJoined(self, user, group): + pass + + def remote_userLeft(self, user, group, reason): + pass + + def remote_receive(self, sender, recipient, message): + pass + + def remote_groupMetaUpdate(self, group, meta): + pass + + +class PBMindReference(pb.RemoteReference): + implements(iwords.IChatClient) + + def receive(self, sender, recipient, message): + if iwords.IGroup.providedBy(recipient): + rec = PBGroup(self.realm, self.avatar, recipient) + else: + rec = PBUser(self.realm, self.avatar, recipient) + return self.callRemote( + 'receive', + PBUser(self.realm, self.avatar, sender), + rec, + message) + + def groupMetaUpdate(self, group, meta): + return self.callRemote( + 'groupMetaUpdate', + PBGroup(self.realm, self.avatar, group), + meta) + + def userJoined(self, group, user): + return self.callRemote( + 'userJoined', + PBGroup(self.realm, self.avatar, group), + PBUser(self.realm, self.avatar, user)) + + def userLeft(self, group, user, reason=None): + assert reason is None or isinstance(reason, unicode) + return self.callRemote( + 'userLeft', + PBGroup(self.realm, self.avatar, group), + PBUser(self.realm, self.avatar, user), + reason) +pb.setUnjellyableForClass(PBMind, PBMindReference) + + +class PBGroup(pb.Referenceable): + def __init__(self, realm, avatar, group): + self.realm = realm + self.avatar = avatar + self.group = group + + + def processUniqueID(self): + return hash((self.realm.name, self.avatar.name, self.group.name)) + + + def jellyFor(self, jellier): + return reflect.qual(self.__class__), self.group.name.encode('utf-8'), jellier.invoker.registerReference(self) + + + def remote_leave(self, reason=None): + return self.avatar.leave(self.group, reason) + + + def remote_send(self, message): + return self.avatar.send(self.group, message) + + +class PBGroupReference(pb.RemoteReference): + implements(iwords.IGroup) + + def unjellyFor(self, unjellier, unjellyList): + clsName, name, ref = unjellyList + self.name = name.decode('utf-8') + return pb.RemoteReference.unjellyFor(self, unjellier, [clsName, ref]) + + def leave(self, reason=None): + return self.callRemote("leave", reason) + + def send(self, message): + return self.callRemote("send", message) +pb.setUnjellyableForClass(PBGroup, PBGroupReference) + +class PBUser(pb.Referenceable): + def __init__(self, realm, avatar, user): + self.realm = realm + self.avatar = avatar + self.user = user + + def processUniqueID(self): + return hash((self.realm.name, self.avatar.name, self.user.name)) + + +class ChatAvatar(pb.Referenceable): + implements(iwords.IChatClient) + + def __init__(self, avatar): + self.avatar = avatar + + + def jellyFor(self, jellier): + return reflect.qual(self.__class__), jellier.invoker.registerReference(self) + + + def remote_join(self, groupName): + assert isinstance(groupName, unicode) + def cbGroup(group): + def cbJoin(ignored): + return PBGroup(self.avatar.realm, self.avatar, group) + d = self.avatar.join(group) + d.addCallback(cbJoin) + return d + d = self.avatar.realm.getGroup(groupName) + d.addCallback(cbGroup) + return d +registerAdapter(ChatAvatar, iwords.IUser, pb.IPerspective) + +class 
AvatarReference(pb.RemoteReference): + def join(self, groupName): + return self.callRemote('join', groupName) + + def quit(self): + d = defer.Deferred() + self.broker.notifyOnDisconnect(lambda: d.callback(None)) + self.broker.transport.loseConnection() + return d + +pb.setUnjellyableForClass(ChatAvatar, AvatarReference) + + +class WordsRealm(object): + implements(portal.IRealm, iwords.IChatService) + + _encoding = 'utf-8' + + def __init__(self, name): + self.name = name + + + def userFactory(self, name): + return User(name) + + + def groupFactory(self, name): + return Group(name) + + + def logoutFactory(self, avatar, facet): + def logout(): + # XXX Deferred support here + getattr(facet, 'logout', lambda: None)() + avatar.realm = avatar.mind = None + return logout + + + def requestAvatar(self, avatarId, mind, *interfaces): + if isinstance(avatarId, str): + avatarId = avatarId.decode(self._encoding) + + def gotAvatar(avatar): + if avatar.realm is not None: + raise ewords.AlreadyLoggedIn() + for iface in interfaces: + facet = iface(avatar, None) + if facet is not None: + avatar.loggedIn(self, mind) + mind.name = avatarId + mind.realm = self + mind.avatar = avatar + return iface, facet, self.logoutFactory(avatar, facet) + raise NotImplementedError(self, interfaces) + + return self.getUser(avatarId).addCallback(gotAvatar) + + + # IChatService, mostly. + createGroupOnRequest = False + createUserOnRequest = True + + def lookupUser(self, name): + raise NotImplementedError + + + def lookupGroup(self, group): + raise NotImplementedError + + + def addUser(self, user): + """Add the given user to this service. + + This is an internal method intented to be overridden by + L{WordsRealm} subclasses, not called by external code. + + @type user: L{IUser} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with C{None} when the user is + added, or which fails with + L{twisted.words.ewords.DuplicateUser} if a user with the + same name exists already. + """ + raise NotImplementedError + + + def addGroup(self, group): + """Add the given group to this service. + + @type group: L{IGroup} + + @rtype: L{twisted.internet.defer.Deferred} + @return: A Deferred which fires with C{None} when the group is + added, or which fails with + L{twisted.words.ewords.DuplicateGroup} if a group with the + same name exists already. 
+ """ + raise NotImplementedError + + + def getGroup(self, name): + assert isinstance(name, unicode) + if self.createGroupOnRequest: + def ebGroup(err): + err.trap(ewords.DuplicateGroup) + return self.lookupGroup(name) + return self.createGroup(name).addErrback(ebGroup) + return self.lookupGroup(name) + + + def getUser(self, name): + assert isinstance(name, unicode) + if self.createUserOnRequest: + def ebUser(err): + err.trap(ewords.DuplicateUser) + return self.lookupUser(name) + return self.createUser(name).addErrback(ebUser) + return self.lookupUser(name) + + + def createUser(self, name): + assert isinstance(name, unicode) + def cbLookup(user): + return failure.Failure(ewords.DuplicateUser(name)) + def ebLookup(err): + err.trap(ewords.NoSuchUser) + return self.userFactory(name) + + name = name.lower() + d = self.lookupUser(name) + d.addCallbacks(cbLookup, ebLookup) + d.addCallback(self.addUser) + return d + + + def createGroup(self, name): + assert isinstance(name, unicode) + def cbLookup(group): + return failure.Failure(ewords.DuplicateGroup(name)) + def ebLookup(err): + err.trap(ewords.NoSuchGroup) + return self.groupFactory(name) + + name = name.lower() + d = self.lookupGroup(name) + d.addCallbacks(cbLookup, ebLookup) + d.addCallback(self.addGroup) + return d + + +class InMemoryWordsRealm(WordsRealm): + def __init__(self, *a, **kw): + super(InMemoryWordsRealm, self).__init__(*a, **kw) + self.users = {} + self.groups = {} + + + def itergroups(self): + return defer.succeed(self.groups.itervalues()) + + + def addUser(self, user): + if user.name in self.users: + return defer.fail(failure.Failure(ewords.DuplicateUser())) + self.users[user.name] = user + return defer.succeed(user) + + + def addGroup(self, group): + if group.name in self.groups: + return defer.fail(failure.Failure(ewords.DuplicateGroup())) + self.groups[group.name] = group + return defer.succeed(group) + + + def lookupUser(self, name): + assert isinstance(name, unicode) + name = name.lower() + try: + user = self.users[name] + except KeyError: + return defer.fail(failure.Failure(ewords.NoSuchUser(name))) + else: + return defer.succeed(user) + + + def lookupGroup(self, name): + assert isinstance(name, unicode) + name = name.lower() + try: + group = self.groups[name] + except KeyError: + return defer.fail(failure.Failure(ewords.NoSuchGroup(name))) + else: + return defer.succeed(group) + +__all__ = [ + 'Group', 'User', + + 'WordsRealm', 'InMemoryWordsRealm', + ] diff --git a/vendor/Twisted-10.0.0/twisted/words/tap.py b/vendor/Twisted-10.0.0/twisted/words/tap.py new file mode 100644 index 000000000000..2a6656ee03b2 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/tap.py @@ -0,0 +1,72 @@ +# -*- test-case-name: twisted.words.test.test_tap -*- +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. 
+""" +Shiny new words service maker +""" + +import sys, socket + +from twisted.application import strports +from twisted.application.service import MultiService +from twisted.python import usage +from twisted import plugin + +from twisted.words import iwords, service +from twisted.cred import checkers, credentials, portal, strcred + +class Options(usage.Options, strcred.AuthOptionMixin): + supportedInterfaces = [credentials.IUsernamePassword] + optParameters = [ + ('hostname', None, socket.gethostname(), + 'Name of this server; purely an informative')] + + interfacePlugins = {} + plg = None + for plg in plugin.getPlugins(iwords.IProtocolPlugin): + assert plg.name not in interfacePlugins + interfacePlugins[plg.name] = plg + optParameters.append(( + plg.name + '-port', + None, None, + 'strports description of the port to bind for the ' + plg.name + ' server')) + del plg + + def __init__(self, *a, **kw): + usage.Options.__init__(self, *a, **kw) + self['groups'] = [] + + def opt_group(self, name): + """Specify a group which should exist + """ + self['groups'].append(name.decode(sys.stdin.encoding)) + + def opt_passwd(self, filename): + """ + Name of a passwd-style file. (This is for + backwards-compatibility only; you should use the --auth + command instead.) + """ + self.addChecker(checkers.FilePasswordDB(filename)) + +def makeService(config): + credCheckers = config.get('credCheckers', []) + wordsRealm = service.InMemoryWordsRealm(config['hostname']) + wordsPortal = portal.Portal(wordsRealm, credCheckers) + + msvc = MultiService() + + # XXX Attribute lookup on config is kind of bad - hrm. + for plgName in config.interfacePlugins: + port = config.get(plgName + '-port') + if port is not None: + factory = config.interfacePlugins[plgName].getFactory(wordsRealm, wordsPortal) + svc = strports.service(port, factory) + svc.setServiceParent(msvc) + + # This is bogus. createGroup is async. makeService must be + # allowed to return a Deferred or some crap. + for g in config['groups']: + wordsRealm.createGroup(g) + + return msvc diff --git a/vendor/Twisted-10.0.0/twisted/words/test/__init__.py b/vendor/Twisted-10.0.0/twisted/words/test/__init__.py new file mode 100644 index 000000000000..d599f20dc7cd --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/__init__.py @@ -0,0 +1 @@ +"Words tests" diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_basesupport.py b/vendor/Twisted-10.0.0/twisted/words/test/test_basesupport.py new file mode 100644 index 000000000000..00b852b391b7 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_basesupport.py @@ -0,0 +1,97 @@ +# Copyright (c) 2001-2006 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.trial import unittest +from twisted.words.im import basesupport +from twisted.internet import error, defer + +class DummyAccount(basesupport.AbstractAccount): + """ + An account object that will do nothing when asked to start to log on. + """ + + loginHasFailed = False + loginCallbackCalled = False + + def _startLogOn(self, *args): + """ + Set self.loginDeferred to the same as the deferred returned, allowing a + testcase to .callback or .errback. + + @return: A deferred. 
+ """ + self.loginDeferred = defer.Deferred() + return self.loginDeferred + + def _loginFailed(self, result): + self.loginHasFailed = True + return basesupport.AbstractAccount._loginFailed(self, result) + + def _cb_logOn(self, result): + self.loginCallbackCalled = True + return basesupport.AbstractAccount._cb_logOn(self, result) + +class DummyUI(object): + """ + Provide just the interface required to be passed to AbstractAccount.logOn. + """ + clientRegistered = False + + def registerAccountClient(self, result): + self.clientRegistered = True + +class ClientMsgTests(unittest.TestCase): + def makeUI(self): + return DummyUI() + + def makeAccount(self): + return DummyAccount('la', False, 'la', None, 'localhost', 6667) + + def test_connect(self): + """ + Test that account.logOn works, and it calls the right callback when a + connection is established. + """ + account = self.makeAccount() + ui = self.makeUI() + d = account.logOn(ui) + account.loginDeferred.callback(None) + + def check(result): + self.assert_(not account.loginHasFailed, + "Login shouldn't have failed") + self.assert_(account.loginCallbackCalled, + "We should be logged in") + d.addCallback(check) + return d + + def test_failedConnect(self): + """ + Test that account.logOn works, and it calls the right callback when a + connection is established. + """ + account = self.makeAccount() + ui = self.makeUI() + d = account.logOn(ui) + account.loginDeferred.errback(Exception()) + + def err(reason): + self.assert_(account.loginHasFailed, "Login should have failed") + self.assert_(not account.loginCallbackCalled, + "We shouldn't be logged in") + self.assert_(not ui.clientRegistered, + "Client shouldn't be registered in the UI") + cb = lambda r: self.assert_(False, "Shouldn't get called back") + d.addCallbacks(cb, err) + return d + + def test_alreadyConnecting(self): + """ + Test that it can fail sensibly when someone tried to connect before + we did. + """ + account = self.makeAccount() + ui = self.makeUI() + account.logOn(ui) + self.assertRaises(error.ConnectError, account.logOn, ui) + diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_domish.py b/vendor/Twisted-10.0.0/twisted/words/test/test_domish.py new file mode 100644 index 000000000000..fcff3ee36b2c --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_domish.py @@ -0,0 +1,421 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.xish.domish}, a DOM-like library for XMPP. 
+""" + +from twisted.trial import unittest +from twisted.words.xish import domish + + +class DomishTestCase(unittest.TestCase): + def testEscaping(self): + s = "&<>'\"" + self.assertEquals(domish.escapeToXml(s), "&<>'\"") + self.assertEquals(domish.escapeToXml(s, 1), "&<>'"") + + def testNamespaceObject(self): + ns = domish.Namespace("testns") + self.assertEquals(ns.foo, ("testns", "foo")) + + def testElementInit(self): + e = domish.Element((None, "foo")) + self.assertEquals(e.name, "foo") + self.assertEquals(e.uri, None) + self.assertEquals(e.defaultUri, None) + self.assertEquals(e.parent, None) + + e = domish.Element(("", "foo")) + self.assertEquals(e.name, "foo") + self.assertEquals(e.uri, "") + self.assertEquals(e.defaultUri, "") + self.assertEquals(e.parent, None) + + e = domish.Element(("testns", "foo")) + self.assertEquals(e.name, "foo") + self.assertEquals(e.uri, "testns") + self.assertEquals(e.defaultUri, "testns") + self.assertEquals(e.parent, None) + + e = domish.Element(("testns", "foo"), "test2ns") + self.assertEquals(e.name, "foo") + self.assertEquals(e.uri, "testns") + self.assertEquals(e.defaultUri, "test2ns") + + def testChildOps(self): + e = domish.Element(("testns", "foo")) + e.addContent("somecontent") + b2 = e.addElement(("testns2", "bar2")) + e["attrib1"] = "value1" + e[("testns2", "attrib2")] = "value2" + e.addElement("bar") + e.addElement("bar") + e.addContent("abc") + e.addContent("123") + + # Check content merging + self.assertEquals(e.children[-1], "abc123") + + # Check str()/content extraction + self.assertEquals(str(e), "somecontent") + + # Check direct child accessor + self.assertEquals(e.bar2, b2) + e.bar2.addContent("subcontent") + e.bar2["bar2value"] = "somevalue" + + # Check child ops + self.assertEquals(e.children[1], e.bar2) + self.assertEquals(e.children[2], e.bar) + + # Check attribute ops + self.assertEquals(e["attrib1"], "value1") + del e["attrib1"] + self.assertEquals(e.hasAttribute("attrib1"), 0) + self.assertEquals(e.hasAttribute("attrib2"), 0) + self.assertEquals(e[("testns2", "attrib2")], "value2") + + + def test_elements(self): + """ + Calling C{elements} without arguments on a L{domish.Element} returns + all child elements, whatever the qualfied name. + """ + e = domish.Element((u"testns", u"foo")) + c1 = e.addElement(u"name") + c2 = e.addElement((u"testns2", u"baz")) + c3 = e.addElement(u"quux") + c4 = e.addElement((u"testns", u"name")) + + elts = list(e.elements()) + + self.assertIn(c1, elts) + self.assertIn(c2, elts) + self.assertIn(c3, elts) + self.assertIn(c4, elts) + + + def test_elementsWithQN(self): + """ + Calling C{elements} with a namespace and local name on a + L{domish.Element} returns all child elements with that qualified name. + """ + e = domish.Element((u"testns", u"foo")) + c1 = e.addElement(u"name") + c2 = e.addElement((u"testns2", u"baz")) + c3 = e.addElement(u"quux") + c4 = e.addElement((u"testns", u"name")) + + elts = list(e.elements(u"testns", u"name")) + + self.assertIn(c1, elts) + self.assertNotIn(c2, elts) + self.assertNotIn(c3, elts) + self.assertIn(c4, elts) + + + +class DomishStreamTestsMixin: + """ + Mixin defining tests for different stream implementations. + + @ivar streamClass: A no-argument callable which will be used to create an + XML parser which can produce a stream of elements from incremental + input. 
+ """ + def setUp(self): + self.doc_started = False + self.doc_ended = False + self.root = None + self.elements = [] + self.stream = self.streamClass() + self.stream.DocumentStartEvent = self._docStarted + self.stream.ElementEvent = self.elements.append + self.stream.DocumentEndEvent = self._docEnded + + def _docStarted(self, root): + self.root = root + self.doc_started = True + + def _docEnded(self): + self.doc_ended = True + + def doTest(self, xml): + self.stream.parse(xml) + + def testHarness(self): + xml = "" + self.stream.parse(xml) + self.assertEquals(self.doc_started, True) + self.assertEquals(self.root.name, 'root') + self.assertEquals(self.elements[0].name, 'child') + self.assertEquals(self.elements[1].name, 'child2') + self.assertEquals(self.doc_ended, True) + + def testBasic(self): + xml = "\n" + \ + " " + \ + " some&data>" + \ + " " + \ + "" + + self.stream.parse(xml) + self.assertEquals(self.root.name, 'stream') + self.assertEquals(self.root.uri, 'etherx') + self.assertEquals(self.elements[0].name, 'message') + self.assertEquals(self.elements[0].uri, 'jabber') + self.assertEquals(self.elements[0]['to'], 'bar') + self.assertEquals(self.elements[0].x.uri, 'xdelay') + self.assertEquals(unicode(self.elements[0].x), 'some&data>') + + def testNoRootNS(self): + xml = "" + + self.stream.parse(xml) + self.assertEquals(self.root.uri, '') + self.assertEquals(self.elements[0].uri, 'etherx') + + def testNoDefaultNS(self): + xml = """" + + self.stream.parse(xml) + self.assertEquals(self.root.uri, 'etherx') + self.assertEquals(self.root.defaultUri, '') + self.assertEquals(self.elements[0].uri, '') + self.assertEquals(self.elements[0].defaultUri, '') + + def testChildDefaultNS(self): + xml = "" + + self.stream.parse(xml) + self.assertEquals(self.root.uri, 'testns') + self.assertEquals(self.elements[0].uri, 'testns') + + def testEmptyChildNS(self): + xml = "" + + self.stream.parse(xml) + self.assertEquals(self.elements[0].child2.uri, '') + + def testChildPrefix(self): + xml = "" + + self.stream.parse(xml) + self.assertEquals(self.root.localPrefixes['foo'], 'testns2') + self.assertEquals(self.elements[0].uri, 'testns2') + + def testUnclosedElement(self): + self.assertRaises(domish.ParserError, self.stream.parse, + "") + + def test_namespaceReuse(self): + """ + Test that reuse of namespaces does affect an element's serialization. + + When one element uses a prefix for a certain namespace, this is + stored in the C{localPrefixes} attribute of the element. We want + to make sure that elements created after such use, won't have this + prefix end up in their C{localPrefixes} attribute, too. + """ + + xml = """ + + + """ + + self.stream.parse(xml) + self.assertEquals('child1', self.elements[0].name) + self.assertEquals('testns', self.elements[0].uri) + self.assertEquals('', self.elements[0].defaultUri) + self.assertEquals({'foo': 'testns'}, self.elements[0].localPrefixes) + self.assertEquals('child2', self.elements[1].name) + self.assertEquals('testns', self.elements[1].uri) + self.assertEquals('testns', self.elements[1].defaultUri) + self.assertEquals({}, self.elements[1].localPrefixes) + + + +class DomishExpatStreamTestCase(DomishStreamTestsMixin, unittest.TestCase): + """ + Tests for L{domish.ExpatElementStream}, the expat-based element stream + implementation. + """ + streamClass = domish.ExpatElementStream + + try: + import pyexpat + except ImportError: + skip = "pyexpat is required for ExpatElementStream tests." 
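(Editorial sketch, not part of the upstream test module.) The mixin above drives the element-stream API directly; outside the tests it can be used the same way, assuming pyexpat is available for ExpatElementStream:

    from twisted.words.xish import domish

    received = []
    stream = domish.ExpatElementStream()
    stream.DocumentStartEvent = lambda root: received.append(("start", root.name))
    stream.ElementEvent = lambda elem: received.append(("child", elem.name))
    stream.DocumentEndEvent = lambda: received.append(("end",))

    stream.parse("<stream xmlns='etherx'>"
                 "<message xmlns='jabber' to='bar'>hi</message>"
                 "</stream>")
    # received now holds ("start", "stream"), ("child", "message"), ("end",)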
+ + + +class DomishSuxStreamTestCase(DomishStreamTestsMixin, unittest.TestCase): + """ + Tests for L{domish.SuxElementStream}, the L{twisted.web.sux}-based element + stream implementation. + """ + streamClass = domish.SuxElementStream + + if domish.SuxElementStream is None: + skip = "twisted.web is required for SuxElementStream tests." + + + +class SerializerTests(unittest.TestCase): + def testNoNamespace(self): + e = domish.Element((None, "foo")) + self.assertEquals(e.toXml(), "") + self.assertEquals(e.toXml(closeElement = 0), "") + + def testDefaultNamespace(self): + e = domish.Element(("testns", "foo")) + self.assertEquals(e.toXml(), "") + + def testOtherNamespace(self): + e = domish.Element(("testns", "foo"), "testns2") + self.assertEquals(e.toXml({'testns': 'bar'}), + "") + + def testChildDefaultNamespace(self): + e = domish.Element(("testns", "foo")) + e.addElement("bar") + self.assertEquals(e.toXml(), "") + + def testChildSameNamespace(self): + e = domish.Element(("testns", "foo")) + e.addElement(("testns", "bar")) + self.assertEquals(e.toXml(), "") + + def testChildSameDefaultNamespace(self): + e = domish.Element(("testns", "foo")) + e.addElement("bar", "testns") + self.assertEquals(e.toXml(), "") + + def testChildOtherDefaultNamespace(self): + e = domish.Element(("testns", "foo")) + e.addElement(("testns2", "bar"), 'testns2') + self.assertEquals(e.toXml(), "") + + def testOnlyChildDefaultNamespace(self): + e = domish.Element((None, "foo")) + e.addElement(("ns2", "bar"), 'ns2') + self.assertEquals(e.toXml(), "") + + def testOnlyChildDefaultNamespace2(self): + e = domish.Element((None, "foo")) + e.addElement("bar") + self.assertEquals(e.toXml(), "") + + def testChildInDefaultNamespace(self): + e = domish.Element(("testns", "foo"), "testns2") + e.addElement(("testns2", "bar")) + self.assertEquals(e.toXml(), "") + + def testQualifiedAttribute(self): + e = domish.Element((None, "foo"), + attribs = {("testns2", "bar"): "baz"}) + self.assertEquals(e.toXml(), "") + + def testQualifiedAttributeDefaultNS(self): + e = domish.Element(("testns", "foo"), + attribs = {("testns", "bar"): "baz"}) + self.assertEquals(e.toXml(), "") + + def testTwoChilds(self): + e = domish.Element(('', "foo")) + child1 = e.addElement(("testns", "bar"), "testns2") + child1.addElement(('testns2', 'quux')) + child2 = e.addElement(("testns3", "baz"), "testns4") + child2.addElement(('testns', 'quux')) + self.assertEquals(e.toXml(), "") + + def testXMLNamespace(self): + e = domish.Element((None, "foo"), + attribs = {("http://www.w3.org/XML/1998/namespace", + "lang"): "en_US"}) + self.assertEquals(e.toXml(), "") + + def testQualifiedAttributeGivenListOfPrefixes(self): + e = domish.Element((None, "foo"), + attribs = {("testns2", "bar"): "baz"}) + self.assertEquals(e.toXml({"testns2": "qux"}), + "") + + def testNSPrefix(self): + e = domish.Element((None, "foo"), + attribs = {("testns2", "bar"): "baz"}) + c = e.addElement(("testns2", "qux")) + c[("testns2", "bar")] = "quux" + + self.assertEquals(e.toXml(), "") + + def testDefaultNSPrefix(self): + e = domish.Element((None, "foo"), + attribs = {("testns2", "bar"): "baz"}) + c = e.addElement(("testns2", "qux")) + c[("testns2", "bar")] = "quux" + c.addElement('foo') + + self.assertEquals(e.toXml(), "") + + def testPrefixScope(self): + e = domish.Element(('testns', 'foo')) + + self.assertEquals(e.toXml(prefixes={'testns': 'bar'}, + prefixesInScope=['bar']), + "") + + def testLocalPrefixes(self): + e = domish.Element(('testns', 'foo'), localPrefixes={'bar': 'testns'}) + 
self.assertEquals(e.toXml(), "") + + def testLocalPrefixesWithChild(self): + e = domish.Element(('testns', 'foo'), localPrefixes={'bar': 'testns'}) + e.addElement('baz') + self.assertIdentical(e.baz.defaultUri, None) + self.assertEquals(e.toXml(), "") + + def test_prefixesReuse(self): + """ + Test that prefixes passed to serialization are not modified. + + This test makes sure that passing a dictionary of prefixes repeatedly + to C{toXml} of elements does not cause serialization errors. A + previous implementation changed the passed in dictionary internally, + causing havoc later on. + """ + prefixes = {'testns': 'foo'} + + # test passing of dictionary + s = domish.SerializerClass(prefixes=prefixes) + self.assertNotIdentical(prefixes, s.prefixes) + + # test proper serialization on prefixes reuse + e = domish.Element(('testns2', 'foo'), + localPrefixes={'quux': 'testns2'}) + self.assertEquals("", + e.toXml(prefixes=prefixes)) + e = domish.Element(('testns2', 'foo')) + self.assertEquals("", + e.toXml(prefixes=prefixes)) + + def testRawXMLSerialization(self): + e = domish.Element((None, "foo")) + e.addRawXml("") + # The testcase below should NOT generate valid XML -- that's + # the whole point of using the raw XML call -- it's the callers + # responsiblity to ensure that the data inserted is valid + self.assertEquals(e.toXml(), "") + + def testRawXMLWithUnicodeSerialization(self): + e = domish.Element((None, "foo")) + e.addRawXml(u"\u00B0") + self.assertEquals(e.toXml(), u"\u00B0") + + def testUnicodeSerialization(self): + e = domish.Element((None, "foo")) + e["test"] = u"my value\u0221e" + e.addContent(u"A degree symbol...\u00B0") + self.assertEquals(e.toXml(), + u"A degree symbol...\u00B0") diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_irc.py b/vendor/Twisted-10.0.0/twisted/words/test/test_irc.py new file mode 100644 index 000000000000..7a62ef85f2d0 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_irc.py @@ -0,0 +1,1566 @@ +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.protocols.irc}. +""" + +import time + +from twisted.trial import unittest +from twisted.trial.unittest import TestCase +from twisted.words.protocols import irc +from twisted.words.protocols.irc import IRCClient +from twisted.internet import protocol +from twisted.test.proto_helpers import StringTransport, StringIOWithoutClosing + + + +class ModeParsingTests(unittest.TestCase): + """ + Tests for L{twisted.words.protocols.irc.parseModes}. + """ + paramModes = ('klb', 'b') + + + def test_emptyModes(self): + """ + Parsing an empty mode string raises L{irc.IRCBadModes}. + """ + self.assertRaises(irc.IRCBadModes, irc.parseModes, '', []) + + + def test_emptyModeSequence(self): + """ + Parsing a mode string that contains an empty sequence (either a C{+} or + C{-} followed directly by another C{+} or C{-}, or not followed by + anything at all) raises L{irc.IRCBadModes}. + """ + self.assertRaises(irc.IRCBadModes, irc.parseModes, '++k', []) + self.assertRaises(irc.IRCBadModes, irc.parseModes, '-+k', []) + self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', []) + self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', []) + + + def test_malformedModes(self): + """ + Parsing a mode string that does not start with C{+} or C{-} raises + L{irc.IRCBadModes}. 
+ """ + self.assertRaises(irc.IRCBadModes, irc.parseModes, 'foo', []) + self.assertRaises(irc.IRCBadModes, irc.parseModes, '%', []) + + + def test_nullModes(self): + """ + Parsing a mode string that contains no mode characters raises + L{irc.IRCBadModes}. + """ + self.assertRaises(irc.IRCBadModes, irc.parseModes, '+', []) + self.assertRaises(irc.IRCBadModes, irc.parseModes, '-', []) + + + def test_singleMode(self): + """ + Parsing a single mode setting with no parameters results in that mode, + with no parameters, in the "added" direction and no modes in the + "removed" direction. + """ + added, removed = irc.parseModes('+s', []) + self.assertEquals(added, [('s', None)]) + self.assertEquals(removed, []) + + added, removed = irc.parseModes('-s', []) + self.assertEquals(added, []) + self.assertEquals(removed, [('s', None)]) + + + def test_singleDirection(self): + """ + Parsing a single-direction mode setting with multiple modes and no + parameters, results in all modes falling into the same direction group. + """ + added, removed = irc.parseModes('+stn', []) + self.assertEquals(added, [('s', None), + ('t', None), + ('n', None)]) + self.assertEquals(removed, []) + + added, removed = irc.parseModes('-nt', []) + self.assertEquals(added, []) + self.assertEquals(removed, [('n', None), + ('t', None)]) + + + def test_multiDirection(self): + """ + Parsing a multi-direction mode setting with no parameters. + """ + added, removed = irc.parseModes('+s-n+ti', []) + self.assertEquals(added, [('s', None), + ('t', None), + ('i', None)]) + self.assertEquals(removed, [('n', None)]) + + + def test_consecutiveDirection(self): + """ + Parsing a multi-direction mode setting containing two consecutive mode + sequences with the same direction results in the same result as if + there were only one mode sequence in the same direction. + """ + added, removed = irc.parseModes('+sn+ti', []) + self.assertEquals(added, [('s', None), + ('n', None), + ('t', None), + ('i', None)]) + self.assertEquals(removed, []) + + + def test_mismatchedParams(self): + """ + If the number of mode parameters does not match the number of modes + expecting parameters, L{irc.IRCBadModes} is raised. + """ + self.assertRaises(irc.IRCBadModes, + irc.parseModes, + '+k', [], + self.paramModes) + self.assertRaises(irc.IRCBadModes, + irc.parseModes, + '+kl', ['foo', '10', 'lulz_extra_param'], + self.paramModes) + + + def test_parameters(self): + """ + Modes which require parameters are parsed and paired with their relevant + parameter, modes which do not require parameters do not consume any of + the parameters. + """ + added, removed = irc.parseModes( + '+klbb', + ['somekey', '42', 'nick!user@host', 'other!*@*'], + self.paramModes) + self.assertEquals(added, [('k', 'somekey'), + ('l', '42'), + ('b', 'nick!user@host'), + ('b', 'other!*@*')]) + self.assertEquals(removed, []) + + added, removed = irc.parseModes( + '-klbb', + ['nick!user@host', 'other!*@*'], + self.paramModes) + self.assertEquals(added, []) + self.assertEquals(removed, [('k', None), + ('l', None), + ('b', 'nick!user@host'), + ('b', 'other!*@*')]) + + # Mix a no-argument mode in with argument modes. 
+ added, removed = irc.parseModes( + '+knbb', + ['somekey', 'nick!user@host', 'other!*@*'], + self.paramModes) + self.assertEquals(added, [('k', 'somekey'), + ('n', None), + ('b', 'nick!user@host'), + ('b', 'other!*@*')]) + self.assertEquals(removed, []) + + + +stringSubjects = [ + "Hello, this is a nice string with no complications.", + "xargs%(NUL)smight%(NUL)slike%(NUL)sthis" % {'NUL': irc.NUL }, + "embedded%(CR)snewline%(CR)s%(NL)sFUN%(NL)s" % {'CR': irc.CR, + 'NL': irc.NL}, + "escape!%(X)s escape!%(M)s %(X)s%(X)sa %(M)s0" % {'X': irc.X_QUOTE, + 'M': irc.M_QUOTE} + ] + + +class QuotingTest(unittest.TestCase): + def test_lowquoteSanity(self): + """Testing client-server level quote/dequote""" + for s in stringSubjects: + self.failUnlessEqual(s, irc.lowDequote(irc.lowQuote(s))) + + + def test_ctcpquoteSanity(self): + """Testing CTCP message level quote/dequote""" + for s in stringSubjects: + self.failUnlessEqual(s, irc.ctcpDequote(irc.ctcpQuote(s))) + + + +class Dispatcher(irc._CommandDispatcherMixin): + """ + A dispatcher that exposes one known command and handles unknown commands. + """ + prefix = 'disp' + + def disp_working(self, a, b): + """ + A known command that returns its input. + """ + return a, b + + + def disp_unknown(self, name, a, b): + """ + Handle unknown commands by returning their name and inputs. + """ + return name, a, b + + + +class DispatcherTests(unittest.TestCase): + """ + Tests for L{irc._CommandDispatcherMixin}. + """ + def test_dispatch(self): + """ + Dispatching a command invokes the correct handler. + """ + disp = Dispatcher() + args = (1, 2) + res = disp.dispatch('working', *args) + self.assertEquals(res, args) + + + def test_dispatchUnknown(self): + """ + Dispatching an unknown command invokes the default handler. + """ + disp = Dispatcher() + name = 'missing' + args = (1, 2) + res = disp.dispatch(name, *args) + self.assertEquals(res, (name,) + args) + + + def test_dispatchMissingUnknown(self): + """ + Dispatching an unknown command, when no default handler is present, + results in an exception being raised. + """ + disp = Dispatcher() + disp.disp_unknown = None + self.assertRaises(irc.UnhandledCommand, disp.dispatch, 'bar') + + + +class ServerSupportedFeatureTests(unittest.TestCase): + """ + Tests for L{ServerSupportedFeatures} and related functions. + """ + def test_intOrDefault(self): + """ + L{_intOrDefault} converts values to C{int} if possible, otherwise + returns a default value. + """ + self.assertEquals(irc._intOrDefault(None), None) + self.assertEquals(irc._intOrDefault([]), None) + self.assertEquals(irc._intOrDefault(''), None) + self.assertEquals(irc._intOrDefault('hello', 5), 5) + self.assertEquals(irc._intOrDefault('123'), 123) + self.assertEquals(irc._intOrDefault(123), 123) + + + def test_splitParam(self): + """ + L{ServerSupportedFeatures._splitParam} splits ISUPPORT parameters + into key and values. Parameters without a separator are split into a + key and a list containing only the empty string. Escaped parameters + are unescaped. 
+ """ + params = [('FOO', ('FOO', [''])), + ('FOO=', ('FOO', [''])), + ('FOO=1', ('FOO', ['1'])), + ('FOO=1,2,3', ('FOO', ['1', '2', '3'])), + ('FOO=A\\x20B', ('FOO', ['A B'])), + ('FOO=\\x5Cx', ('FOO', ['\\x'])), + ('FOO=\\', ('FOO', ['\\'])), + ('FOO=\\n', ('FOO', ['\\n']))] + + _splitParam = irc.ServerSupportedFeatures._splitParam + + for param, expected in params: + res = _splitParam(param) + self.assertEquals(res, expected) + + self.assertRaises(ValueError, _splitParam, 'FOO=\\x') + self.assertRaises(ValueError, _splitParam, 'FOO=\\xNN') + self.assertRaises(ValueError, _splitParam, 'FOO=\\xN') + self.assertRaises(ValueError, _splitParam, 'FOO=\\x20\\x') + + + def test_splitParamArgs(self): + """ + L{ServerSupportedFeatures._splitParamArgs} splits ISUPPORT parameter + arguments into key and value. Arguments without a separator are + split into a key and an empty string. + """ + res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C:', 'D']) + self.assertEquals(res, [('A', '1'), + ('B', '2'), + ('C', ''), + ('D', '')]) + + + def test_splitParamArgsProcessor(self): + """ + L{ServerSupportedFeatures._splitParamArgs} uses the argument processor + passed to to convert ISUPPORT argument values to some more suitable + form. + """ + res = irc.ServerSupportedFeatures._splitParamArgs(['A:1', 'B:2', 'C'], + irc._intOrDefault) + self.assertEquals(res, [('A', 1), + ('B', 2), + ('C', None)]) + + + def test_parsePrefixParam(self): + """ + L{ServerSupportedFeatures._parsePrefixParam} parses the ISUPPORT PREFIX + parameter into a mapping from modes to prefix symbols, returns + C{None} if there is no parseable prefix parameter or raises + C{ValueError} if the prefix parameter is malformed. + """ + _parsePrefixParam = irc.ServerSupportedFeatures._parsePrefixParam + self.assertEquals(_parsePrefixParam(''), None) + self.assertRaises(ValueError, _parsePrefixParam, 'hello') + self.assertEquals(_parsePrefixParam('(ov)@+'), + {'o': ('@', 0), + 'v': ('+', 1)}) + + + def test_parseChanModesParam(self): + """ + L{ServerSupportedFeatures._parseChanModesParam} parses the ISUPPORT + CHANMODES parameter into a mapping from mode categories to mode + characters. Passing fewer than 4 parameters results in the empty string + for the relevant categories. Passing more than 4 parameters raises + C{ValueError}. + """ + _parseChanModesParam = irc.ServerSupportedFeatures._parseChanModesParam + self.assertEquals( + _parseChanModesParam([]), + {'addressModes': '', + 'param': '', + 'setParam': '', + 'noParam': ''}) + + self.assertEquals( + _parseChanModesParam(['b', 'k', 'l', 'imnpst']), + {'addressModes': 'b', + 'param': 'k', + 'setParam': 'l', + 'noParam': 'imnpst'}) + + self.assertEquals( + _parseChanModesParam(['b', 'k', 'l']), + {'addressModes': 'b', + 'param': 'k', + 'setParam': 'l', + 'noParam': ''}) + + self.assertRaises( + ValueError, + _parseChanModesParam, ['a', 'b', 'c', 'd', 'e']) + + + def test_parse(self): + """ + L{ServerSupportedFeatures.parse} changes the internal state of the + instance to reflect the features indicated by the parsed ISUPPORT + parameters, including unknown parameters and unsetting previously set + parameters. 
+ """ + supported = irc.ServerSupportedFeatures() + supported.parse(['MODES=4', + 'CHANLIMIT=#:20,&:10', + 'INVEX', + 'EXCEPTS=Z', + 'UNKNOWN=A,B,C']) + + self.assertEquals(supported.getFeature('MODES'), 4) + self.assertEquals(supported.getFeature('CHANLIMIT'), + [('#', 20), + ('&', 10)]) + self.assertEquals(supported.getFeature('INVEX'), 'I') + self.assertEquals(supported.getFeature('EXCEPTS'), 'Z') + self.assertEquals(supported.getFeature('UNKNOWN'), ('A', 'B', 'C')) + + self.assertTrue(supported.hasFeature('INVEX')) + supported.parse(['-INVEX']) + self.assertFalse(supported.hasFeature('INVEX')) + # Unsetting a previously unset parameter should not be a problem. + supported.parse(['-INVEX']) + + + def _parse(self, features): + """ + Parse all specified features according to the ISUPPORT specifications. + + @type features: C{list} of C{(featureName, value)} + @param features: Feature names and values to parse + + @rtype: L{irc.ServerSupportedFeatures} + """ + supported = irc.ServerSupportedFeatures() + features = ['%s=%s' % (name, value or '') + for name, value in features] + supported.parse(features) + return supported + + + def _parseFeature(self, name, value=None): + """ + Parse a feature, with the given name and value, according to the + ISUPPORT specifications and return the parsed value. + """ + supported = self._parse([(name, value)]) + return supported.getFeature(name) + + + def _testIntOrDefaultFeature(self, name, default=None): + """ + Perform some common tests on a feature known to use L{_intOrDefault}. + """ + self.assertEquals( + self._parseFeature(name, None), + default) + self.assertEquals( + self._parseFeature(name, 'notanint'), + default) + self.assertEquals( + self._parseFeature(name, '42'), + 42) + + + def _testFeatureDefault(self, name, features=None): + """ + Features known to have default values are reported as being present by + L{irc.ServerSupportedFeatures.hasFeature}, and their value defaults + correctly, when they don't appear in an ISUPPORT message. + """ + default = irc.ServerSupportedFeatures()._features[name] + + if features is None: + features = [('DEFINITELY_NOT', 'a_feature')] + + supported = self._parse(features) + self.assertTrue(supported.hasFeature(name)) + self.assertEquals(supported.getFeature(name), default) + + + def test_support_CHANMODES(self): + """ + The CHANMODES ISUPPORT parameter is parsed into a C{dict} giving the + four mode categories, C{'addressModes'}, C{'param'}, C{'setParam'}, and + C{'noParam'}. + """ + self._testFeatureDefault('CHANMODES') + self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,')]) + self._testFeatureDefault('CHANMODES', [('CHANMODES', 'b,,lk,ha,ha')]) + + self.assertEquals( + self._parseFeature('CHANMODES', ''), + {'addressModes': '', + 'param': '', + 'setParam': '', + 'noParam': ''}) + + self.assertEquals( + self._parseFeature('CHANMODES', ',A'), + {'addressModes': '', + 'param': 'A', + 'setParam': '', + 'noParam': ''}) + + self.assertEquals( + self._parseFeature('CHANMODES', 'A,Bc,Def,Ghij'), + {'addressModes': 'A', + 'param': 'Bc', + 'setParam': 'Def', + 'noParam': 'Ghij'}) + + + def test_support_IDCHAN(self): + """ + The IDCHAN support parameter is parsed into a sequence of two-tuples + giving channel prefix and ID length pairs. + """ + self.assertEquals( + self._parseFeature('IDCHAN', '!:5'), + [('!', '5')]) + + + def test_support_MAXLIST(self): + """ + The MAXLIST support parameter is parsed into a sequence of two-tuples + giving modes and their limits. 
+ """ + self.assertEquals( + self._parseFeature('MAXLIST', 'b:25,eI:50'), + [('b', 25), ('eI', 50)]) + # A non-integer parameter argument results in None. + self.assertEquals( + self._parseFeature('MAXLIST', 'b:25,eI:50,a:3.1415'), + [('b', 25), ('eI', 50), ('a', None)]) + self.assertEquals( + self._parseFeature('MAXLIST', 'b:25,eI:50,a:notanint'), + [('b', 25), ('eI', 50), ('a', None)]) + + + def test_support_NETWORK(self): + """ + The NETWORK support parameter is parsed as the network name, as + specified by the server. + """ + self.assertEquals( + self._parseFeature('NETWORK', 'IRCNet'), + 'IRCNet') + + + def test_support_SAFELIST(self): + """ + The SAFELIST support parameter is parsed into a boolean indicating + whether the safe "list" command is supported or not. + """ + self.assertEquals( + self._parseFeature('SAFELIST'), + True) + + + def test_support_STATUSMSG(self): + """ + The STATUSMSG support parameter is parsed into a string of channel + status that support the exclusive channel notice method. + """ + self.assertEquals( + self._parseFeature('STATUSMSG', '@+'), + '@+') + + + def test_support_TARGMAX(self): + """ + The TARGMAX support parameter is parsed into a dictionary, mapping + strings to integers, of the maximum number of targets for a particular + command. + """ + self.assertEquals( + self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3'), + {'PRIVMSG': 4, + 'NOTICE': 3}) + # A non-integer parameter argument results in None. + self.assertEquals( + self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:3.1415'), + {'PRIVMSG': 4, + 'NOTICE': 3, + 'KICK': None}) + self.assertEquals( + self._parseFeature('TARGMAX', 'PRIVMSG:4,NOTICE:3,KICK:notanint'), + {'PRIVMSG': 4, + 'NOTICE': 3, + 'KICK': None}) + + + def test_support_NICKLEN(self): + """ + The NICKLEN support parameter is parsed into an integer value + indicating the maximum length of a nickname the client may use, + otherwise, if the parameter is missing or invalid, the default value + (as specified by RFC 1459) is used. + """ + default = irc.ServerSupportedFeatures()._features['NICKLEN'] + self._testIntOrDefaultFeature('NICKLEN', default) + + + def test_support_CHANNELLEN(self): + """ + The CHANNELLEN support parameter is parsed into an integer value + indicating the maximum channel name length, otherwise, if the + parameter is missing or invalid, the default value (as specified by + RFC 1459) is used. + """ + default = irc.ServerSupportedFeatures()._features['CHANNELLEN'] + self._testIntOrDefaultFeature('CHANNELLEN', default) + + + def test_support_CHANTYPES(self): + """ + The CHANTYPES support parameter is parsed into a tuple of + valid channel prefix characters. + """ + self._testFeatureDefault('CHANTYPES') + + self.assertEquals( + self._parseFeature('CHANTYPES', '#&%'), + ('#', '&', '%')) + + + def test_support_KICKLEN(self): + """ + The KICKLEN support parameter is parsed into an integer value + indicating the maximum length of a kick message a client may use. + """ + self._testIntOrDefaultFeature('KICKLEN') + + + def test_support_PREFIX(self): + """ + The PREFIX support parameter is parsed into a dictionary mapping + modes to two-tuples of status symbol and priority. 
+ """ + self._testFeatureDefault('PREFIX') + self._testFeatureDefault('PREFIX', [('PREFIX', 'hello')]) + + self.assertEquals( + self._parseFeature('PREFIX', None), + None) + self.assertEquals( + self._parseFeature('PREFIX', '(ohv)@%+'), + {'o': ('@', 0), + 'h': ('%', 1), + 'v': ('+', 2)}) + self.assertEquals( + self._parseFeature('PREFIX', '(hov)@%+'), + {'o': ('%', 1), + 'h': ('@', 0), + 'v': ('+', 2)}) + + + def test_support_TOPICLEN(self): + """ + The TOPICLEN support parameter is parsed into an integer value + indicating the maximum length of a topic a client may set. + """ + self._testIntOrDefaultFeature('TOPICLEN') + + + def test_support_MODES(self): + """ + The MODES support parameter is parsed into an integer value + indicating the maximum number of "variable" modes (defined as being + modes from C{addressModes}, C{param} or C{setParam} categories for + the C{CHANMODES} ISUPPORT parameter) which may by set on a channel + by a single MODE command from a client. + """ + self._testIntOrDefaultFeature('MODES') + + + def test_support_EXCEPTS(self): + """ + The EXCEPTS support parameter is parsed into the mode character + to be used for "ban exception" modes. If no parameter is specified + then the character C{e} is assumed. + """ + self.assertEquals( + self._parseFeature('EXCEPTS', 'Z'), + 'Z') + self.assertEquals( + self._parseFeature('EXCEPTS'), + 'e') + + + def test_support_INVEX(self): + """ + The INVEX support parameter is parsed into the mode character to be + used for "invite exception" modes. If no parameter is specified then + the character C{I} is assumed. + """ + self.assertEquals( + self._parseFeature('INVEX', 'Z'), + 'Z') + self.assertEquals( + self._parseFeature('INVEX'), + 'I') + + + +class IRCClientWithoutLogin(irc.IRCClient): + performLogin = 0 + + +class CTCPTest(unittest.TestCase): + def setUp(self): + self.file = StringIOWithoutClosing() + self.transport = protocol.FileWrapper(self.file) + self.client = IRCClientWithoutLogin() + self.client.makeConnection(self.transport) + + + def test_ERRMSG(self): + """Testing CTCP query ERRMSG. + + Not because this is this is an especially important case in the + field, but it does go through the entire dispatch/decode/encode + process. + """ + + errQuery = (":nick!guy@over.there PRIVMSG #theChan :" + "%(X)cERRMSG t%(X)c%(EOL)s" + % {'X': irc.X_DELIM, + 'EOL': irc.CR + irc.LF}) + + errReply = ("NOTICE nick :%(X)cERRMSG t :" + "No error has occoured.%(X)c%(EOL)s" + % {'X': irc.X_DELIM, + 'EOL': irc.CR + irc.LF}) + + self.client.dataReceived(errQuery) + reply = self.file.getvalue() + + self.failUnlessEqual(errReply, reply) + + + def test_noNumbersVERSION(self): + """ + If attributes for version information on L{IRCClient} are set to + C{None}, the parts of the CTCP VERSION response they correspond to + are omitted. + """ + self.client.versionName = "FrobozzIRC" + self.client.ctcpQuery_VERSION("nick!guy@over.there", "#theChan", None) + versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s::" + "%(X)c%(EOL)s" + % {'X': irc.X_DELIM, + 'EOL': irc.CR + irc.LF, + 'vname': self.client.versionName}) + reply = self.file.getvalue() + self.assertEquals(versionReply, reply) + + + def test_fullVERSION(self): + """ + The response to a CTCP VERSION query includes the version number and + environment information, as specified by L{IRCClient.versionNum} and + L{IRCClient.versionEnv}. 
+ """ + self.client.versionName = "FrobozzIRC" + self.client.versionNum = "1.2g" + self.client.versionEnv = "ZorkOS" + self.client.ctcpQuery_VERSION("nick!guy@over.there", "#theChan", None) + versionReply = ("NOTICE nick :%(X)cVERSION %(vname)s:%(vnum)s:%(venv)s" + "%(X)c%(EOL)s" + % {'X': irc.X_DELIM, + 'EOL': irc.CR + irc.LF, + 'vname': self.client.versionName, + 'vnum': self.client.versionNum, + 'venv': self.client.versionEnv}) + reply = self.file.getvalue() + self.assertEquals(versionReply, reply) + + + def tearDown(self): + self.transport.loseConnection() + self.client.connectionLost() + del self.client + del self.transport + +class NoticingClient(IRCClientWithoutLogin, object): + methods = { + 'created': ('when',), + 'yourHost': ('info',), + 'myInfo': ('servername', 'version', 'umodes', 'cmodes'), + 'luserClient': ('info',), + 'bounce': ('info',), + 'isupport': ('options',), + 'luserChannels': ('channels',), + 'luserOp': ('ops',), + 'luserMe': ('info',), + 'receivedMOTD': ('motd',), + + 'privmsg': ('user', 'channel', 'message'), + 'joined': ('channel',), + 'left': ('channel',), + 'noticed': ('user', 'channel', 'message'), + 'modeChanged': ('user', 'channel', 'set', 'modes', 'args'), + 'pong': ('user', 'secs'), + 'signedOn': (), + 'kickedFrom': ('channel', 'kicker', 'message'), + 'nickChanged': ('nick',), + + 'userJoined': ('user', 'channel'), + 'userLeft': ('user', 'channel'), + 'userKicked': ('user', 'channel', 'kicker', 'message'), + 'action': ('user', 'channel', 'data'), + 'topicUpdated': ('user', 'channel', 'newTopic'), + 'userRenamed': ('oldname', 'newname')} + + + def __init__(self, *a, **kw): + # It is important that IRCClient.__init__ is not called since + # traditionally it did not exist, so it is important that nothing is + # initialised there that would prevent subclasses that did not (or + # could not) invoke the base implementation. Any protocol + # initialisation should happen in connectionMode. 
+ self.calls = [] + + + def __getattribute__(self, name): + if name.startswith('__') and name.endswith('__'): + return super(NoticingClient, self).__getattribute__(name) + try: + args = super(NoticingClient, self).__getattribute__('methods')[name] + except KeyError: + return super(NoticingClient, self).__getattribute__(name) + else: + return self.makeMethod(name, args) + + + def makeMethod(self, fname, args): + def method(*a, **kw): + if len(a) > len(args): + raise TypeError("TypeError: %s() takes %d arguments " + "(%d given)" % (fname, len(args), len(a))) + for (name, value) in zip(args, a): + if name in kw: + raise TypeError("TypeError: %s() got multiple values " + "for keyword argument '%s'" % (fname, name)) + else: + kw[name] = value + if len(kw) != len(args): + raise TypeError("TypeError: %s() takes %d arguments " + "(%d given)" % (fname, len(args), len(a))) + self.calls.append((fname, kw)) + return method + + +def pop(dict, key, default): + try: + value = dict[key] + except KeyError: + return default + else: + del dict[key] + return value + +class ClientImplementationTests(unittest.TestCase): + def setUp(self): + self.file = StringIOWithoutClosing() + self.transport = protocol.FileWrapper(self.file) + self.client = NoticingClient() + self.client.makeConnection(self.transport) + + + def tearDown(self): + self.transport.loseConnection() + self.client.connectionLost() + del self.client + del self.transport + + + def _serverTestImpl(self, code, msg, func, **kw): + host = pop(kw, 'host', 'server.host') + nick = pop(kw, 'nick', 'nickname') + args = pop(kw, 'args', '') + + message = (":" + + host + " " + + code + " " + + nick + " " + + args + " :" + + msg + "\r\n") + + self.client.dataReceived(message) + self.assertEquals( + self.client.calls, + [(func, kw)]) + + + def testYourHost(self): + msg = "Your host is some.host[blah.blah/6667], running version server-version-3" + self._serverTestImpl("002", msg, "yourHost", info=msg) + + + def testCreated(self): + msg = "This server was cobbled together Fri Aug 13 18:00:25 UTC 2004" + self._serverTestImpl("003", msg, "created", when=msg) + + + def testMyInfo(self): + msg = "server.host server-version abcDEF bcdEHI" + self._serverTestImpl("004", msg, "myInfo", + servername="server.host", + version="server-version", + umodes="abcDEF", + cmodes="bcdEHI") + + + def testLuserClient(self): + msg = "There are 9227 victims and 9542 hiding on 24 servers" + self._serverTestImpl("251", msg, "luserClient", + info=msg) + + + def _sendISUPPORT(self): + args = ("MODES=4 CHANLIMIT=#:20 NICKLEN=16 USERLEN=10 HOSTLEN=63 " + "TOPICLEN=450 KICKLEN=450 CHANNELLEN=30 KEYLEN=23 CHANTYPES=# " + "PREFIX=(ov)@+ CASEMAPPING=ascii CAPAB IRCD=dancer") + msg = "are available on this server" + self._serverTestImpl("005", msg, "isupport", args=args, + options=['MODES=4', + 'CHANLIMIT=#:20', + 'NICKLEN=16', + 'USERLEN=10', + 'HOSTLEN=63', + 'TOPICLEN=450', + 'KICKLEN=450', + 'CHANNELLEN=30', + 'KEYLEN=23', + 'CHANTYPES=#', + 'PREFIX=(ov)@+', + 'CASEMAPPING=ascii', + 'CAPAB', + 'IRCD=dancer']) + + + def test_ISUPPORT(self): + """ + The client parses ISUPPORT messages sent by the server and calls + L{IRCClient.isupport}. 
+ """ + self._sendISUPPORT() + + + def testBounce(self): + msg = "Try server some.host, port 321" + self._serverTestImpl("010", msg, "bounce", + info=msg) + + + def testLuserChannels(self): + args = "7116" + msg = "channels formed" + self._serverTestImpl("254", msg, "luserChannels", args=args, + channels=int(args)) + + + def testLuserOp(self): + args = "34" + msg = "flagged staff members" + self._serverTestImpl("252", msg, "luserOp", args=args, + ops=int(args)) + + + def testLuserMe(self): + msg = "I have 1937 clients and 0 servers" + self._serverTestImpl("255", msg, "luserMe", + info=msg) + + + def test_receivedMOTD(self): + """ + Lines received in I{RPL_MOTDSTART} and I{RPL_MOTD} are delivered to + L{IRCClient.receivedMOTD} when I{RPL_ENDOFMOTD} is received. + """ + lines = [ + ":host.name 375 nickname :- host.name Message of the Day -", + ":host.name 372 nickname :- Welcome to host.name", + ":host.name 376 nickname :End of /MOTD command."] + for L in lines: + self.assertEquals(self.client.calls, []) + self.client.dataReceived(L + '\r\n') + + self.assertEquals( + self.client.calls, + [("receivedMOTD", {"motd": ["host.name Message of the Day -", "Welcome to host.name"]})]) + + # After the motd is delivered, the tracking variable should be + # reset. + self.assertIdentical(self.client.motd, None) + + + def test_withoutMOTDSTART(self): + """ + If L{IRCClient} receives I{RPL_MOTD} and I{RPL_ENDOFMOTD} without + receiving I{RPL_MOTDSTART}, L{IRCClient.receivedMOTD} is still + called with a list of MOTD lines. + """ + lines = [ + ":host.name 372 nickname :- Welcome to host.name", + ":host.name 376 nickname :End of /MOTD command."] + + for L in lines: + self.client.dataReceived(L + '\r\n') + + self.assertEquals( + self.client.calls, + [("receivedMOTD", {"motd": ["Welcome to host.name"]})]) + + + def _clientTestImpl(self, sender, group, type, msg, func, **kw): + ident = pop(kw, 'ident', 'ident') + host = pop(kw, 'host', 'host') + + wholeUser = sender + '!' + ident + '@' + host + message = (":" + + wholeUser + " " + + type + " " + + group + " :" + + msg + "\r\n") + self.client.dataReceived(message) + self.assertEquals( + self.client.calls, + [(func, kw)]) + self.client.calls = [] + + + def testPrivmsg(self): + msg = "Tooty toot toot." + self._clientTestImpl("sender", "#group", "PRIVMSG", msg, "privmsg", + ident="ident", host="host", + # Expected results below + user="sender!ident@host", + channel="#group", + message=msg) + + self._clientTestImpl("sender", "recipient", "PRIVMSG", msg, "privmsg", + ident="ident", host="host", + # Expected results below + user="sender!ident@host", + channel="recipient", + message=msg) + + + def test_getChannelModeParams(self): + """ + L{IRCClient.getChannelModeParams} uses ISUPPORT information, either + given by the server or defaults, to determine which channel modes + require arguments when being added or removed. + """ + add, remove = map(sorted, self.client.getChannelModeParams()) + self.assertEquals(add, ['b', 'h', 'k', 'l', 'o', 'v']) + self.assertEquals(remove, ['b', 'h', 'o', 'v']) + + def removeFeature(name): + name = '-' + name + msg = "are available on this server" + self._serverTestImpl( + '005', msg, 'isupport', args=name, options=[name]) + self.assertIdentical( + self.client.supported.getFeature(name), None) + self.client.calls = [] + + # Remove CHANMODES feature, causing getFeature('CHANMODES') to return + # None. 
+ removeFeature('CHANMODES') + add, remove = map(sorted, self.client.getChannelModeParams()) + self.assertEquals(add, ['h', 'o', 'v']) + self.assertEquals(remove, ['h', 'o', 'v']) + + # Remove PREFIX feature, causing getFeature('PREFIX') to return None. + removeFeature('PREFIX') + add, remove = map(sorted, self.client.getChannelModeParams()) + self.assertEquals(add, []) + self.assertEquals(remove, []) + + # Restore ISUPPORT features. + self._sendISUPPORT() + self.assertNotIdentical( + self.client.supported.getFeature('PREFIX'), None) + + + def test_getUserModeParams(self): + """ + L{IRCClient.getUserModeParams} returns a list of user modes (modes that + the user sets on themself, outside of channel modes) that require + parameters when added and removed, respectively. + """ + add, remove = map(sorted, self.client.getUserModeParams()) + self.assertEquals(add, []) + self.assertEquals(remove, []) + + + def _sendModeChange(self, msg, args='', target=None): + """ + Build a MODE string and send it to the client. + """ + if target is None: + target = '#chan' + message = ":Wolf!~wolf@yok.utu.fi MODE %s %s %s\r\n" % ( + target, msg, args) + self.client.dataReceived(message) + + + def _parseModeChange(self, results, target=None): + """ + Parse the results, do some test and return the data to check. + """ + if target is None: + target = '#chan' + + for n, result in enumerate(results): + method, data = result + self.assertEquals(method, 'modeChanged') + self.assertEquals(data['user'], 'Wolf!~wolf@yok.utu.fi') + self.assertEquals(data['channel'], target) + results[n] = tuple([data[key] for key in ('set', 'modes', 'args')]) + return results + + + def _checkModeChange(self, expected, target=None): + """ + Compare the expected result with the one returned by the client. + """ + result = self._parseModeChange(self.client.calls, target) + self.assertEquals(result, expected) + self.client.calls = [] + + + def test_modeMissingDirection(self): + """ + Mode strings that do not begin with a directional character, C{'+'} or + C{'-'}, have C{'+'} automatically prepended. + """ + self._sendModeChange('s') + self._checkModeChange([(True, 's', (None,))]) + + + def test_noModeParameters(self): + """ + No parameters are passed to L{IRCClient.modeChanged} for modes that + don't take any parameters. + """ + self._sendModeChange('-s') + self._checkModeChange([(False, 's', (None,))]) + self._sendModeChange('+n') + self._checkModeChange([(True, 'n', (None,))]) + + + def test_oneModeParameter(self): + """ + Parameters are passed to L{IRCClient.modeChanged} for modes that take + parameters. + """ + self._sendModeChange('+o', 'a_user') + self._checkModeChange([(True, 'o', ('a_user',))]) + self._sendModeChange('-o', 'a_user') + self._checkModeChange([(False, 'o', ('a_user',))]) + + + def test_mixedModes(self): + """ + Mixing adding and removing modes that do and don't take parameters + invokes L{IRCClient.modeChanged} with mode characters and parameters + that match up. + """ + self._sendModeChange('+osv', 'a_user another_user') + self._checkModeChange([(True, 'osv', ('a_user', None, 'another_user'))]) + self._sendModeChange('+v-os', 'a_user another_user') + self._checkModeChange([(True, 'v', ('a_user',)), + (False, 'os', ('another_user', None))]) + + + def test_tooManyModeParameters(self): + """ + Passing an argument to modes that take no parameters results in + L{IRCClient.modeChanged} not being called and an error being logged. 
+ """ + self._sendModeChange('+s', 'wrong') + self._checkModeChange([]) + errors = self.flushLoggedErrors(irc.IRCBadModes) + self.assertEquals(len(errors), 1) + self.assertSubstring( + 'Too many parameters', errors[0].getErrorMessage()) + + + def test_tooFewModeParameters(self): + """ + Passing no arguments to modes that do take parameters results in + L{IRCClient.modeChange} not being called and an error being logged. + """ + self._sendModeChange('+o') + self._checkModeChange([]) + errors = self.flushLoggedErrors(irc.IRCBadModes) + self.assertEquals(len(errors), 1) + self.assertSubstring( + 'Not enough parameters', errors[0].getErrorMessage()) + + + def test_userMode(self): + """ + A C{MODE} message whose target is our user (the nickname of our user, + to be precise), as opposed to a channel, will be parsed according to + the modes specified by L{IRCClient.getUserModeParams}. + """ + target = self.client.nickname + # Mode "o" on channels is supposed to take a parameter, but since this + # is not a channel this will not cause an exception. + self._sendModeChange('+o', target=target) + self._checkModeChange([(True, 'o', (None,))], target=target) + + def getUserModeParams(): + return ['Z', ''] + + # Introduce our own user mode that takes an argument. + self.patch(self.client, 'getUserModeParams', getUserModeParams) + + self._sendModeChange('+Z', 'an_arg', target=target) + self._checkModeChange([(True, 'Z', ('an_arg',))], target=target) + + + +class BasicServerFunctionalityTestCase(unittest.TestCase): + def setUp(self): + self.f = StringIOWithoutClosing() + self.t = protocol.FileWrapper(self.f) + self.p = irc.IRC() + self.p.makeConnection(self.t) + + + def check(self, s): + self.assertEquals(self.f.getvalue(), s) + + + def testPrivmsg(self): + self.p.privmsg("this-is-sender", "this-is-recip", "this is message") + self.check(":this-is-sender PRIVMSG this-is-recip :this is message\r\n") + + + def testNotice(self): + self.p.notice("this-is-sender", "this-is-recip", "this is notice") + self.check(":this-is-sender NOTICE this-is-recip :this is notice\r\n") + + + def testAction(self): + self.p.action("this-is-sender", "this-is-recip", "this is action") + self.check(":this-is-sender ACTION this-is-recip :this is action\r\n") + + + def testJoin(self): + self.p.join("this-person", "#this-channel") + self.check(":this-person JOIN #this-channel\r\n") + + + def testPart(self): + self.p.part("this-person", "#that-channel") + self.check(":this-person PART #that-channel\r\n") + + + def testWhois(self): + """ + Verify that a whois by the client receives the right protocol actions + from the server. 
+ """ + timestamp = int(time.time()-100) + hostname = self.p.hostname + req = 'requesting-nick' + targ = 'target-nick' + self.p.whois(req, targ, 'target', 'host.com', + 'Target User', 'irc.host.com', 'A fake server', False, + 12, timestamp, ['#fakeusers', '#fakemisc']) + expected = '\r\n'.join([ +':%(hostname)s 311 %(req)s %(targ)s target host.com * :Target User', +':%(hostname)s 312 %(req)s %(targ)s irc.host.com :A fake server', +':%(hostname)s 317 %(req)s %(targ)s 12 %(timestamp)s :seconds idle, signon time', +':%(hostname)s 319 %(req)s %(targ)s :#fakeusers #fakemisc', +':%(hostname)s 318 %(req)s %(targ)s :End of WHOIS list.', +'']) % dict(hostname=hostname, timestamp=timestamp, req=req, targ=targ) + self.check(expected) + + +class DummyClient(irc.IRCClient): + def __init__(self): + self.lines = [] + def sendLine(self, m): + self.lines.append(m) + + +class ClientMsgTests(unittest.TestCase): + def setUp(self): + self.client = DummyClient() + + + def testSingleLine(self): + self.client.msg('foo', 'bar') + self.assertEquals(self.client.lines, ['PRIVMSG foo :bar']) + + + def testDodgyMaxLength(self): + self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 0) + self.assertRaises(ValueError, self.client.msg, 'foo', 'bar', 3) + + + def testMultipleLine(self): + maxLen = len('PRIVMSG foo :') + 3 + 2 # 2 for line endings + self.client.msg('foo', 'barbazbo', maxLen) + self.assertEquals(self.client.lines, ['PRIVMSG foo :bar', + 'PRIVMSG foo :baz', + 'PRIVMSG foo :bo']) + + + def testSufficientWidth(self): + msg = 'barbazbo' + maxLen = len('PRIVMSG foo :%s' % (msg,)) + 2 + self.client.msg('foo', msg, maxLen) + self.assertEquals(self.client.lines, ['PRIVMSG foo :%s' % (msg,)]) + self.client.lines = [] + self.client.msg('foo', msg, maxLen-1) + self.assertEquals(2, len(self.client.lines)) + self.client.lines = [] + self.client.msg('foo', msg, maxLen+1) + self.assertEquals(1, len(self.client.lines)) + + + def testSplitSanity(self): + # Whiteboxing + self.assertRaises(ValueError, irc.split, 'foo', -1) + self.assertRaises(ValueError, irc.split, 'foo', 0) + self.assertEquals([], irc.split('', 1)) + self.assertEquals([], irc.split('')) + + + def test_splitDelimiters(self): + """ + Test that split() skips any delimiter (space or newline) that it finds + at the very beginning of the string segment it is operating on. + Nothing should be added to the output list because of it. + """ + r = irc.split("xx yyz", 2) + self.assertEquals(['xx', 'yy', 'z'], r) + r = irc.split("xx\nyyz", 2) + self.assertEquals(['xx', 'yy', 'z'], r) + + +class ClientTests(TestCase): + """ + Tests for the protocol-level behavior of IRCClient methods intended to + be called by application code. + """ + def setUp(self): + """ + Create and connect a new L{IRCClient} to a new L{StringTransport}. + """ + self.transport = StringTransport() + self.protocol = IRCClient() + self.protocol.performLogin = False + self.protocol.makeConnection(self.transport) + + # Sanity check - we don't want anything to have happened at this + # point, since we're not in a test yet. + self.assertEquals(self.transport.value(), "") + + + def getLastLine(self, transport): + """ + Return the last IRC message in the transport buffer. + """ + return transport.value().split('\r\n')[-2] + + + def test_away(self): + """ + L{IRCCLient.away} sends an AWAY command with the specified message. + """ + message = "Sorry, I'm not here." 
+ self.protocol.away(message) + expected = [ + 'AWAY :%s' % (message,), + '', + ] + self.assertEquals(self.transport.value().split('\r\n'), expected) + + + def test_back(self): + """ + L{IRCClient.back} sends an AWAY command with an empty message. + """ + self.protocol.back() + expected = [ + 'AWAY :', + '', + ] + self.assertEquals(self.transport.value().split('\r\n'), expected) + + + def test_whois(self): + """ + L{IRCClient.whois} sends a WHOIS message. + """ + self.protocol.whois('alice') + self.assertEquals( + self.transport.value().split('\r\n'), + ['WHOIS alice', '']) + + + def test_whoisWithServer(self): + """ + L{IRCClient.whois} sends a WHOIS message with a server name if a + value is passed for the C{server} parameter. + """ + self.protocol.whois('alice', 'example.org') + self.assertEquals( + self.transport.value().split('\r\n'), + ['WHOIS example.org alice', '']) + + + def test_register(self): + """ + L{IRCClient.register} sends NICK and USER commands with the + username, name, hostname, server name, and real name specified. + """ + username = 'testuser' + hostname = 'testhost' + servername = 'testserver' + self.protocol.realname = 'testname' + self.protocol.password = None + self.protocol.register(username, hostname, servername) + expected = [ + 'NICK %s' % (username,), + 'USER %s %s %s :%s' % ( + username, hostname, servername, self.protocol.realname), + ''] + self.assertEquals(self.transport.value().split('\r\n'), expected) + + + def test_registerWithPassword(self): + """ + If the C{password} attribute of L{IRCClient} is not C{None}, the + C{register} method also sends a PASS command with it as the + argument. + """ + username = 'testuser' + hostname = 'testhost' + servername = 'testserver' + self.protocol.realname = 'testname' + self.protocol.password = 'testpass' + self.protocol.register(username, hostname, servername) + expected = [ + 'PASS %s' % (self.protocol.password,), + 'NICK %s' % (username,), + 'USER %s %s %s :%s' % ( + username, hostname, servername, self.protocol.realname), + ''] + self.assertEquals(self.transport.value().split('\r\n'), expected) + + + def test_registerWithTakenNick(self): + """ + Verify that the client repeats the L{IRCClient.setNick} method with a + new value when presented with an C{ERR_NICKNAMEINUSE} while trying to + register. + """ + username = 'testuser' + hostname = 'testhost' + servername = 'testserver' + self.protocol.realname = 'testname' + self.protocol.password = 'testpass' + self.protocol.register(username, hostname, servername) + self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param']) + lastLine = self.getLastLine(self.transport) + self.assertNotEquals(lastLine, 'NICK %s' % (username,)) + + # Keep chaining underscores for each collision + self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param']) + lastLine = self.getLastLine(self.transport) + self.assertEquals(lastLine, 'NICK %s' % (username + '__',)) + + + def test_overrideAlterCollidedNick(self): + """ + L{IRCClient.alterCollidedNick} determines how a nickname is altered upon + collision while a user is trying to change to that nickname. 
+ """ + nick = 'foo' + self.protocol.alterCollidedNick = lambda nick: nick + '***' + self.protocol.register(nick) + self.protocol.irc_ERR_NICKNAMEINUSE('prefix', ['param']) + lastLine = self.getLastLine(self.transport) + self.assertEquals( + lastLine, 'NICK %s' % (nick + '***',)) + + + def test_nickChange(self): + """ + When a NICK command is sent after signon, C{IRCClient.nickname} is set + to the new nickname I{after} the server sends an acknowledgement. + """ + oldnick = 'foo' + newnick = 'bar' + self.protocol.register(oldnick) + self.protocol.irc_RPL_WELCOME('prefix', ['param']) + self.protocol.setNick(newnick) + self.assertEquals(self.protocol.nickname, oldnick) + self.protocol.irc_NICK('%s!quux@qux' % (oldnick,), [newnick]) + self.assertEquals(self.protocol.nickname, newnick) + + + def test_erroneousNick(self): + """ + Trying to register an illegal nickname results in the default legal + nickname being set, and trying to change a nickname to an illegal + nickname results in the old nickname being kept. + """ + # Registration case: change illegal nickname to erroneousNickFallback + badnick = 'foo' + self.assertEquals(self.protocol._registered, False) + self.protocol.register(badnick) + self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param']) + lastLine = self.getLastLine(self.transport) + self.assertEquals( + lastLine, 'NICK %s' % (self.protocol.erroneousNickFallback,)) + self.protocol.irc_RPL_WELCOME('prefix', ['param']) + self.assertEquals(self.protocol._registered, True) + self.protocol.setNick(self.protocol.erroneousNickFallback) + self.assertEquals( + self.protocol.nickname, self.protocol.erroneousNickFallback) + + # Illegal nick change attempt after registration. Fall back to the old + # nickname instead of erroneousNickFallback. + oldnick = self.protocol.nickname + self.protocol.setNick(badnick) + self.protocol.irc_ERR_ERRONEUSNICKNAME('prefix', ['param']) + lastLine = self.getLastLine(self.transport) + self.assertEquals( + lastLine, 'NICK %s' % (badnick,)) + self.assertEquals(self.protocol.nickname, oldnick) + + + def test_describe(self): + """ + L{IRCClient.desrcibe} sends a CTCP ACTION message to the target + specified. + """ + target = 'foo' + channel = '#bar' + action = 'waves' + self.protocol.describe(target, action) + self.protocol.describe(channel, action) + expected = [ + 'PRIVMSG %s :\01ACTION %s\01' % (target, action), + 'PRIVMSG %s :\01ACTION %s\01' % (channel, action), + ''] + self.assertEquals(self.transport.value().split('\r\n'), expected) + + + def test_me(self): + """ + L{IRCClient.me} sends a CTCP ACTION message to the target channel + specified. + If the target does not begin with a standard channel prefix, + '#' is prepended. + """ + target = 'foo' + channel = '#bar' + action = 'waves' + self.protocol.me(target, action) + self.protocol.me(channel, action) + expected = [ + 'PRIVMSG %s :\01ACTION %s\01' % ('#' + target, action), + 'PRIVMSG %s :\01ACTION %s\01' % (channel, action), + ''] + self.assertEquals(self.transport.value().split('\r\n'), expected) + warnings = self.flushWarnings( + offendingFunctions=[self.test_me]) + self.assertEquals( + warnings[0]['message'], + "me() is deprecated since Twisted 9.0. 
Use IRCClient.describe().") + self.assertEquals(warnings[0]['category'], DeprecationWarning) + self.assertEquals(len(warnings), 2) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_irc_service.py b/vendor/Twisted-10.0.0/twisted/words/test/test_irc_service.py new file mode 100644 index 000000000000..13e809ee6eca --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_irc_service.py @@ -0,0 +1,110 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for IRC portions of L{twisted.words.service}. +""" + +from twisted.trial import unittest +from twisted.test import proto_helpers +from twisted.words.service import InMemoryWordsRealm, IRCFactory +from twisted.words.protocols import irc +from twisted.cred import checkers, portal + +class IRCUserTestCase(unittest.TestCase): + """ + Isolated tests for L{IRCUser} + """ + + def setUp(self): + """ + Sets up a Realm, Portal, Factory, IRCUser, Transport, and Connection + for our tests. + """ + self.wordsRealm = InMemoryWordsRealm("example.com") + self.portal = portal.Portal(self.wordsRealm, + [checkers.InMemoryUsernamePasswordDatabaseDontUse(john="pass")]) + self.factory = IRCFactory(self.wordsRealm, self.portal) + self.ircUser = self.factory.buildProtocol(None) + self.stringTransport = proto_helpers.StringTransport() + self.ircUser.makeConnection(self.stringTransport) + + + def test_sendMessage(self): + """ + Sending a message to a user after they have sent NICK, but before they + have authenticated, results in a message from "example.com". + """ + self.ircUser.irc_NICK("", ["mynick"]) + self.stringTransport.clear() + self.ircUser.sendMessage("foo") + self.assertEquals(":example.com foo mynick\r\n", + self.stringTransport.value()) + + + def response(self): + """ + Grabs our responses and then clears the transport + """ + response = self.ircUser.transport.value().splitlines() + self.ircUser.transport.clear() + return map(irc.parsemsg, response) + + + def scanResponse(self, response, messageType): + """ + Gets messages out of a response + + @param response: The parsed IRC messages of the response, as returned + by L{IRCServiceTestCase.response} + + @param messageType: The string type of the desired messages. + + @return: An iterator which yields 2-tuples of C{(index, ircMessage)} + """ + for n, message in enumerate(response): + if (message[1] == messageType): + yield n, message + + + def test_sendNickSendsGreeting(self): + """ + Receiving NICK without authenticating sends the MOTD Start and MOTD End + messages, which is required by certain popular IRC clients (such as + Pidgin) before a connection is considered to be fully established. + """ + self.ircUser.irc_NICK("", ["mynick"]) + response = self.response() + start = list(self.scanResponse(response, irc.RPL_MOTDSTART)) + end = list(self.scanResponse(response, irc.RPL_ENDOFMOTD)) + self.assertEquals(start, + [(0, ('example.com', '375', ['mynick', '- example.com Message of the Day - ']))]) + self.assertEquals(end, + [(1, ('example.com', '376', ['mynick', 'End of /MOTD command.']))]) + + + def test_fullLogin(self): + """ + Receiving USER, PASS, NICK will log in the user, and transmit the + appropriate response messages. 
+ """ + self.ircUser.irc_USER("", ["john doe"]) + self.ircUser.irc_PASS("", ["pass"]) + self.ircUser.irc_NICK("", ["john"]) + + version = ('Your host is example.com, running version %s' % + (self.factory._serverInfo["serviceVersion"],)) + + creation = ('This server was created on %s' % + (self.factory._serverInfo["creationDate"],)) + + self.assertEquals(self.response(), + [('example.com', '375', + ['john', '- example.com Message of the Day - ']), + ('example.com', '376', ['john', 'End of /MOTD command.']), + ('example.com', '001', ['john', 'connected to Twisted IRC']), + ('example.com', '002', ['john', version]), + ('example.com', '003', ['john', creation]), + ('example.com', '004', + ['john', 'example.com', self.factory._serverInfo["serviceVersion"], + 'w', 'n'])]) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_jabberclient.py b/vendor/Twisted-10.0.0/twisted/words/test/test_jabberclient.py new file mode 100644 index 000000000000..e90895beed35 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_jabberclient.py @@ -0,0 +1,414 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.protocols.jabber.client} +""" + +from twisted.internet import defer +from twisted.python.hashlib import sha1 +from twisted.trial import unittest +from twisted.words.protocols.jabber import client, error, jid, xmlstream +from twisted.words.protocols.jabber.sasl import SASLInitiatingInitializer +from twisted.words.xish import utility + +IQ_AUTH_GET = '/iq[@type="get"]/query[@xmlns="jabber:iq:auth"]' +IQ_AUTH_SET = '/iq[@type="set"]/query[@xmlns="jabber:iq:auth"]' +NS_BIND = 'urn:ietf:params:xml:ns:xmpp-bind' +IQ_BIND_SET = '/iq[@type="set"]/bind[@xmlns="%s"]' % NS_BIND +NS_SESSION = 'urn:ietf:params:xml:ns:xmpp-session' +IQ_SESSION_SET = '/iq[@type="set"]/session[@xmlns="%s"]' % NS_SESSION + +class CheckVersionInitializerTest(unittest.TestCase): + def setUp(self): + a = xmlstream.Authenticator() + xs = xmlstream.XmlStream(a) + self.init = client.CheckVersionInitializer(xs) + + + def testSupported(self): + """ + Test supported version number 1.0 + """ + self.init.xmlstream.version = (1, 0) + self.init.initialize() + + + def testNotSupported(self): + """ + Test unsupported version number 0.0, and check exception. + """ + self.init.xmlstream.version = (0, 0) + exc = self.assertRaises(error.StreamError, self.init.initialize) + self.assertEquals('unsupported-version', exc.condition) + + + +class InitiatingInitializerHarness(object): + """ + Testing harness for interacting with XML stream initializers. + + This sets up an L{utility.XmlPipe} to create a communication channel between + the initializer and the stubbed receiving entity. It features a sink and + source side that both act similarly to a real L{xmlstream.XmlStream}. The + sink is augmented with an authenticator to which initializers can be added. + + The harness also provides some utility methods to work with event observers + and deferreds. + """ + + def setUp(self): + self.output = [] + self.pipe = utility.XmlPipe() + self.xmlstream = self.pipe.sink + self.authenticator = xmlstream.ConnectAuthenticator('example.org') + self.xmlstream.authenticator = self.authenticator + + + def waitFor(self, event, handler): + """ + Observe an output event, returning a deferred. + + The returned deferred will be fired when the given event has been + observed on the source end of the L{XmlPipe} tied to the protocol + under test. The handler is added as the first callback. 
+ + @param event: The event to be observed. See + L{utility.EventDispatcher.addOnetimeObserver}. + @param handler: The handler to be called with the observed event object. + @rtype: L{defer.Deferred}. + """ + d = defer.Deferred() + d.addCallback(handler) + self.pipe.source.addOnetimeObserver(event, d.callback) + return d + + + +class IQAuthInitializerTest(InitiatingInitializerHarness, unittest.TestCase): + """ + Tests for L{client.IQAuthInitializer}. + """ + + def setUp(self): + super(IQAuthInitializerTest, self).setUp() + self.init = client.IQAuthInitializer(self.xmlstream) + self.authenticator.jid = jid.JID('user@example.com/resource') + self.authenticator.password = 'secret' + + + def testPlainText(self): + """ + Test plain-text authentication. + + Act as a server supporting plain-text authentication and expect the + C{password} field to be filled with the password. Then act as if + authentication succeeds. + """ + + def onAuthGet(iq): + """ + Called when the initializer sent a query for authentication methods. + + The response informs the client that plain-text authentication + is supported. + """ + + # Create server response + response = xmlstream.toResponse(iq, 'result') + response.addElement(('jabber:iq:auth', 'query')) + response.query.addElement('username') + response.query.addElement('password') + response.query.addElement('resource') + + # Set up an observer for the next request we expect. + d = self.waitFor(IQ_AUTH_SET, onAuthSet) + + # Send server response + self.pipe.source.send(response) + + return d + + def onAuthSet(iq): + """ + Called when the initializer sent the authentication request. + + The server checks the credentials and responds with an empty result + signalling success. + """ + self.assertEquals('user', unicode(iq.query.username)) + self.assertEquals('secret', unicode(iq.query.password)) + self.assertEquals('resource', unicode(iq.query.resource)) + + # Send server response + response = xmlstream.toResponse(iq, 'result') + self.pipe.source.send(response) + + # Set up an observer for the request for authentication fields + d1 = self.waitFor(IQ_AUTH_GET, onAuthGet) + + # Start the initializer + d2 = self.init.initialize() + return defer.gatherResults([d1, d2]) + + + def testDigest(self): + """ + Test digest authentication. + + Act as a server supporting digest authentication and expect the + C{digest} field to be filled with a sha1 digest of the concatenated + stream session identifier and password. Then act as if authentication + succeeds. + """ + + def onAuthGet(iq): + """ + Called when the initializer sent a query for authentication methods. + + The response informs the client that digest authentication is + supported. + """ + + # Create server response + response = xmlstream.toResponse(iq, 'result') + response.addElement(('jabber:iq:auth', 'query')) + response.query.addElement('username') + response.query.addElement('digest') + response.query.addElement('resource') + + # Set up an observer for the next request we expect. + d = self.waitFor(IQ_AUTH_SET, onAuthSet) + + # Send server response + self.pipe.source.send(response) + + return d + + def onAuthSet(iq): + """ + Called when the initializer sent the authentication request. + + The server checks the credentials and responds with an empty result + signalling success. 
+ """ + self.assertEquals('user', unicode(iq.query.username)) + self.assertEquals(sha1('12345secret').hexdigest(), + unicode(iq.query.digest).encode('utf-8')) + self.assertEquals('resource', unicode(iq.query.resource)) + + # Send server response + response = xmlstream.toResponse(iq, 'result') + self.pipe.source.send(response) + + # Digest authentication relies on the stream session identifier. Set it. + self.xmlstream.sid = u'12345' + + # Set up an observer for the request for authentication fields + d1 = self.waitFor(IQ_AUTH_GET, onAuthGet) + + # Start the initializer + d2 = self.init.initialize() + + return defer.gatherResults([d1, d2]) + + + def testFailRequestFields(self): + """ + Test initializer failure of request for fields for authentication. + """ + def onAuthGet(iq): + """ + Called when the initializer sent a query for authentication methods. + + The server responds that the client is not authorized to authenticate. + """ + response = error.StanzaError('not-authorized').toResponse(iq) + self.pipe.source.send(response) + + # Set up an observer for the request for authentication fields + d1 = self.waitFor(IQ_AUTH_GET, onAuthGet) + + # Start the initializer + d2 = self.init.initialize() + + # The initialized should fail with a stanza error. + self.assertFailure(d2, error.StanzaError) + + return defer.gatherResults([d1, d2]) + + + def testFailAuth(self): + """ + Test initializer failure to authenticate. + """ + + def onAuthGet(iq): + """ + Called when the initializer sent a query for authentication methods. + + The response informs the client that plain-text authentication + is supported. + """ + + # Send server response + response = xmlstream.toResponse(iq, 'result') + response.addElement(('jabber:iq:auth', 'query')) + response.query.addElement('username') + response.query.addElement('password') + response.query.addElement('resource') + + # Set up an observer for the next request we expect. + d = self.waitFor(IQ_AUTH_SET, onAuthSet) + + # Send server response + self.pipe.source.send(response) + + return d + + def onAuthSet(iq): + """ + Called when the initializer sent the authentication request. + + The server checks the credentials and responds with a not-authorized + stanza error. + """ + response = error.StanzaError('not-authorized').toResponse(iq) + self.pipe.source.send(response) + + # Set up an observer for the request for authentication fields + d1 = self.waitFor(IQ_AUTH_GET, onAuthGet) + + # Start the initializer + d2 = self.init.initialize() + + # The initializer should fail with a stanza error. + self.assertFailure(d2, error.StanzaError) + + return defer.gatherResults([d1, d2]) + + + +class BindInitializerTest(InitiatingInitializerHarness, unittest.TestCase): + """ + Tests for L{client.BindInitializer}. + """ + + def setUp(self): + super(BindInitializerTest, self).setUp() + self.init = client.BindInitializer(self.xmlstream) + self.authenticator.jid = jid.JID('user@example.com/resource') + + + def testBasic(self): + """ + Set up a stream, and act as if resource binding succeeds. 
+ """ + def onBind(iq): + response = xmlstream.toResponse(iq, 'result') + response.addElement((NS_BIND, 'bind')) + response.bind.addElement('jid', + content='user@example.com/other resource') + self.pipe.source.send(response) + + def cb(result): + self.assertEquals(jid.JID('user@example.com/other resource'), + self.authenticator.jid) + + d1 = self.waitFor(IQ_BIND_SET, onBind) + d2 = self.init.start() + d2.addCallback(cb) + return defer.gatherResults([d1, d2]) + + + def testFailure(self): + """ + Set up a stream, and act as if resource binding fails. + """ + def onBind(iq): + response = error.StanzaError('conflict').toResponse(iq) + self.pipe.source.send(response) + + d1 = self.waitFor(IQ_BIND_SET, onBind) + d2 = self.init.start() + self.assertFailure(d2, error.StanzaError) + return defer.gatherResults([d1, d2]) + + + +class SessionInitializerTest(InitiatingInitializerHarness, unittest.TestCase): + """ + Tests for L{client.SessionInitializer}. + """ + + def setUp(self): + super(SessionInitializerTest, self).setUp() + self.init = client.SessionInitializer(self.xmlstream) + + + def testSuccess(self): + """ + Set up a stream, and act as if session establishment succeeds. + """ + + def onSession(iq): + response = xmlstream.toResponse(iq, 'result') + self.pipe.source.send(response) + + d1 = self.waitFor(IQ_SESSION_SET, onSession) + d2 = self.init.start() + return defer.gatherResults([d1, d2]) + + + def testFailure(self): + """ + Set up a stream, and act as if session establishment fails. + """ + def onSession(iq): + response = error.StanzaError('forbidden').toResponse(iq) + self.pipe.source.send(response) + + d1 = self.waitFor(IQ_SESSION_SET, onSession) + d2 = self.init.start() + self.assertFailure(d2, error.StanzaError) + return defer.gatherResults([d1, d2]) + + + +class XMPPAuthenticatorTest(unittest.TestCase): + """ + Test for both XMPPAuthenticator and XMPPClientFactory. + """ + def testBasic(self): + """ + Test basic operations. + + Setup an XMPPClientFactory, which sets up an XMPPAuthenticator, and let + it produce a protocol instance. Then inspect the instance variables of + the authenticator and XML stream objects. + """ + self.client_jid = jid.JID('user@example.com/resource') + + # Get an XmlStream instance. Note that it gets initialized with the + # XMPPAuthenticator (that has its associateWithXmlStream called) that + # is in turn initialized with the arguments to the factory. 
+ xs = client.XMPPClientFactory(self.client_jid, + 'secret').buildProtocol(None) + + # test authenticator's instance variables + self.assertEqual('example.com', xs.authenticator.otherHost) + self.assertEqual(self.client_jid, xs.authenticator.jid) + self.assertEqual('secret', xs.authenticator.password) + + # test list of initializers + version, tls, sasl, bind, session = xs.initializers + + self.assert_(isinstance(tls, xmlstream.TLSInitiatingInitializer)) + self.assert_(isinstance(sasl, SASLInitiatingInitializer)) + self.assert_(isinstance(bind, client.BindInitializer)) + self.assert_(isinstance(session, client.SessionInitializer)) + + self.assertFalse(tls.required) + self.assertTrue(sasl.required) + self.assertFalse(bind.required) + self.assertFalse(session.required) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_jabbercomponent.py b/vendor/Twisted-10.0.0/twisted/words/test/test_jabbercomponent.py new file mode 100644 index 000000000000..1ce6db1d12bd --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_jabbercomponent.py @@ -0,0 +1,422 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.protocols.jabber.component} +""" + +from twisted.python import failure +from twisted.python.hashlib import sha1 +from twisted.trial import unittest +from twisted.words.protocols.jabber import component, xmlstream +from twisted.words.protocols.jabber.jid import JID +from twisted.words.xish import domish +from twisted.words.xish.utility import XmlPipe + +class DummyTransport: + def __init__(self, list): + self.list = list + + def write(self, bytes): + self.list.append(bytes) + +class ComponentInitiatingInitializerTest(unittest.TestCase): + def setUp(self): + self.output = [] + + self.authenticator = xmlstream.Authenticator() + self.authenticator.password = 'secret' + self.xmlstream = xmlstream.XmlStream(self.authenticator) + self.xmlstream.namespace = 'test:component' + self.xmlstream.send = self.output.append + self.xmlstream.connectionMade() + self.xmlstream.dataReceived( + "") + self.xmlstream.sid = u'12345' + self.init = component.ComponentInitiatingInitializer(self.xmlstream) + + def testHandshake(self): + """ + Test basic operations of component handshake. + """ + + d = self.init.initialize() + + # the initializer should have sent the handshake request + + handshake = self.output[-1] + self.assertEquals('handshake', handshake.name) + self.assertEquals('test:component', handshake.uri) + self.assertEquals(sha1("%s%s" % ('12345', 'secret')).hexdigest(), + unicode(handshake)) + + # successful authentication + + handshake.children = [] + self.xmlstream.dataReceived(handshake.toXml()) + + return d + +class ComponentAuthTest(unittest.TestCase): + def authPassed(self, stream): + self.authComplete = True + + def testAuth(self): + self.authComplete = False + outlist = [] + + ca = component.ConnectComponentAuthenticator("cjid", "secret") + xs = xmlstream.XmlStream(ca) + xs.transport = DummyTransport(outlist) + + xs.addObserver(xmlstream.STREAM_AUTHD_EVENT, + self.authPassed) + + # Go... 
+ xs.connectionMade() + xs.dataReceived("") + + # Calculate what we expect the handshake value to be + hv = sha1("%s%s" % ("12345", "secret")).hexdigest() + + self.assertEquals(outlist[1], "%s" % (hv)) + + xs.dataReceived("") + + self.assertEquals(self.authComplete, True) + + +class JabberServiceHarness(component.Service): + def __init__(self): + self.componentConnectedFlag = False + self.componentDisconnectedFlag = False + self.transportConnectedFlag = False + + def componentConnected(self, xmlstream): + self.componentConnectedFlag = True + + def componentDisconnected(self): + self.componentDisconnectedFlag = True + + def transportConnected(self, xmlstream): + self.transportConnectedFlag = True + + +class TestJabberServiceManager(unittest.TestCase): + def testSM(self): + # Setup service manager and test harnes + sm = component.ServiceManager("foo", "password") + svc = JabberServiceHarness() + svc.setServiceParent(sm) + + # Create a write list + wlist = [] + + # Setup a XmlStream + xs = sm.getFactory().buildProtocol(None) + xs.transport = self + xs.transport.write = wlist.append + + # Indicate that it's connected + xs.connectionMade() + + # Ensure the test service harness got notified + self.assertEquals(True, svc.transportConnectedFlag) + + # Jump ahead and pretend like the stream got auth'd + xs.dispatch(xs, xmlstream.STREAM_AUTHD_EVENT) + + # Ensure the test service harness got notified + self.assertEquals(True, svc.componentConnectedFlag) + + # Pretend to drop the connection + xs.connectionLost(None) + + # Ensure the test service harness got notified + self.assertEquals(True, svc.componentDisconnectedFlag) + + + +class RouterTest(unittest.TestCase): + """ + Tests for L{component.Router}. + """ + + def test_addRoute(self): + """ + Test route registration and routing on incoming stanzas. + """ + router = component.Router() + routed = [] + router.route = lambda element: routed.append(element) + + pipe = XmlPipe() + router.addRoute('example.org', pipe.sink) + self.assertEquals(1, len(router.routes)) + self.assertEquals(pipe.sink, router.routes['example.org']) + + element = domish.Element(('testns', 'test')) + pipe.source.send(element) + self.assertEquals([element], routed) + + + def test_route(self): + """ + Test routing of a message. + """ + component1 = XmlPipe() + component2 = XmlPipe() + router = component.Router() + router.addRoute('component1.example.org', component1.sink) + router.addRoute('component2.example.org', component2.sink) + + outgoing = [] + component2.source.addObserver('/*', + lambda element: outgoing.append(element)) + stanza = domish.Element((None, 'presence')) + stanza['from'] = 'component1.example.org' + stanza['to'] = 'component2.example.org' + component1.source.send(stanza) + self.assertEquals([stanza], outgoing) + + + def test_routeDefault(self): + """ + Test routing of a message using the default route. + + The default route is the one with C{None} as its key in the + routing table. It is taken when there is no more specific route + in the routing table that matches the stanza's destination. 
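# --- Editor's note: illustrative sketch, not part of the vendored patch. ---
# Both component tests above check the XEP-0114 style handshake value: the
# SHA-1 hex digest of the stream id assigned by the server, concatenated with
# the shared secret, sent as the content of a handshake element. Python 2
# sketch mirroring the inline computation in the tests; the name is ours.
from twisted.python.hashlib import sha1

def component_handshake_value(sid, secret):
    return sha1("%s%s" % (sid, secret)).hexdigest()

# component_handshake_value('12345', 'secret') is the string both
# testHandshake and ComponentAuthTest.testAuth expect on the wire.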
+ """ + component1 = XmlPipe() + s2s = XmlPipe() + router = component.Router() + router.addRoute('component1.example.org', component1.sink) + router.addRoute(None, s2s.sink) + + outgoing = [] + s2s.source.addObserver('/*', lambda element: outgoing.append(element)) + stanza = domish.Element((None, 'presence')) + stanza['from'] = 'component1.example.org' + stanza['to'] = 'example.com' + component1.source.send(stanza) + self.assertEquals([stanza], outgoing) + + + +class ListenComponentAuthenticatorTest(unittest.TestCase): + """ + Tests for L{component.ListenComponentAuthenticator}. + """ + + def setUp(self): + self.output = [] + authenticator = component.ListenComponentAuthenticator('secret') + self.xmlstream = xmlstream.XmlStream(authenticator) + self.xmlstream.send = self.output.append + + + def loseConnection(self): + """ + Stub loseConnection because we are a transport. + """ + self.xmlstream.connectionLost("no reason") + + + def test_streamStarted(self): + """ + The received stream header should set several attributes. + """ + observers = [] + + def addOnetimeObserver(event, observerfn): + observers.append((event, observerfn)) + + xs = self.xmlstream + xs.addOnetimeObserver = addOnetimeObserver + + xs.makeConnection(self) + self.assertIdentical(None, xs.sid) + self.assertFalse(xs._headerSent) + + xs.dataReceived("") + self.assertEqual((0, 0), xs.version) + self.assertNotIdentical(None, xs.sid) + self.assertTrue(xs._headerSent) + self.assertEquals(('/*', xs.authenticator.onElement), observers[-1]) + + + def test_streamStartedWrongNamespace(self): + """ + The received stream header should have a correct namespace. + """ + streamErrors = [] + + xs = self.xmlstream + xs.sendStreamError = streamErrors.append + xs.makeConnection(self) + xs.dataReceived("") + self.assertEquals(1, len(streamErrors)) + self.assertEquals('invalid-namespace', streamErrors[-1].condition) + + + def test_streamStartedNoTo(self): + """ + The received stream header should have a 'to' attribute. + """ + streamErrors = [] + + xs = self.xmlstream + xs.sendStreamError = streamErrors.append + xs.makeConnection(self) + xs.dataReceived("") + self.assertEquals(1, len(streamErrors)) + self.assertEquals('improper-addressing', streamErrors[-1].condition) + + + def test_onElement(self): + """ + We expect a handshake element with a hash. + """ + handshakes = [] + + xs = self.xmlstream + xs.authenticator.onHandshake = handshakes.append + + handshake = domish.Element(('jabber:component:accept', 'handshake')) + handshake.addContent('1234') + xs.authenticator.onElement(handshake) + self.assertEqual('1234', handshakes[-1]) + + def test_onElementNotHandshake(self): + """ + Reject elements that are not handshakes + """ + handshakes = [] + streamErrors = [] + + xs = self.xmlstream + xs.authenticator.onHandshake = handshakes.append + xs.sendStreamError = streamErrors.append + + element = domish.Element(('jabber:component:accept', 'message')) + xs.authenticator.onElement(element) + self.assertFalse(handshakes) + self.assertEquals('not-authorized', streamErrors[-1].condition) + + + def test_onHandshake(self): + """ + Receiving a handshake matching the secret authenticates the stream. 
+ """ + authd = [] + + def authenticated(xs): + authd.append(xs) + + xs = self.xmlstream + xs.addOnetimeObserver(xmlstream.STREAM_AUTHD_EVENT, authenticated) + xs.sid = u'1234' + theHash = '32532c0f7dbf1253c095b18b18e36d38d94c1256' + xs.authenticator.onHandshake(theHash) + self.assertEqual('', self.output[-1]) + self.assertEquals(1, len(authd)) + + + def test_onHandshakeWrongHash(self): + """ + Receiving a bad handshake should yield a stream error. + """ + streamErrors = [] + authd = [] + + def authenticated(xs): + authd.append(xs) + + xs = self.xmlstream + xs.addOnetimeObserver(xmlstream.STREAM_AUTHD_EVENT, authenticated) + xs.sendStreamError = streamErrors.append + + xs.sid = u'1234' + theHash = '1234' + xs.authenticator.onHandshake(theHash) + self.assertEquals('not-authorized', streamErrors[-1].condition) + self.assertEquals(0, len(authd)) + + + +class XMPPComponentServerFactoryTest(unittest.TestCase): + """ + Tests for L{component.XMPPComponentServerFactory}. + """ + + def setUp(self): + self.router = component.Router() + self.factory = component.XMPPComponentServerFactory(self.router, + 'secret') + self.xmlstream = self.factory.buildProtocol(None) + self.xmlstream.thisEntity = JID('component.example.org') + + + def test_makeConnection(self): + """ + A new connection increases the stream serial count. No logs by default. + """ + self.xmlstream.dispatch(self.xmlstream, + xmlstream.STREAM_CONNECTED_EVENT) + self.assertEqual(0, self.xmlstream.serial) + self.assertEqual(1, self.factory.serial) + self.assertIdentical(None, self.xmlstream.rawDataInFn) + self.assertIdentical(None, self.xmlstream.rawDataOutFn) + + + def test_makeConnectionLogTraffic(self): + """ + Setting logTraffic should set up raw data loggers. + """ + self.factory.logTraffic = True + self.xmlstream.dispatch(self.xmlstream, + xmlstream.STREAM_CONNECTED_EVENT) + self.assertNotIdentical(None, self.xmlstream.rawDataInFn) + self.assertNotIdentical(None, self.xmlstream.rawDataOutFn) + + + def test_onError(self): + """ + An observer for stream errors should trigger onError to log it. + """ + self.xmlstream.dispatch(self.xmlstream, + xmlstream.STREAM_CONNECTED_EVENT) + + class TestError(Exception): + pass + + reason = failure.Failure(TestError()) + self.xmlstream.dispatch(reason, xmlstream.STREAM_ERROR_EVENT) + self.assertEqual(1, len(self.flushLoggedErrors(TestError))) + + + def test_connectionInitialized(self): + """ + Make sure a new stream is added to the routing table. + """ + self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT) + self.assertIn('component.example.org', self.router.routes) + self.assertIdentical(self.xmlstream, + self.router.routes['component.example.org']) + + + def test_connectionLost(self): + """ + Make sure a stream is removed from the routing table on disconnect. + """ + self.xmlstream.dispatch(self.xmlstream, xmlstream.STREAM_AUTHD_EVENT) + self.xmlstream.dispatch(None, xmlstream.STREAM_END_EVENT) + self.assertNotIn('component.example.org', self.router.routes) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_jabbererror.py b/vendor/Twisted-10.0.0/twisted/words/test/test_jabbererror.py new file mode 100644 index 000000000000..9b4643dcd9fc --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_jabbererror.py @@ -0,0 +1,308 @@ +# Copyright (c) 2001-2007 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.protocols.jabber.error}. 
+""" + +from twisted.trial import unittest + +from twisted.words.protocols.jabber import error +from twisted.words.xish import domish + +NS_XML = 'http://www.w3.org/XML/1998/namespace' +NS_STREAMS = 'http://etherx.jabber.org/streams' +NS_XMPP_STREAMS = 'urn:ietf:params:xml:ns:xmpp-streams' +NS_XMPP_STANZAS = 'urn:ietf:params:xml:ns:xmpp-stanzas' + +class BaseErrorTest(unittest.TestCase): + + def test_getElementPlain(self): + """ + Test getting an element for a plain error. + """ + e = error.BaseError('feature-not-implemented') + element = e.getElement() + self.assertIdentical(element.uri, None) + self.assertEquals(len(element.children), 1) + + def test_getElementText(self): + """ + Test getting an element for an error with a text. + """ + e = error.BaseError('feature-not-implemented', 'text') + element = e.getElement() + self.assertEquals(len(element.children), 2) + self.assertEquals(unicode(element.text), 'text') + self.assertEquals(element.text.getAttribute((NS_XML, 'lang')), None) + + def test_getElementTextLang(self): + """ + Test getting an element for an error with a text and language. + """ + e = error.BaseError('feature-not-implemented', 'text', 'en_US') + element = e.getElement() + self.assertEquals(len(element.children), 2) + self.assertEquals(unicode(element.text), 'text') + self.assertEquals(element.text[(NS_XML, 'lang')], 'en_US') + + def test_getElementAppCondition(self): + """ + Test getting an element for an error with an app specific condition. + """ + ac = domish.Element(('testns', 'myerror')) + e = error.BaseError('feature-not-implemented', appCondition=ac) + element = e.getElement() + self.assertEquals(len(element.children), 2) + self.assertEquals(element.myerror, ac) + +class StreamErrorTest(unittest.TestCase): + + def test_getElementPlain(self): + """ + Test namespace of the element representation of an error. + """ + e = error.StreamError('feature-not-implemented') + element = e.getElement() + self.assertEquals(element.uri, NS_STREAMS) + + def test_getElementConditionNamespace(self): + """ + Test that the error condition element has the correct namespace. + """ + e = error.StreamError('feature-not-implemented') + element = e.getElement() + self.assertEquals(NS_XMPP_STREAMS, getattr(element, 'feature-not-implemented').uri) + + def test_getElementTextNamespace(self): + """ + Test that the error text element has the correct namespace. + """ + e = error.StreamError('feature-not-implemented', 'text') + element = e.getElement() + self.assertEquals(NS_XMPP_STREAMS, element.text.uri) + +class StanzaErrorTest(unittest.TestCase): + + def test_getElementPlain(self): + """ + Test getting an element for a plain stanza error. + """ + e = error.StanzaError('feature-not-implemented') + element = e.getElement() + self.assertEquals(element.uri, None) + self.assertEquals(element['type'], 'cancel') + self.assertEquals(element['code'], '501') + + def test_getElementType(self): + """ + Test getting an element for a stanza error with a given type. + """ + e = error.StanzaError('feature-not-implemented', 'auth') + element = e.getElement() + self.assertEquals(element.uri, None) + self.assertEquals(element['type'], 'auth') + self.assertEquals(element['code'], '501') + + def test_getElementConditionNamespace(self): + """ + Test that the error condition element has the correct namespace. 
+ """ + e = error.StanzaError('feature-not-implemented') + element = e.getElement() + self.assertEquals(NS_XMPP_STANZAS, getattr(element, 'feature-not-implemented').uri) + + def test_getElementTextNamespace(self): + """ + Test that the error text element has the correct namespace. + """ + e = error.StanzaError('feature-not-implemented', text='text') + element = e.getElement() + self.assertEquals(NS_XMPP_STANZAS, element.text.uri) + + def test_toResponse(self): + """ + Test an error response is generated from a stanza. + + The addressing on the (new) response stanza should be reversed, an + error child (with proper properties) added and the type set to + C{'error'}. + """ + stanza = domish.Element(('jabber:client', 'message')) + stanza['type'] = 'chat' + stanza['to'] = 'user1@example.com' + stanza['from'] = 'user2@example.com/resource' + e = error.StanzaError('service-unavailable') + response = e.toResponse(stanza) + self.assertNotIdentical(response, stanza) + self.assertEqual(response['from'], 'user1@example.com') + self.assertEqual(response['to'], 'user2@example.com/resource') + self.assertEqual(response['type'], 'error') + self.assertEqual(response.error.children[0].name, + 'service-unavailable') + self.assertEqual(response.error['type'], 'cancel') + self.assertNotEqual(stanza.children, response.children) + +class ParseErrorTest(unittest.TestCase): + + def setUp(self): + self.error = domish.Element((None, 'error')) + + def test_empty(self): + """ + Test parsing of the empty error element. + """ + result = error._parseError(self.error, 'errorns') + self.assertEqual({'condition': None, + 'text': None, + 'textLang': None, + 'appCondition': None}, result) + + def test_condition(self): + """ + Test parsing of an error element with a condition. + """ + self.error.addElement(('errorns', 'bad-request')) + result = error._parseError(self.error, 'errorns') + self.assertEqual('bad-request', result['condition']) + + def test_text(self): + """ + Test parsing of an error element with a text. + """ + text = self.error.addElement(('errorns', 'text')) + text.addContent('test') + result = error._parseError(self.error, 'errorns') + self.assertEqual('test', result['text']) + self.assertEqual(None, result['textLang']) + + def test_textLang(self): + """ + Test parsing of an error element with a text with a defined language. + """ + text = self.error.addElement(('errorns', 'text')) + text[NS_XML, 'lang'] = 'en_US' + text.addContent('test') + result = error._parseError(self.error, 'errorns') + self.assertEqual('en_US', result['textLang']) + + def test_textLangInherited(self): + """ + Test parsing of an error element with a text with inherited language. + """ + text = self.error.addElement(('errorns', 'text')) + self.error[NS_XML, 'lang'] = 'en_US' + text.addContent('test') + result = error._parseError(self.error, 'errorns') + self.assertEqual('en_US', result['textLang']) + test_textLangInherited.todo = "xml:lang inheritance not implemented" + + def test_appCondition(self): + """ + Test parsing of an error element with an app specific condition. + """ + condition = self.error.addElement(('testns', 'condition')) + result = error._parseError(self.error, 'errorns') + self.assertEqual(condition, result['appCondition']) + + def test_appConditionMultiple(self): + """ + Test parsing of an error element with multiple app specific conditions. 
+ """ + condition = self.error.addElement(('testns', 'condition')) + condition2 = self.error.addElement(('testns', 'condition2')) + result = error._parseError(self.error, 'errorns') + self.assertEqual(condition2, result['appCondition']) + +class ExceptionFromStanzaTest(unittest.TestCase): + + def test_basic(self): + """ + Test basic operations of exceptionFromStanza. + + Given a realistic stanza, check if a sane exception is returned. + + Using this stanza:: + + + + + + + + + + + """ + + stanza = domish.Element((None, 'stanza')) + p = stanza.addElement(('http://jabber.org/protocol/pubsub', 'pubsub')) + p.addElement('subscriptions') + e = stanza.addElement('error') + e['type'] = 'cancel' + e.addElement((NS_XMPP_STANZAS, 'feature-not-implemented')) + uc = e.addElement(('http://jabber.org/protocol/pubsub#errors', + 'unsupported')) + uc['feature'] = 'retrieve-subscriptions' + + result = error.exceptionFromStanza(stanza) + self.assert_(isinstance(result, error.StanzaError)) + self.assertEquals('feature-not-implemented', result.condition) + self.assertEquals('cancel', result.type) + self.assertEquals(uc, result.appCondition) + self.assertEquals([p], result.children) + + def test_legacy(self): + """ + Test legacy operations of exceptionFromStanza. + + Given a realistic stanza with only legacy (pre-XMPP) error information, + check if a sane exception is returned. + + Using this stanza:: + + + Are you there? + Unable to resolve hostname. + + """ + stanza = domish.Element((None, 'stanza')) + p = stanza.addElement('body', content='Are you there?') + e = stanza.addElement('error', content='Unable to resolve hostname.') + e['code'] = '502' + + result = error.exceptionFromStanza(stanza) + self.assert_(isinstance(result, error.StanzaError)) + self.assertEquals('service-unavailable', result.condition) + self.assertEquals('wait', result.type) + self.assertEquals('Unable to resolve hostname.', result.text) + self.assertEquals([p], result.children) + +class ExceptionFromStreamErrorTest(unittest.TestCase): + + def test_basic(self): + """ + Test basic operations of exceptionFromStreamError. + + Given a realistic stream error, check if a sane exception is returned. + + Using this error:: + + + + + """ + + e = domish.Element(('http://etherx.jabber.org/streams', 'error')) + e.addElement((NS_XMPP_STREAMS, 'xml-not-well-formed')) + + result = error.exceptionFromStreamError(e) + self.assert_(isinstance(result, error.StreamError)) + self.assertEquals('xml-not-well-formed', result.condition) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_jabberjid.py b/vendor/Twisted-10.0.0/twisted/words/test/test_jabberjid.py new file mode 100644 index 000000000000..dbb42e5b2006 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_jabberjid.py @@ -0,0 +1,225 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.protocols.jabber.jid}. +""" + +from twisted.trial import unittest + +from twisted.words.protocols.jabber import jid + +class JIDParsingTest(unittest.TestCase): + def test_parse(self): + """ + Test different forms of JIDs. 
+ """ + # Basic forms + self.assertEquals(jid.parse("user@host/resource"), + ("user", "host", "resource")) + self.assertEquals(jid.parse("user@host"), + ("user", "host", None)) + self.assertEquals(jid.parse("host"), + (None, "host", None)) + self.assertEquals(jid.parse("host/resource"), + (None, "host", "resource")) + + # More interesting forms + self.assertEquals(jid.parse("foo/bar@baz"), + (None, "foo", "bar@baz")) + self.assertEquals(jid.parse("boo@foo/bar@baz"), + ("boo", "foo", "bar@baz")) + self.assertEquals(jid.parse("boo@foo/bar/baz"), + ("boo", "foo", "bar/baz")) + self.assertEquals(jid.parse("boo/foo@bar@baz"), + (None, "boo", "foo@bar@baz")) + self.assertEquals(jid.parse("boo/foo/bar"), + (None, "boo", "foo/bar")) + self.assertEquals(jid.parse("boo//foo"), + (None, "boo", "/foo")) + + def test_noHost(self): + """ + Test for failure on no host part. + """ + self.assertRaises(jid.InvalidFormat, jid.parse, "user@") + + def test_doubleAt(self): + """ + Test for failure on double @ signs. + + This should fail because @ is not a valid character for the host + part of the JID. + """ + self.assertRaises(jid.InvalidFormat, jid.parse, "user@@host") + + def test_multipleAt(self): + """ + Test for failure on two @ signs. + + This should fail because @ is not a valid character for the host + part of the JID. + """ + self.assertRaises(jid.InvalidFormat, jid.parse, "user@host@host") + + # Basic tests for case mapping. These are fallback tests for the + # prepping done in twisted.words.protocols.jabber.xmpp_stringprep + + def test_prepCaseMapUser(self): + """ + Test case mapping of the user part of the JID. + """ + self.assertEquals(jid.prep("UsEr", "host", "resource"), + ("user", "host", "resource")) + + def test_prepCaseMapHost(self): + """ + Test case mapping of the host part of the JID. + """ + self.assertEquals(jid.prep("user", "hoST", "resource"), + ("user", "host", "resource")) + + def test_prepNoCaseMapResource(self): + """ + Test no case mapping of the resourcce part of the JID. + """ + self.assertEquals(jid.prep("user", "hoST", "resource"), + ("user", "host", "resource")) + self.assertNotEquals(jid.prep("user", "host", "Resource"), + ("user", "host", "resource")) + +class JIDTest(unittest.TestCase): + + def test_noneArguments(self): + """ + Test that using no arguments raises an exception. + """ + self.assertRaises(RuntimeError, jid.JID) + + def test_attributes(self): + """ + Test that the attributes correspond with the JID parts. + """ + j = jid.JID("user@host/resource") + self.assertEquals(j.user, "user") + self.assertEquals(j.host, "host") + self.assertEquals(j.resource, "resource") + + def test_userhost(self): + """ + Test the extraction of the bare JID. + """ + j = jid.JID("user@host/resource") + self.assertEquals("user@host", j.userhost()) + + def test_userhostOnlyHost(self): + """ + Test the extraction of the bare JID of the full form host/resource. + """ + j = jid.JID("host/resource") + self.assertEquals("host", j.userhost()) + + def test_userhostJID(self): + """ + Test getting a JID object of the bare JID. + """ + j1 = jid.JID("user@host/resource") + j2 = jid.internJID("user@host") + self.assertIdentical(j2, j1.userhostJID()) + + def test_userhostJIDNoResource(self): + """ + Test getting a JID object of the bare JID when there was no resource. + """ + j = jid.JID("user@host") + self.assertIdentical(j, j.userhostJID()) + + def test_fullHost(self): + """ + Test giving a string representation of the JID with only a host part. 
+ """ + j = jid.JID(tuple=(None, 'host', None)) + self.assertEqual('host', j.full()) + + def test_fullHostResource(self): + """ + Test giving a string representation of the JID with host, resource. + """ + j = jid.JID(tuple=(None, 'host', 'resource')) + self.assertEqual('host/resource', j.full()) + + def test_fullUserHost(self): + """ + Test giving a string representation of the JID with user, host. + """ + j = jid.JID(tuple=('user', 'host', None)) + self.assertEqual('user@host', j.full()) + + def test_fullAll(self): + """ + Test giving a string representation of the JID. + """ + j = jid.JID(tuple=('user', 'host', 'resource')) + self.assertEqual('user@host/resource', j.full()) + + def test_equality(self): + """ + Test JID equality. + """ + j1 = jid.JID("user@host/resource") + j2 = jid.JID("user@host/resource") + self.assertNotIdentical(j1, j2) + self.assertEqual(j1, j2) + + def test_equalityWithNonJIDs(self): + """ + Test JID equality. + """ + j = jid.JID("user@host/resource") + self.assertFalse(j == 'user@host/resource') + + def test_inequality(self): + """ + Test JID inequality. + """ + j1 = jid.JID("user1@host/resource") + j2 = jid.JID("user2@host/resource") + self.assertNotEqual(j1, j2) + + def test_inequalityWithNonJIDs(self): + """ + Test JID equality. + """ + j = jid.JID("user@host/resource") + self.assertNotEqual(j, 'user@host/resource') + + def test_hashable(self): + """ + Test JID hashability. + """ + j1 = jid.JID("user@host/resource") + j2 = jid.JID("user@host/resource") + self.assertEqual(hash(j1), hash(j2)) + + def test_unicode(self): + """ + Test unicode representation of JIDs. + """ + j = jid.JID(tuple=('user', 'host', 'resource')) + self.assertEquals("user@host/resource", unicode(j)) + + def test_repr(self): + """ + Test representation of JID objects. + """ + j = jid.JID(tuple=('user', 'host', 'resource')) + self.assertEquals("JID(u'user@host/resource')", repr(j)) + +class InternJIDTest(unittest.TestCase): + def test_identity(self): + """ + Test that two interned JIDs yield the same object. + """ + j1 = jid.internJID("user@host") + j2 = jid.internJID("user@host") + self.assertIdentical(j1, j2) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_jabbersasl.py b/vendor/Twisted-10.0.0/twisted/words/test/test_jabbersasl.py new file mode 100644 index 000000000000..cd2a7d24c767 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_jabbersasl.py @@ -0,0 +1,272 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +from zope.interface import implements +from twisted.internet import defer +from twisted.trial import unittest +from twisted.words.protocols.jabber import sasl, sasl_mechanisms, xmlstream, jid +from twisted.words.xish import domish + +NS_XMPP_SASL = 'urn:ietf:params:xml:ns:xmpp-sasl' + +class DummySASLMechanism(object): + """ + Dummy SASL mechanism. + + This just returns the initialResponse passed on creation, stores any + challenges and replies with an empty response. + + @ivar challenge: Last received challenge. + @type challenge: C{unicode}. + @ivar initialResponse: Initial response to be returned when requested + via C{getInitialResponse} or C{None}. 
+ @type initialResponse: C{unicode} + """ + + implements(sasl_mechanisms.ISASLMechanism) + + challenge = None + name = "DUMMY" + + def __init__(self, initialResponse): + self.initialResponse = initialResponse + + def getInitialResponse(self): + return self.initialResponse + + def getResponse(self, challenge): + self.challenge = challenge + return "" + +class DummySASLInitiatingInitializer(sasl.SASLInitiatingInitializer): + """ + Dummy SASL Initializer for initiating entities. + + This hardwires the SASL mechanism to L{DummySASLMechanism}, that is + instantiated with the value of C{initialResponse}. + + @ivar initialResponse: The initial response to be returned by the + dummy SASL mechanism or C{None}. + @type initialResponse: C{unicode}. + """ + + initialResponse = None + + def setMechanism(self): + self.mechanism = DummySASLMechanism(self.initialResponse) + + + +class SASLInitiatingInitializerTest(unittest.TestCase): + """ + Tests for L{sasl.SASLInitiatingInitializer} + """ + + def setUp(self): + self.output = [] + + self.authenticator = xmlstream.Authenticator() + self.xmlstream = xmlstream.XmlStream(self.authenticator) + self.xmlstream.send = self.output.append + self.xmlstream.connectionMade() + self.xmlstream.dataReceived("") + self.init = DummySASLInitiatingInitializer(self.xmlstream) + + + def test_onFailure(self): + """ + Test that the SASL error condition is correctly extracted. + """ + failure = domish.Element(('urn:ietf:params:xml:ns:xmpp-sasl', + 'failure')) + failure.addElement('not-authorized') + self.init._deferred = defer.Deferred() + self.init.onFailure(failure) + self.assertFailure(self.init._deferred, sasl.SASLAuthError) + self.init._deferred.addCallback(lambda e: + self.assertEquals('not-authorized', + e.condition)) + return self.init._deferred + + + def test_sendAuthInitialResponse(self): + """ + Test starting authentication with an initial response. + """ + self.init.initialResponse = "dummy" + self.init.start() + auth = self.output[0] + self.assertEquals(NS_XMPP_SASL, auth.uri) + self.assertEquals('auth', auth.name) + self.assertEquals('DUMMY', auth['mechanism']) + self.assertEquals('ZHVtbXk=', str(auth)) + + + def test_sendAuthNoInitialResponse(self): + """ + Test starting authentication without an initial response. + """ + self.init.initialResponse = None + self.init.start() + auth = self.output[0] + self.assertEquals('', str(auth)) + + + def test_sendAuthEmptyInitialResponse(self): + """ + Test starting authentication where the initial response is empty. + """ + self.init.initialResponse = "" + self.init.start() + auth = self.output[0] + self.assertEquals('=', str(auth)) + + + def test_onChallenge(self): + """ + Test receiving a challenge message. + """ + d = self.init.start() + challenge = domish.Element((NS_XMPP_SASL, 'challenge')) + challenge.addContent('bXkgY2hhbGxlbmdl') + self.init.onChallenge(challenge) + self.assertEqual('my challenge', self.init.mechanism.challenge) + self.init.onSuccess(None) + return d + + + def test_onChallengeEmpty(self): + """ + Test receiving an empty challenge message. + """ + d = self.init.start() + challenge = domish.Element((NS_XMPP_SASL, 'challenge')) + self.init.onChallenge(challenge) + self.assertEqual('', self.init.mechanism.challenge) + self.init.onSuccess(None) + return d + + + def test_onChallengeIllegalPadding(self): + """ + Test receiving a challenge message with illegal padding. 
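# --- Editor's note: illustrative sketch, not part of the vendored patch. ---
# The three test_sendAuth* cases below fix how the initial SASL response is
# encoded into the auth element: a real response is base64-encoded, an absent
# response leaves the element empty, and an empty-but-present response is sent
# as a single '=' so the receiver can tell it apart from "no response".
# Plain Python 2 sketch; the function name is ours, not a Twisted API.
from base64 import b64encode

def encode_initial_response(response):
    if response is None:
        return ''                    # no initial response: empty element
    elif response == '':
        return '='                   # empty initial response: literal '='
    else:
        return b64encode(response)   # e.g. 'dummy' -> 'ZHVtbXk='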
+ """ + d = self.init.start() + challenge = domish.Element((NS_XMPP_SASL, 'challenge')) + challenge.addContent('bXkg=Y2hhbGxlbmdl') + self.init.onChallenge(challenge) + self.assertFailure(d, sasl.SASLIncorrectEncodingError) + return d + + + def test_onChallengeIllegalCharacters(self): + """ + Test receiving a challenge message with illegal characters. + """ + d = self.init.start() + challenge = domish.Element((NS_XMPP_SASL, 'challenge')) + challenge.addContent('bXkg*Y2hhbGxlbmdl') + self.init.onChallenge(challenge) + self.assertFailure(d, sasl.SASLIncorrectEncodingError) + return d + + + def test_onChallengeMalformed(self): + """ + Test receiving a malformed challenge message. + """ + d = self.init.start() + challenge = domish.Element((NS_XMPP_SASL, 'challenge')) + challenge.addContent('a') + self.init.onChallenge(challenge) + self.assertFailure(d, sasl.SASLIncorrectEncodingError) + return d + + +class SASLInitiatingInitializerSetMechanismTest(unittest.TestCase): + """ + Test for L{sasl.SASLInitiatingInitializer.setMechanism}. + """ + + def setUp(self): + self.output = [] + + self.authenticator = xmlstream.Authenticator() + self.xmlstream = xmlstream.XmlStream(self.authenticator) + self.xmlstream.send = self.output.append + self.xmlstream.connectionMade() + self.xmlstream.dataReceived("") + + self.init = sasl.SASLInitiatingInitializer(self.xmlstream) + + + def _setMechanism(self, name): + """ + Set up the XML Stream to have a SASL feature with the given mechanism. + """ + feature = domish.Element((NS_XMPP_SASL, 'mechanisms')) + feature.addElement('mechanism', content=name) + self.xmlstream.features[(feature.uri, feature.name)] = feature + + self.init.setMechanism() + return self.init.mechanism.name + + + def test_anonymous(self): + """ + Test setting ANONYMOUS as the authentication mechanism. + """ + self.authenticator.jid = jid.JID('example.com') + self.authenticator.password = None + name = "ANONYMOUS" + + self.assertEqual(name, self._setMechanism(name)) + + + def test_plain(self): + """ + Test setting PLAIN as the authentication mechanism. + """ + self.authenticator.jid = jid.JID('test@example.com') + self.authenticator.password = 'secret' + name = "PLAIN" + + self.assertEqual(name, self._setMechanism(name)) + + + def test_digest(self): + """ + Test setting DIGEST-MD5 as the authentication mechanism. + """ + self.authenticator.jid = jid.JID('test@example.com') + self.authenticator.password = 'secret' + name = "DIGEST-MD5" + + self.assertEqual(name, self._setMechanism(name)) + + + def test_notAcceptable(self): + """ + Test using an unacceptable SASL authentication mechanism. + """ + + self.authenticator.jid = jid.JID('test@example.com') + self.authenticator.password = 'secret' + + self.assertRaises(sasl.SASLNoAcceptableMechanism, + self._setMechanism, 'SOMETHING_UNACCEPTABLE') + + + def test_notAcceptableWithoutUser(self): + """ + Test using an unacceptable SASL authentication mechanism with no JID. + """ + self.authenticator.jid = jid.JID('example.com') + self.authenticator.password = 'secret' + + self.assertRaises(sasl.SASLNoAcceptableMechanism, + self._setMechanism, 'SOMETHING_UNACCEPTABLE') diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_jabbersaslmechanisms.py b/vendor/Twisted-10.0.0/twisted/words/test/test_jabbersaslmechanisms.py new file mode 100644 index 000000000000..627d069acbeb --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_jabbersaslmechanisms.py @@ -0,0 +1,90 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ +""" +Tests for L{twisted.words.protocols.jabber.sasl_mechanisms}. +""" + +from twisted.trial import unittest + +from twisted.words.protocols.jabber import sasl_mechanisms + +class PlainTest(unittest.TestCase): + def test_getInitialResponse(self): + """ + Test the initial response. + """ + m = sasl_mechanisms.Plain(None, 'test', 'secret') + self.assertEquals(m.getInitialResponse(), '\x00test\x00secret') + + + +class AnonymousTest(unittest.TestCase): + """ + Tests for L{twisted.words.protocols.jabber.sasl_mechanisms.Anonymous}. + """ + def test_getInitialResponse(self): + """ + Test the initial response to be empty. + """ + m = sasl_mechanisms.Anonymous() + self.assertEquals(m.getInitialResponse(), None) + + + +class DigestMD5Test(unittest.TestCase): + def setUp(self): + self.mechanism = sasl_mechanisms.DigestMD5('xmpp', 'example.org', None, + 'test', 'secret') + + + def test_getInitialResponse(self): + """ + Test that no initial response is generated. + """ + self.assertIdentical(self.mechanism.getInitialResponse(), None) + + def test_getResponse(self): + """ + Partially test challenge response. + + Does not actually test the response-value, yet. + """ + + challenge = 'realm="localhost",nonce="1234",qop="auth",charset=utf-8,algorithm=md5-sess' + directives = self.mechanism._parse(self.mechanism.getResponse(challenge)) + self.assertEqual(directives['username'], 'test') + self.assertEqual(directives['nonce'], '1234') + self.assertEqual(directives['nc'], '00000001') + self.assertEqual(directives['qop'], ['auth']) + self.assertEqual(directives['charset'], 'utf-8') + self.assertEqual(directives['digest-uri'], 'xmpp/example.org') + self.assertEqual(directives['realm'], 'localhost') + + def test_getResponseNoRealm(self): + """ + Test that we accept challenges without realm. + + The realm should default to the host part of the JID. + """ + + challenge = 'nonce="1234",qop="auth",charset=utf-8,algorithm=md5-sess' + directives = self.mechanism._parse(self.mechanism.getResponse(challenge)) + self.assertEqual(directives['realm'], 'example.org') + + def test__parse(self): + """ + Test challenge decoding. + + Specifically, check for multiple values for the C{qop} and C{cipher} + directives. + """ + challenge = 'nonce="1234",qop="auth,auth-conf",charset=utf-8,' \ + 'algorithm=md5-sess,cipher="des,3des"' + directives = self.mechanism._parse(challenge) + self.assertEqual('1234', directives['nonce']) + self.assertEqual('utf-8', directives['charset']) + self.assertIn('auth', directives['qop']) + self.assertIn('auth-conf', directives['qop']) + self.assertIn('des', directives['cipher']) + self.assertIn('3des', directives['cipher']) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_jabberxmlstream.py b/vendor/Twisted-10.0.0/twisted/words/test/test_jabberxmlstream.py new file mode 100644 index 000000000000..6bca82456b86 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_jabberxmlstream.py @@ -0,0 +1,1287 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.protocols.jabber.xmlstream}. 
+""" + +from twisted.trial import unittest + +from zope.interface.verify import verifyObject + +from twisted.internet import defer, task +from twisted.internet.error import ConnectionLost +from twisted.internet.interfaces import IProtocolFactory +from twisted.test import proto_helpers +from twisted.words.test.test_xmlstream import GenericXmlStreamFactoryTestsMixin +from twisted.words.xish import domish +from twisted.words.protocols.jabber import error, ijabber, jid, xmlstream + + + +NS_XMPP_TLS = 'urn:ietf:params:xml:ns:xmpp-tls' + + + +class HashPasswordTest(unittest.TestCase): + """ + Tests for L{xmlstream.hashPassword}. + """ + + def test_basic(self): + """ + The sid and secret are concatenated to calculate sha1 hex digest. + """ + hash = xmlstream.hashPassword(u"12345", u"secret") + self.assertEqual('99567ee91b2c7cabf607f10cb9f4a3634fa820e0', hash) + + + def test_sidNotUnicode(self): + """ + The session identifier must be a unicode object. + """ + self.assertRaises(TypeError, xmlstream.hashPassword, "\xc2\xb92345", + u"secret") + + + def test_passwordNotUnicode(self): + """ + The password must be a unicode object. + """ + self.assertRaises(TypeError, xmlstream.hashPassword, u"12345", + "secr\xc3\xa9t") + + + def test_unicodeSecret(self): + """ + The concatenated sid and password must be encoded to UTF-8 before hashing. + """ + hash = xmlstream.hashPassword(u"12345", u"secr\u00e9t") + self.assertEqual('659bf88d8f8e179081f7f3b4a8e7d224652d2853', hash) + + + +class IQTest(unittest.TestCase): + """ + Tests both IQ and the associated IIQResponseTracker callback. + """ + + def setUp(self): + authenticator = xmlstream.ConnectAuthenticator('otherhost') + authenticator.namespace = 'testns' + self.xmlstream = xmlstream.XmlStream(authenticator) + self.clock = task.Clock() + self.xmlstream._callLater = self.clock.callLater + self.xmlstream.makeConnection(proto_helpers.StringTransport()) + self.xmlstream.dataReceived( + "") + self.iq = xmlstream.IQ(self.xmlstream, 'get') + + + def testBasic(self): + self.assertEquals(self.iq['type'], 'get') + self.assertTrue(self.iq['id']) + + + def testSend(self): + self.xmlstream.transport.clear() + self.iq.send() + self.assertEquals("" % self.iq['id'], + self.xmlstream.transport.value()) + + + def testResultResponse(self): + def cb(result): + self.assertEquals(result['type'], 'result') + + d = self.iq.send() + d.addCallback(cb) + + xs = self.xmlstream + xs.dataReceived("" % self.iq['id']) + return d + + + def testErrorResponse(self): + d = self.iq.send() + self.assertFailure(d, error.StanzaError) + + xs = self.xmlstream + xs.dataReceived("" % self.iq['id']) + return d + + + def testNonTrackedResponse(self): + """ + Test that untracked iq responses don't trigger any action. + + Untracked means that the id of the incoming response iq is not + in the stream's C{iqDeferreds} dictionary. + """ + xs = self.xmlstream + xmlstream.upgradeWithIQResponseTracker(xs) + + # Make sure we aren't tracking any iq's. + self.assertFalse(xs.iqDeferreds) + + # Set up a fallback handler that checks the stanza's handled attribute. + # If that is set to True, the iq tracker claims to have handled the + # response. + def cb(iq): + self.assertFalse(getattr(iq, 'handled', False)) + + xs.addObserver("/iq", cb, -1) + + # Receive an untracked iq response + xs.dataReceived("") + + + def testCleanup(self): + """ + Test if the deferred associated with an iq request is removed + from the list kept in the L{XmlStream} object after it has + been fired. 
+ """ + + d = self.iq.send() + xs = self.xmlstream + xs.dataReceived("" % self.iq['id']) + self.assertNotIn(self.iq['id'], xs.iqDeferreds) + return d + + + def testDisconnectCleanup(self): + """ + Test if deferreds for iq's that haven't yet received a response + have their errback called on stream disconnect. + """ + + d = self.iq.send() + xs = self.xmlstream + xs.connectionLost("Closed by peer") + self.assertFailure(d, ConnectionLost) + return d + + + def testNoModifyingDict(self): + """ + Test to make sure the errbacks cannot cause the iteration of the + iqDeferreds to blow up in our face. + """ + + def eb(failure): + d = xmlstream.IQ(self.xmlstream).send() + d.addErrback(eb) + + d = self.iq.send() + d.addErrback(eb) + self.xmlstream.connectionLost("Closed by peer") + return d + + + def testRequestTimingOut(self): + """ + Test that an iq request with a defined timeout times out. + """ + self.iq.timeout = 60 + d = self.iq.send() + self.assertFailure(d, xmlstream.TimeoutError) + + self.clock.pump([1, 60]) + self.assertFalse(self.clock.calls) + self.assertFalse(self.xmlstream.iqDeferreds) + return d + + + def testRequestNotTimingOut(self): + """ + Test that an iq request with a defined timeout does not time out + when a response was received before the timeout period elapsed. + """ + self.iq.timeout = 60 + d = self.iq.send() + self.clock.callLater(1, self.xmlstream.dataReceived, + "" % self.iq['id']) + self.clock.pump([1, 1]) + self.assertFalse(self.clock.calls) + return d + + + def testDisconnectTimeoutCancellation(self): + """ + Test if timeouts for iq's that haven't yet received a response + are cancelled on stream disconnect. + """ + + self.iq.timeout = 60 + d = self.iq.send() + + xs = self.xmlstream + xs.connectionLost("Closed by peer") + self.assertFailure(d, ConnectionLost) + self.assertFalse(self.clock.calls) + return d + + + +class XmlStreamTest(unittest.TestCase): + + def onStreamStart(self, obj): + self.gotStreamStart = True + + + def onStreamEnd(self, obj): + self.gotStreamEnd = True + + + def onStreamError(self, obj): + self.gotStreamError = True + + + def setUp(self): + """ + Set up XmlStream and several observers. + """ + self.gotStreamStart = False + self.gotStreamEnd = False + self.gotStreamError = False + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + xs.addObserver('//event/stream/start', self.onStreamStart) + xs.addObserver('//event/stream/end', self.onStreamEnd) + xs.addObserver('//event/stream/error', self.onStreamError) + xs.makeConnection(proto_helpers.StringTransportWithDisconnection()) + xs.transport.protocol = xs + xs.namespace = 'testns' + xs.version = (1, 0) + self.xmlstream = xs + + + def test_sendHeaderBasic(self): + """ + Basic test on the header sent by sendHeader. + """ + xs = self.xmlstream + xs.sendHeader() + splitHeader = self.xmlstream.transport.value()[0:-1].split(' ') + self.assertIn("") + xs.dataReceived("") + self.assertTrue(self.gotStreamError) + self.assertTrue(self.gotStreamEnd) + + + def test_sendStreamErrorInitiating(self): + """ + Test sendStreamError on an initiating xmlstream with a header sent. + + An error should be sent out and the connection lost. + """ + xs = self.xmlstream + xs.initiating = True + xs.sendHeader() + xs.transport.clear() + xs.sendStreamError(error.StreamError('version-unsupported')) + self.assertNotEqual('', xs.transport.value()) + self.assertTrue(self.gotStreamEnd) + + + def test_sendStreamErrorInitiatingNoHeader(self): + """ + Test sendStreamError on an initiating xmlstream without having sent a + header. 
+ + In this case, no header should be generated. Also, the error should + not be sent out on the stream. Just closing the connection. + """ + xs = self.xmlstream + xs.initiating = True + xs.transport.clear() + xs.sendStreamError(error.StreamError('version-unsupported')) + self.assertNot(xs._headerSent) + self.assertEqual('', xs.transport.value()) + self.assertTrue(self.gotStreamEnd) + + + def test_sendStreamErrorReceiving(self): + """ + Test sendStreamError on a receiving xmlstream with a header sent. + + An error should be sent out and the connection lost. + """ + xs = self.xmlstream + xs.initiating = False + xs.sendHeader() + xs.transport.clear() + xs.sendStreamError(error.StreamError('version-unsupported')) + self.assertNotEqual('', xs.transport.value()) + self.assertTrue(self.gotStreamEnd) + + + def test_sendStreamErrorReceivingNoHeader(self): + """ + Test sendStreamError on a receiving xmlstream without having sent a + header. + + In this case, a header should be generated. Then, the error should + be sent out on the stream followed by closing the connection. + """ + xs = self.xmlstream + xs.initiating = False + xs.transport.clear() + xs.sendStreamError(error.StreamError('version-unsupported')) + self.assertTrue(xs._headerSent) + self.assertNotEqual('', xs.transport.value()) + self.assertTrue(self.gotStreamEnd) + + + def test_reset(self): + """ + Test resetting the XML stream to start a new layer. + """ + xs = self.xmlstream + xs.sendHeader() + stream = xs.stream + xs.reset() + self.assertNotEqual(stream, xs.stream) + self.assertNot(xs._headerSent) + + + def test_send(self): + """ + Test send with various types of objects. + """ + xs = self.xmlstream + xs.send('') + self.assertEqual(xs.transport.value(), '') + + xs.transport.clear() + el = domish.Element(('testns', 'presence')) + xs.send(el) + self.assertEqual(xs.transport.value(), '') + + xs.transport.clear() + el = domish.Element(('http://etherx.jabber.org/streams', 'features')) + xs.send(el) + self.assertEqual(xs.transport.value(), '') + + + def test_authenticator(self): + """ + Test that the associated authenticator is correctly called. + """ + connectionMadeCalls = [] + streamStartedCalls = [] + associateWithStreamCalls = [] + + class TestAuthenticator: + def connectionMade(self): + connectionMadeCalls.append(None) + + def streamStarted(self, rootElement): + streamStartedCalls.append(rootElement) + + def associateWithStream(self, xs): + associateWithStreamCalls.append(xs) + + a = TestAuthenticator() + xs = xmlstream.XmlStream(a) + self.assertEqual([xs], associateWithStreamCalls) + xs.connectionMade() + self.assertEqual([None], connectionMadeCalls) + xs.dataReceived("") + self.assertEqual(1, len(streamStartedCalls)) + xs.reset() + self.assertEqual([None], connectionMadeCalls) + + + +class TestError(Exception): + pass + + + +class AuthenticatorTest(unittest.TestCase): + def setUp(self): + self.authenticator = xmlstream.Authenticator() + self.xmlstream = xmlstream.XmlStream(self.authenticator) + + + def test_streamStart(self): + """ + Test streamStart to fill the appropriate attributes from the + stream header. 
+ """ + xs = self.xmlstream + xs.makeConnection(proto_helpers.StringTransport()) + xs.dataReceived("") + self.assertEqual((1, 0), xs.version) + self.assertIdentical(None, xs.sid) + self.assertEqual('invalid', xs.namespace) + self.assertIdentical(None, xs.otherEntity) + self.assertEqual(None, xs.thisEntity) + + + def test_streamStartLegacy(self): + """ + Test streamStart to fill the appropriate attributes from the + stream header for a pre-XMPP-1.0 header. + """ + xs = self.xmlstream + xs.makeConnection(proto_helpers.StringTransport()) + xs.dataReceived("") + self.assertEqual((0, 0), xs.version) + + + def test_streamBadVersionOneDigit(self): + """ + Test streamStart to fill the appropriate attributes from the + stream header for a version with only one digit. + """ + xs = self.xmlstream + xs.makeConnection(proto_helpers.StringTransport()) + xs.dataReceived("") + self.assertEqual((0, 0), xs.version) + + + def test_streamBadVersionNoNumber(self): + """ + Test streamStart to fill the appropriate attributes from the + stream header for a malformed version. + """ + xs = self.xmlstream + xs.makeConnection(proto_helpers.StringTransport()) + xs.dataReceived("") + self.assertEqual((0, 0), xs.version) + + + +class ConnectAuthenticatorTest(unittest.TestCase): + + def setUp(self): + self.gotAuthenticated = False + self.initFailure = None + self.authenticator = xmlstream.ConnectAuthenticator('otherHost') + self.xmlstream = xmlstream.XmlStream(self.authenticator) + self.xmlstream.addObserver('//event/stream/authd', self.onAuthenticated) + self.xmlstream.addObserver('//event/xmpp/initfailed', self.onInitFailed) + + + def onAuthenticated(self, obj): + self.gotAuthenticated = True + + + def onInitFailed(self, failure): + self.initFailure = failure + + + def testSucces(self): + """ + Test successful completion of an initialization step. + """ + class Initializer: + def initialize(self): + pass + + init = Initializer() + self.xmlstream.initializers = [init] + + self.authenticator.initializeStream() + self.assertEqual([], self.xmlstream.initializers) + self.assertTrue(self.gotAuthenticated) + + + def testFailure(self): + """ + Test failure of an initialization step. + """ + class Initializer: + def initialize(self): + raise TestError + + init = Initializer() + self.xmlstream.initializers = [init] + + self.authenticator.initializeStream() + self.assertEqual([init], self.xmlstream.initializers) + self.assertFalse(self.gotAuthenticated) + self.assertNotIdentical(None, self.initFailure) + self.assertTrue(self.initFailure.check(TestError)) + + + def test_streamStart(self): + """ + Test streamStart to fill the appropriate attributes from the + stream header. + """ + self.authenticator.namespace = 'testns' + xs = self.xmlstream + xs.makeConnection(proto_helpers.StringTransport()) + xs.dataReceived("") + self.assertEqual((1, 0), xs.version) + self.assertEqual('12345', xs.sid) + self.assertEqual('testns', xs.namespace) + self.assertEqual('example.com', xs.otherEntity.host) + self.assertIdentical(None, xs.thisEntity) + self.assertNot(self.gotAuthenticated) + xs.dataReceived("" + "" + "") + self.assertIn(('testns', 'test'), xs.features) + self.assertTrue(self.gotAuthenticated) + + + +class ListenAuthenticatorTest(unittest.TestCase): + def setUp(self): + self.authenticator = xmlstream.ListenAuthenticator() + self.xmlstream = xmlstream.XmlStream(self.authenticator) + + + def test_streamStart(self): + """ + Test streamStart to fill the appropriate attributes from the + stream header. 
+ """ + xs = self.xmlstream + xs.makeConnection(proto_helpers.StringTransport()) + self.assertIdentical(None, xs.sid) + xs.dataReceived("") + self.assertEqual((1, 0), xs.version) + self.assertNotIdentical(None, xs.sid) + self.assertNotEquals('12345', xs.sid) + self.assertEqual('jabber:client', xs.namespace) + self.assertIdentical(None, xs.otherEntity) + self.assertEqual('example.com', xs.thisEntity.host) + + + +class TLSInitiatingInitializerTest(unittest.TestCase): + def setUp(self): + self.output = [] + self.done = [] + + self.savedSSL = xmlstream.ssl + + self.authenticator = xmlstream.Authenticator() + self.xmlstream = xmlstream.XmlStream(self.authenticator) + self.xmlstream.send = self.output.append + self.xmlstream.connectionMade() + self.xmlstream.dataReceived("") + self.init = xmlstream.TLSInitiatingInitializer(self.xmlstream) + + + def tearDown(self): + xmlstream.ssl = self.savedSSL + + + def testWantedSupported(self): + """ + Test start when TLS is wanted and the SSL library available. + """ + self.xmlstream.transport = proto_helpers.StringTransport() + self.xmlstream.transport.startTLS = lambda ctx: self.done.append('TLS') + self.xmlstream.reset = lambda: self.done.append('reset') + self.xmlstream.sendHeader = lambda: self.done.append('header') + + d = self.init.start() + d.addCallback(self.assertEquals, xmlstream.Reset) + starttls = self.output[0] + self.assertEquals('starttls', starttls.name) + self.assertEquals(NS_XMPP_TLS, starttls.uri) + self.xmlstream.dataReceived("" % NS_XMPP_TLS) + self.assertEquals(['TLS', 'reset', 'header'], self.done) + + return d + + if not xmlstream.ssl: + testWantedSupported.skip = "SSL not available" + + + def testWantedNotSupportedNotRequired(self): + """ + Test start when TLS is wanted and the SSL library available. + """ + xmlstream.ssl = None + + d = self.init.start() + d.addCallback(self.assertEquals, None) + self.assertEquals([], self.output) + + return d + + + def testWantedNotSupportedRequired(self): + """ + Test start when TLS is wanted and the SSL library available. + """ + xmlstream.ssl = None + self.init.required = True + + d = self.init.start() + self.assertFailure(d, xmlstream.TLSNotSupported) + self.assertEquals([], self.output) + + return d + + + def testNotWantedRequired(self): + """ + Test start when TLS is not wanted, but required by the server. + """ + tls = domish.Element(('urn:ietf:params:xml:ns:xmpp-tls', 'starttls')) + tls.addElement('required') + self.xmlstream.features = {(tls.uri, tls.name): tls} + self.init.wanted = False + + d = self.init.start() + self.assertEquals([], self.output) + self.assertFailure(d, xmlstream.TLSRequired) + + return d + + + def testNotWantedNotRequired(self): + """ + Test start when TLS is not wanted, but required by the server. + """ + tls = domish.Element(('urn:ietf:params:xml:ns:xmpp-tls', 'starttls')) + self.xmlstream.features = {(tls.uri, tls.name): tls} + self.init.wanted = False + + d = self.init.start() + d.addCallback(self.assertEqual, None) + self.assertEquals([], self.output) + return d + + + def testFailed(self): + """ + Test failed TLS negotiation. + """ + # Pretend that ssl is supported, it isn't actually used when the + # server starts out with a failure in response to our initial + # C{starttls} stanza. 
+ xmlstream.ssl = 1 + + d = self.init.start() + self.assertFailure(d, xmlstream.TLSFailed) + self.xmlstream.dataReceived("" % NS_XMPP_TLS) + return d + + + +class TestFeatureInitializer(xmlstream.BaseFeatureInitiatingInitializer): + feature = ('testns', 'test') + + def start(self): + return defer.succeed(None) + + + +class BaseFeatureInitiatingInitializerTest(unittest.TestCase): + + def setUp(self): + self.xmlstream = xmlstream.XmlStream(xmlstream.Authenticator()) + self.init = TestFeatureInitializer(self.xmlstream) + + + def testAdvertized(self): + """ + Test that an advertized feature results in successful initialization. + """ + self.xmlstream.features = {self.init.feature: + domish.Element(self.init.feature)} + return self.init.initialize() + + + def testNotAdvertizedRequired(self): + """ + Test that when the feature is not advertized, but required by the + initializer, an exception is raised. + """ + self.init.required = True + self.assertRaises(xmlstream.FeatureNotAdvertized, self.init.initialize) + + + def testNotAdvertizedNotRequired(self): + """ + Test that when the feature is not advertized, and not required by the + initializer, the initializer silently succeeds. + """ + self.init.required = False + self.assertIdentical(None, self.init.initialize()) + + + +class ToResponseTest(unittest.TestCase): + + def test_toResponse(self): + """ + Test that a response stanza is generated with addressing swapped. + """ + stanza = domish.Element(('jabber:client', 'iq')) + stanza['type'] = 'get' + stanza['to'] = 'user1@example.com' + stanza['from'] = 'user2@example.com/resource' + stanza['id'] = 'stanza1' + response = xmlstream.toResponse(stanza, 'result') + self.assertNotIdentical(stanza, response) + self.assertEqual(response['from'], 'user1@example.com') + self.assertEqual(response['to'], 'user2@example.com/resource') + self.assertEqual(response['type'], 'result') + self.assertEqual(response['id'], 'stanza1') + + + def test_toResponseNoFrom(self): + """ + Test that a response is generated from a stanza without a from address. + """ + stanza = domish.Element(('jabber:client', 'iq')) + stanza['type'] = 'get' + stanza['to'] = 'user1@example.com' + response = xmlstream.toResponse(stanza) + self.assertEqual(response['from'], 'user1@example.com') + self.assertFalse(response.hasAttribute('to')) + + + def test_toResponseNoTo(self): + """ + Test that a response is generated from a stanza without a to address. + """ + stanza = domish.Element(('jabber:client', 'iq')) + stanza['type'] = 'get' + stanza['from'] = 'user2@example.com/resource' + response = xmlstream.toResponse(stanza) + self.assertFalse(response.hasAttribute('from')) + self.assertEqual(response['to'], 'user2@example.com/resource') + + + def test_toResponseNoAddressing(self): + """ + Test that a response is generated from a stanza without any addressing. + """ + stanza = domish.Element(('jabber:client', 'message')) + stanza['type'] = 'chat' + response = xmlstream.toResponse(stanza) + self.assertFalse(response.hasAttribute('to')) + self.assertFalse(response.hasAttribute('from')) + + + def test_noID(self): + """ + Test that a proper response is generated without id attribute. + """ + stanza = domish.Element(('jabber:client', 'message')) + response = xmlstream.toResponse(stanza) + self.assertFalse(response.hasAttribute('id')) + + + def test_noType(self): + """ + Test that a proper response is generated without type attribute. 
+ """ + stanza = domish.Element(('jabber:client', 'message')) + response = xmlstream.toResponse(stanza) + self.assertFalse(response.hasAttribute('type')) + + +class DummyFactory(object): + """ + Dummy XmlStream factory that only registers bootstrap observers. + """ + def __init__(self): + self.callbacks = {} + + + def addBootstrap(self, event, callback): + self.callbacks[event] = callback + + + +class DummyXMPPHandler(xmlstream.XMPPHandler): + """ + Dummy XMPP subprotocol handler to count the methods are called on it. + """ + def __init__(self): + self.doneMade = 0 + self.doneInitialized = 0 + self.doneLost = 0 + + + def makeConnection(self, xs): + self.connectionMade() + + + def connectionMade(self): + self.doneMade += 1 + + + def connectionInitialized(self): + self.doneInitialized += 1 + + + def connectionLost(self, reason): + self.doneLost += 1 + + + +class XMPPHandlerTest(unittest.TestCase): + """ + Tests for L{xmlstream.XMPPHandler}. + """ + + def test_interface(self): + """ + L{xmlstream.XMPPHandler} implements L{ijabber.IXMPPHandler}. + """ + verifyObject(ijabber.IXMPPHandler, xmlstream.XMPPHandler()) + + + def test_send(self): + """ + Test that data is passed on for sending by the stream manager. + """ + class DummyStreamManager(object): + def __init__(self): + self.outlist = [] + + def send(self, data): + self.outlist.append(data) + + handler = xmlstream.XMPPHandler() + handler.parent = DummyStreamManager() + handler.send('') + self.assertEquals([''], handler.parent.outlist) + + + def test_makeConnection(self): + """ + Test that makeConnection saves the XML stream and calls connectionMade. + """ + class TestXMPPHandler(xmlstream.XMPPHandler): + def connectionMade(self): + self.doneMade = True + + handler = TestXMPPHandler() + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + handler.makeConnection(xs) + self.assertTrue(handler.doneMade) + self.assertIdentical(xs, handler.xmlstream) + + + def test_connectionLost(self): + """ + Test that connectionLost forgets the XML stream. + """ + handler = xmlstream.XMPPHandler() + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + handler.makeConnection(xs) + handler.connectionLost(Exception()) + self.assertIdentical(None, handler.xmlstream) + + + +class XMPPHandlerCollectionTest(unittest.TestCase): + """ + Tests for L{xmlstream.XMPPHandlerCollection}. + """ + + def setUp(self): + self.collection = xmlstream.XMPPHandlerCollection() + + + def test_interface(self): + """ + L{xmlstream.StreamManager} implements L{ijabber.IXMPPHandlerCollection}. + """ + verifyObject(ijabber.IXMPPHandlerCollection, self.collection) + + + def test_addHandler(self): + """ + Test the addition of a protocol handler. + """ + handler = DummyXMPPHandler() + handler.setHandlerParent(self.collection) + self.assertIn(handler, self.collection) + self.assertIdentical(self.collection, handler.parent) + + + def test_removeHandler(self): + """ + Test removal of a protocol handler. + """ + handler = DummyXMPPHandler() + handler.setHandlerParent(self.collection) + handler.disownHandlerParent(self.collection) + self.assertNotIn(handler, self.collection) + self.assertIdentical(None, handler.parent) + + + +class StreamManagerTest(unittest.TestCase): + """ + Tests for L{xmlstream.StreamManager}. + """ + + def setUp(self): + factory = DummyFactory() + self.streamManager = xmlstream.StreamManager(factory) + + + def test_basic(self): + """ + Test correct initialization and setup of factory observers. 
+ """ + sm = self.streamManager + self.assertIdentical(None, sm.xmlstream) + self.assertEquals([], sm.handlers) + self.assertEquals(sm._connected, + sm.factory.callbacks['//event/stream/connected']) + self.assertEquals(sm._authd, + sm.factory.callbacks['//event/stream/authd']) + self.assertEquals(sm._disconnected, + sm.factory.callbacks['//event/stream/end']) + self.assertEquals(sm.initializationFailed, + sm.factory.callbacks['//event/xmpp/initfailed']) + + + def test_connected(self): + """ + Test that protocol handlers have their connectionMade method called + when the XML stream is connected. + """ + sm = self.streamManager + handler = DummyXMPPHandler() + handler.setHandlerParent(sm) + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + sm._connected(xs) + self.assertEquals(1, handler.doneMade) + self.assertEquals(0, handler.doneInitialized) + self.assertEquals(0, handler.doneLost) + + + def test_connectedLogTrafficFalse(self): + """ + Test raw data functions unset when logTraffic is set to False. + """ + sm = self.streamManager + handler = DummyXMPPHandler() + handler.setHandlerParent(sm) + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + sm._connected(xs) + self.assertIdentical(None, xs.rawDataInFn) + self.assertIdentical(None, xs.rawDataOutFn) + + + def test_connectedLogTrafficTrue(self): + """ + Test raw data functions set when logTraffic is set to True. + """ + sm = self.streamManager + sm.logTraffic = True + handler = DummyXMPPHandler() + handler.setHandlerParent(sm) + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + sm._connected(xs) + self.assertNotIdentical(None, xs.rawDataInFn) + self.assertNotIdentical(None, xs.rawDataOutFn) + + + def test_authd(self): + """ + Test that protocol handlers have their connectionInitialized method + called when the XML stream is initialized. + """ + sm = self.streamManager + handler = DummyXMPPHandler() + handler.setHandlerParent(sm) + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + sm._authd(xs) + self.assertEquals(0, handler.doneMade) + self.assertEquals(1, handler.doneInitialized) + self.assertEquals(0, handler.doneLost) + + + def test_disconnected(self): + """ + Test that protocol handlers have their connectionLost method + called when the XML stream is disconnected. + """ + sm = self.streamManager + handler = DummyXMPPHandler() + handler.setHandlerParent(sm) + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + sm._disconnected(xs) + self.assertEquals(0, handler.doneMade) + self.assertEquals(0, handler.doneInitialized) + self.assertEquals(1, handler.doneLost) + + + def test_addHandler(self): + """ + Test the addition of a protocol handler while not connected. + """ + sm = self.streamManager + handler = DummyXMPPHandler() + handler.setHandlerParent(sm) + + self.assertEquals(0, handler.doneMade) + self.assertEquals(0, handler.doneInitialized) + self.assertEquals(0, handler.doneLost) + + + def test_addHandlerInitialized(self): + """ + Test the addition of a protocol handler after the stream + have been initialized. + + Make sure that the handler will have the connected stream + passed via C{makeConnection} and have C{connectionInitialized} + called. 
+ """ + sm = self.streamManager + xs = xmlstream.XmlStream(xmlstream.Authenticator()) + sm._connected(xs) + sm._authd(xs) + handler = DummyXMPPHandler() + handler.setHandlerParent(sm) + + self.assertEquals(1, handler.doneMade) + self.assertEquals(1, handler.doneInitialized) + self.assertEquals(0, handler.doneLost) + + + def test_sendInitialized(self): + """ + Test send when the stream has been initialized. + + The data should be sent directly over the XML stream. + """ + factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator()) + sm = xmlstream.StreamManager(factory) + xs = factory.buildProtocol(None) + xs.transport = proto_helpers.StringTransport() + xs.connectionMade() + xs.dataReceived("") + xs.dispatch(xs, "//event/stream/authd") + sm.send("") + self.assertEquals("", xs.transport.value()) + + + def test_sendNotConnected(self): + """ + Test send when there is no established XML stream. + + The data should be cached until an XML stream has been established and + initialized. + """ + factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator()) + sm = xmlstream.StreamManager(factory) + handler = DummyXMPPHandler() + sm.addHandler(handler) + + xs = factory.buildProtocol(None) + xs.transport = proto_helpers.StringTransport() + sm.send("") + self.assertEquals("", xs.transport.value()) + self.assertEquals("", sm._packetQueue[0]) + + xs.connectionMade() + self.assertEquals("", xs.transport.value()) + self.assertEquals("", sm._packetQueue[0]) + + xs.dataReceived("") + xs.dispatch(xs, "//event/stream/authd") + + self.assertEquals("", xs.transport.value()) + self.assertFalse(sm._packetQueue) + + + def test_sendNotInitialized(self): + """ + Test send when the stream is connected but not yet initialized. + + The data should be cached until the XML stream has been initialized. + """ + factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator()) + sm = xmlstream.StreamManager(factory) + xs = factory.buildProtocol(None) + xs.transport = proto_helpers.StringTransport() + xs.connectionMade() + xs.dataReceived("") + sm.send("") + self.assertEquals("", xs.transport.value()) + self.assertEquals("", sm._packetQueue[0]) + + + def test_sendDisconnected(self): + """ + Test send after XML stream disconnection. + + The data should be cached until a new XML stream has been established + and initialized. + """ + factory = xmlstream.XmlStreamFactory(xmlstream.Authenticator()) + sm = xmlstream.StreamManager(factory) + handler = DummyXMPPHandler() + sm.addHandler(handler) + + xs = factory.buildProtocol(None) + xs.connectionMade() + xs.transport = proto_helpers.StringTransport() + xs.connectionLost(None) + + sm.send("") + self.assertEquals("", xs.transport.value()) + self.assertEquals("", sm._packetQueue[0]) + + + +class XmlStreamServerFactoryTest(GenericXmlStreamFactoryTestsMixin): + """ + Tests for L{xmlstream.XmlStreamServerFactory}. + """ + + def setUp(self): + """ + Set up a server factory with a authenticator factory function. + """ + class TestAuthenticator(object): + def __init__(self): + self.xmlstreams = [] + + def associateWithStream(self, xs): + self.xmlstreams.append(xs) + + def authenticatorFactory(): + return TestAuthenticator() + + self.factory = xmlstream.XmlStreamServerFactory(authenticatorFactory) + + + def test_interface(self): + """ + L{XmlStreamServerFactory} is a L{Factory}. 
+ """ + verifyObject(IProtocolFactory, self.factory) + + + def test_buildProtocolAuthenticatorInstantiation(self): + """ + The authenticator factory should be used to instantiate the + authenticator and pass it to the protocol. + + The default protocol, L{XmlStream} stores the authenticator it is + passed, and calls its C{associateWithStream} method. so we use that to + check whether our authenticator factory is used and the protocol + instance gets an authenticator. + """ + xs = self.factory.buildProtocol(None) + self.assertEquals([xs], xs.authenticator.xmlstreams) + + + def test_buildProtocolXmlStream(self): + """ + The protocol factory creates Jabber XML Stream protocols by default. + """ + xs = self.factory.buildProtocol(None) + self.assertIsInstance(xs, xmlstream.XmlStream) + + + def test_buildProtocolTwice(self): + """ + Subsequent calls to buildProtocol should result in different instances + of the protocol, as well as their authenticators. + """ + xs1 = self.factory.buildProtocol(None) + xs2 = self.factory.buildProtocol(None) + self.assertNotIdentical(xs1, xs2) + self.assertNotIdentical(xs1.authenticator, xs2.authenticator) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_jabberxmppstringprep.py b/vendor/Twisted-10.0.0/twisted/words/test/test_jabberxmppstringprep.py new file mode 100644 index 000000000000..e42c0d6c521b --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_jabberxmppstringprep.py @@ -0,0 +1,84 @@ +# Copyright (c) 2005 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.trial import unittest + +from twisted.words.protocols.jabber.xmpp_stringprep import nodeprep, resourceprep, nameprep, crippled + +class XMPPStringPrepTest(unittest.TestCase): + """ + + The nodeprep stringprep profile is similar to the resourceprep profile, + but does an extra mapping of characters (table B.2) and disallows + more characters (table C.1.1 and eight extra punctuation characters). + Due to this similarity, the resourceprep tests are more extensive, and + the nodeprep tests only address the mappings additional restrictions. + + The nameprep profile is nearly identical to the nameprep implementation in + L{encodings.idna}, but that implementation assumes the C{UseSTD4ASCIIRules} + flag to be false. This implementation assumes it to be true, and restricts + the allowed set of characters. The tests here only check for the + differences. 
+ + """ + + def testResourcePrep(self): + self.assertEquals(resourceprep.prepare(u'resource'), u'resource') + self.assertNotEquals(resourceprep.prepare(u'Resource'), u'resource') + self.assertEquals(resourceprep.prepare(u' '), u' ') + + if crippled: + return + + self.assertEquals(resourceprep.prepare(u'Henry \u2163'), u'Henry IV') + self.assertEquals(resourceprep.prepare(u'foo\xad\u034f\u1806\u180b' + u'bar\u200b\u2060' + u'baz\ufe00\ufe08\ufe0f\ufeff'), + u'foobarbaz') + self.assertEquals(resourceprep.prepare(u'\u00a0'), u' ') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\u1680') + self.assertEquals(resourceprep.prepare(u'\u2000'), u' ') + self.assertEquals(resourceprep.prepare(u'\u200b'), u'') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\u0010\u007f') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\u0085') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\u180e') + self.assertEquals(resourceprep.prepare(u'\ufeff'), u'') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\uf123') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000f1234') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0010f234') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0008fffe') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\U0010ffff') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\udf42') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\ufffd') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\u2ff5') + self.assertEquals(resourceprep.prepare(u'\u0341'), u'\u0301') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\u200e') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\u202a') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0001') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0042') + self.assertRaises(UnicodeError, resourceprep.prepare, u'foo\u05bebar') + self.assertRaises(UnicodeError, resourceprep.prepare, u'foo\ufd50bar') + #self.assertEquals(resourceprep.prepare(u'foo\ufb38bar'), + # u'foo\u064ebar') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\u06271') + self.assertEquals(resourceprep.prepare(u'\u06271\u0628'), + u'\u06271\u0628') + self.assertRaises(UnicodeError, resourceprep.prepare, u'\U000e0002') + + def testNodePrep(self): + self.assertEquals(nodeprep.prepare(u'user'), u'user') + self.assertEquals(nodeprep.prepare(u'User'), u'user') + self.assertRaises(UnicodeError, nodeprep.prepare, u'us&er') + + def testNamePrep(self): + self.assertEquals(nameprep.prepare(u'example.com'), u'example.com') + self.assertEquals(nameprep.prepare(u'Example.com'), u'example.com') + self.assertRaises(UnicodeError, nameprep.prepare, u'ex@mple.com') + self.assertRaises(UnicodeError, nameprep.prepare, u'-example.com') + self.assertRaises(UnicodeError, nameprep.prepare, u'example-.com') + + if crippled: + return + + self.assertEquals(nameprep.prepare(u'stra\u00dfe.example.com'), + u'strasse.example.com') diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_msn.py b/vendor/Twisted-10.0.0/twisted/words/test/test_msn.py new file mode 100644 index 000000000000..9074310cb941 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_msn.py @@ -0,0 +1,503 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Test cases for L{twisted.words.protocols.msn}. 
+""" + +# System imports +import StringIO + +# Twisted imports + +# t.w.p.msn requires an HTTP client +try: + # So try to get one - do it directly instead of catching an ImportError + # from t.w.p.msn so that other problems which cause that module to fail + # to import don't cause the tests to be skipped. + from twisted.web import client +except ImportError: + # If there isn't one, we're going to skip all the tests. + msn = None +else: + # Otherwise importing it should work, so do it. + from twisted.words.protocols import msn + + +from twisted.python.hashlib import md5 +from twisted.protocols import loopback +from twisted.internet.defer import Deferred +from twisted.trial import unittest +from twisted.test.proto_helpers import StringTransport, StringIOWithoutClosing + +def printError(f): + print f + + +class PassportTests(unittest.TestCase): + + def setUp(self): + self.result = [] + self.deferred = Deferred() + self.deferred.addCallback(lambda r: self.result.append(r)) + self.deferred.addErrback(printError) + + def test_nexus(self): + """ + When L{msn.PassportNexus} receives enough information to identify the + address of the login server, it fires the L{Deferred} passed to its + initializer with that address. + """ + protocol = msn.PassportNexus(self.deferred, 'https://foobar.com/somepage.quux') + headers = { + 'Content-Length' : '0', + 'Content-Type' : 'text/html', + 'PassportURLs' : 'DARealm=Passport.Net,DALogin=login.myserver.com/,DAReg=reg.myserver.com' + } + transport = StringTransport() + protocol.makeConnection(transport) + protocol.dataReceived('HTTP/1.0 200 OK\r\n') + for (h, v) in headers.items(): + protocol.dataReceived('%s: %s\r\n' % (h,v)) + protocol.dataReceived('\r\n') + self.assertEquals(self.result[0], "https://login.myserver.com/") + + + def _doLoginTest(self, response, headers): + protocol = msn.PassportLogin(self.deferred,'foo@foo.com','testpass','https://foo.com/', 'a') + protocol.makeConnection(StringTransport()) + protocol.dataReceived(response) + for (h,v) in headers.items(): protocol.dataReceived('%s: %s\r\n' % (h,v)) + protocol.dataReceived('\r\n') + + def testPassportLoginSuccess(self): + headers = { + 'Content-Length' : '0', + 'Content-Type' : 'text/html', + 'Authentication-Info' : "Passport1.4 da-status=success,tname=MSPAuth," + + "tname=MSPProf,tname=MSPSec,from-PP='somekey'," + + "ru=http://messenger.msn.com" + } + self._doLoginTest('HTTP/1.1 200 OK\r\n', headers) + self.failUnless(self.result[0] == (msn.LOGIN_SUCCESS, 'somekey')) + + def testPassportLoginFailure(self): + headers = { + 'Content-Type' : 'text/html', + 'WWW-Authenticate' : 'Passport1.4 da-status=failed,' + + 'srealm=Passport.NET,ts=-3,prompt,cburl=http://host.com,' + + 'cbtxt=the%20error%20message' + } + self._doLoginTest('HTTP/1.1 401 Unauthorized\r\n', headers) + self.failUnless(self.result[0] == (msn.LOGIN_FAILURE, 'the error message')) + + def testPassportLoginRedirect(self): + headers = { + 'Content-Type' : 'text/html', + 'Authentication-Info' : 'Passport1.4 da-status=redir', + 'Location' : 'https://newlogin.host.com/' + } + self._doLoginTest('HTTP/1.1 302 Found\r\n', headers) + self.failUnless(self.result[0] == (msn.LOGIN_REDIRECT, 'https://newlogin.host.com/', 'a')) + + +if msn is not None: + class DummySwitchboardClient(msn.SwitchboardClient): + def userTyping(self, message): + self.state = 'TYPING' + + def gotSendRequest(self, fileName, fileSize, cookie, message): + if fileName == 'foobar.ext' and fileSize == 31337 and cookie == 1234: self.state = 'INVITATION' + + + class 
DummyNotificationClient(msn.NotificationClient): + def loggedIn(self, userHandle, screenName, verified): + if userHandle == 'foo@bar.com' and screenName == 'Test Screen Name' and verified: + self.state = 'LOGIN' + + def gotProfile(self, message): + self.state = 'PROFILE' + + def gotContactStatus(self, code, userHandle, screenName): + if code == msn.STATUS_AWAY and userHandle == "foo@bar.com" and screenName == "Test Screen Name": + self.state = 'INITSTATUS' + + def contactStatusChanged(self, code, userHandle, screenName): + if code == msn.STATUS_LUNCH and userHandle == "foo@bar.com" and screenName == "Test Name": + self.state = 'NEWSTATUS' + + def contactOffline(self, userHandle): + if userHandle == "foo@bar.com": self.state = 'OFFLINE' + + def statusChanged(self, code): + if code == msn.STATUS_HIDDEN: self.state = 'MYSTATUS' + + def listSynchronized(self, *args): + self.state = 'GOTLIST' + + def gotPhoneNumber(self, listVersion, userHandle, phoneType, number): + msn.NotificationClient.gotPhoneNumber(self, listVersion, userHandle, phoneType, number) + self.state = 'GOTPHONE' + + def userRemovedMe(self, userHandle, listVersion): + msn.NotificationClient.userRemovedMe(self, userHandle, listVersion) + c = self.factory.contacts.getContact(userHandle) + if not c and self.factory.contacts.version == listVersion: self.state = 'USERREMOVEDME' + + def userAddedMe(self, userHandle, screenName, listVersion): + msn.NotificationClient.userAddedMe(self, userHandle, screenName, listVersion) + c = self.factory.contacts.getContact(userHandle) + if c and (c.lists | msn.REVERSE_LIST) and (self.factory.contacts.version == listVersion) and \ + (screenName == 'Screen Name'): + self.state = 'USERADDEDME' + + def gotSwitchboardInvitation(self, sessionID, host, port, key, userHandle, screenName): + if sessionID == 1234 and \ + host == '192.168.1.1' and \ + port == 1863 and \ + key == '123.456' and \ + userHandle == 'foo@foo.com' and \ + screenName == 'Screen Name': + self.state = 'SBINVITED' + + + +class DispatchTests(unittest.TestCase): + """ + Tests for L{DispatchClient}. + """ + def _versionTest(self, serverVersionResponse): + """ + Test L{DispatchClient} version negotiation. + """ + client = msn.DispatchClient() + client.userHandle = "foo" + + transport = StringTransport() + client.makeConnection(transport) + self.assertEquals( + transport.value(), "VER 1 MSNP8 CVR0\r\n") + transport.clear() + + client.dataReceived(serverVersionResponse) + self.assertEquals( + transport.value(), + "CVR 2 0x0409 win 4.10 i386 MSNMSGR 5.0.0544 MSMSGS foo\r\n") + + + def test_version(self): + """ + L{DispatchClient.connectionMade} greets the server with a I{VER} + (version) message and then L{NotificationClient.dataReceived} + handles the server's I{VER} response by sending a I{CVR} (client + version) message. + """ + self._versionTest("VER 1 MSNP8 CVR0\r\n") + + + def test_versionWithoutCVR0(self): + """ + If the server responds to a I{VER} command without including the + I{CVR0} protocol, L{DispatchClient} behaves in the same way as if + that protocol were included. + + Starting in August 2008, CVR0 disappeared from the I{VER} response. 
+ """ + self._versionTest("VER 1 MSNP8\r\n") + + + +class NotificationTests(unittest.TestCase): + """ testing the various events in NotificationClient """ + + def setUp(self): + self.client = DummyNotificationClient() + self.client.factory = msn.NotificationFactory() + self.client.state = 'START' + + + def tearDown(self): + self.client = None + + + def _versionTest(self, serverVersionResponse): + """ + Test L{NotificationClient} version negotiation. + """ + self.client.factory.userHandle = "foo" + + transport = StringTransport() + self.client.makeConnection(transport) + self.assertEquals( + transport.value(), "VER 1 MSNP8 CVR0\r\n") + transport.clear() + + self.client.dataReceived(serverVersionResponse) + self.assertEquals( + transport.value(), + "CVR 2 0x0409 win 4.10 i386 MSNMSGR 5.0.0544 MSMSGS foo\r\n") + + + def test_version(self): + """ + L{NotificationClient.connectionMade} greets the server with a I{VER} + (version) message and then L{NotificationClient.dataReceived} + handles the server's I{VER} response by sending a I{CVR} (client + version) message. + """ + self._versionTest("VER 1 MSNP8 CVR0\r\n") + + + def test_versionWithoutCVR0(self): + """ + If the server responds to a I{VER} command without including the + I{CVR0} protocol, L{NotificationClient} behaves in the same way as + if that protocol were included. + + Starting in August 2008, CVR0 disappeared from the I{VER} response. + """ + self._versionTest("VER 1 MSNP8\r\n") + + + def test_challenge(self): + """ + L{NotificationClient} responds to a I{CHL} message by sending a I{QRY} + back which included a hash based on the parameters of the I{CHL}. + """ + transport = StringTransport() + self.client.makeConnection(transport) + transport.clear() + + challenge = "15570131571988941333" + self.client.dataReceived('CHL 0 ' + challenge + '\r\n') + # md5 of the challenge and a magic string defined by the protocol + response = "8f2f5a91b72102cd28355e9fc9000d6e" + # Sanity check - the response is what the comment above says it is. + self.assertEquals( + response, md5(challenge + "Q1P7W2E4J9R8U3S5").hexdigest()) + self.assertEquals( + transport.value(), + # 2 is the next transaction identifier. 32 is the length of the + # response. 
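+ # The 32-character hex digest itself follows the QRY line as its payload.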
+ "QRY 2 msmsgs@msnmsgr.com 32\r\n" + response) + + + def testLogin(self): + self.client.lineReceived('USR 1 OK foo@bar.com Test%20Screen%20Name 1 0') + self.failUnless((self.client.state == 'LOGIN'), msg='Failed to detect successful login') + + + def testProfile(self): + m = 'MSG Hotmail Hotmail 353\r\nMIME-Version: 1.0\r\nContent-Type: text/x-msmsgsprofile; charset=UTF-8\r\n' + m += 'LoginTime: 1016941010\r\nEmailEnabled: 1\r\nMemberIdHigh: 40000\r\nMemberIdLow: -600000000\r\nlang_preference: 1033\r\n' + m += 'preferredEmail: foo@bar.com\r\ncountry: AU\r\nPostalCode: 90210\r\nGender: M\r\nKid: 0\r\nAge:\r\nsid: 400\r\n' + m += 'kv: 2\r\nMSPAuth: 2CACCBCCADMoV8ORoz64BVwmjtksIg!kmR!Rj5tBBqEaW9hc4YnPHSOQ$$\r\n\r\n' + map(self.client.lineReceived, m.split('\r\n')[:-1]) + self.failUnless((self.client.state == 'PROFILE'), msg='Failed to detect initial profile') + + def testStatus(self): + t = [('ILN 1 AWY foo@bar.com Test%20Screen%20Name 0', 'INITSTATUS', 'Failed to detect initial status report'), + ('NLN LUN foo@bar.com Test%20Name 0', 'NEWSTATUS', 'Failed to detect contact status change'), + ('FLN foo@bar.com', 'OFFLINE', 'Failed to detect contact signing off'), + ('CHG 1 HDN 0', 'MYSTATUS', 'Failed to detect my status changing')] + for i in t: + self.client.lineReceived(i[0]) + self.failUnless((self.client.state == i[1]), msg=i[2]) + + def testListSync(self): + # currently this test does not take into account the fact + # that BPRs sent as part of the SYN reply may not be interpreted + # as such if they are for the last LST -- maybe I should + # factor this in later. + self.client.makeConnection(StringTransport()) + msn.NotificationClient.loggedIn(self.client, 'foo@foo.com', 'foobar', 1) + lines = [ + "SYN %s 100 1 1" % self.client.currentID, + "GTC A", + "BLP AL", + "LSG 0 Other%20Contacts 0", + "LST userHandle@email.com Some%20Name 11 0" + ] + map(self.client.lineReceived, lines) + contacts = self.client.factory.contacts + contact = contacts.getContact('userHandle@email.com') + self.failUnless(contacts.version == 100, "Invalid contact list version") + self.failUnless(contact.screenName == 'Some Name', "Invalid screen-name for user") + self.failUnless(contacts.groups == {0 : 'Other Contacts'}, "Did not get proper group list") + self.failUnless(contact.groups == [0] and contact.lists == 11, "Invalid contact list/group info") + self.failUnless(self.client.state == 'GOTLIST', "Failed to call list sync handler") + + def testAsyncPhoneChange(self): + c = msn.MSNContact(userHandle='userHandle@email.com') + self.client.factory.contacts = msn.MSNContactList() + self.client.factory.contacts.addContact(c) + self.client.makeConnection(StringTransport()) + self.client.lineReceived("BPR 101 userHandle@email.com PHH 123%20456") + c = self.client.factory.contacts.getContact('userHandle@email.com') + self.failUnless(self.client.state == 'GOTPHONE', "Did not fire phone change callback") + self.failUnless(c.homePhone == '123 456', "Did not update the contact's phone number") + self.failUnless(self.client.factory.contacts.version == 101, "Did not update list version") + + def testLateBPR(self): + """ + This test makes sure that if a BPR response that was meant + to be part of a SYN response (but came after the last LST) + is received, the correct contact is updated and all is well + """ + self.client.makeConnection(StringTransport()) + msn.NotificationClient.loggedIn(self.client, 'foo@foo.com', 'foo', 1) + lines = [ + "SYN %s 100 1 1" % self.client.currentID, + "GTC A", + "BLP AL", + "LSG 0 Other%20Contacts 
0", + "LST userHandle@email.com Some%20Name 11 0", + "BPR PHH 123%20456" + ] + map(self.client.lineReceived, lines) + contact = self.client.factory.contacts.getContact('userHandle@email.com') + self.failUnless(contact.homePhone == '123 456', "Did not update contact's phone number") + + def testUserRemovedMe(self): + self.client.factory.contacts = msn.MSNContactList() + contact = msn.MSNContact(userHandle='foo@foo.com') + contact.addToList(msn.REVERSE_LIST) + self.client.factory.contacts.addContact(contact) + self.client.lineReceived("REM 0 RL 100 foo@foo.com") + self.failUnless(self.client.state == 'USERREMOVEDME', "Failed to remove user from reverse list") + + def testUserAddedMe(self): + self.client.factory.contacts = msn.MSNContactList() + self.client.lineReceived("ADD 0 RL 100 foo@foo.com Screen%20Name") + self.failUnless(self.client.state == 'USERADDEDME', "Failed to add user to reverse lise") + + def testAsyncSwitchboardInvitation(self): + self.client.lineReceived("RNG 1234 192.168.1.1:1863 CKI 123.456 foo@foo.com Screen%20Name") + self.failUnless(self.client.state == "SBINVITED") + + def testCommandFailed(self): + """ + Ensures that error responses from the server fires an errback with + MSNCommandFailed. + """ + id, d = self.client._createIDMapping() + self.client.lineReceived("201 %s" % id) + d = self.assertFailure(d, msn.MSNCommandFailed) + def assertErrorCode(exception): + self.assertEqual(201, exception.errorCode) + return d.addCallback(assertErrorCode) + + +class MessageHandlingTests(unittest.TestCase): + """ testing various message handling methods from SwichboardClient """ + + def setUp(self): + self.client = DummySwitchboardClient() + self.client.state = 'START' + + def tearDown(self): + self.client = None + + def testClientCapabilitiesCheck(self): + m = msn.MSNMessage() + m.setHeader('Content-Type', 'text/x-clientcaps') + self.assertEquals(self.client.checkMessage(m), 0, 'Failed to detect client capability message') + + def testTypingCheck(self): + m = msn.MSNMessage() + m.setHeader('Content-Type', 'text/x-msmsgscontrol') + m.setHeader('TypingUser', 'foo@bar') + self.client.checkMessage(m) + self.failUnless((self.client.state == 'TYPING'), msg='Failed to detect typing notification') + + def testFileInvitation(self, lazyClient=False): + m = msn.MSNMessage() + m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8') + m.message += 'Application-Name: File Transfer\r\n' + if not lazyClient: + m.message += 'Application-GUID: {5D3E02AB-6190-11d3-BBBB-00C04F795683}\r\n' + m.message += 'Invitation-Command: Invite\r\n' + m.message += 'Invitation-Cookie: 1234\r\n' + m.message += 'Application-File: foobar.ext\r\n' + m.message += 'Application-FileSize: 31337\r\n\r\n' + self.client.checkMessage(m) + self.failUnless((self.client.state == 'INVITATION'), msg='Failed to detect file transfer invitation') + + def testFileInvitationMissingGUID(self): + return self.testFileInvitation(True) + + def testFileResponse(self): + d = Deferred() + d.addCallback(self.fileResponse) + self.client.cookies['iCookies'][1234] = (d, None) + m = msn.MSNMessage() + m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8') + m.message += 'Invitation-Command: ACCEPT\r\n' + m.message += 'Invitation-Cookie: 1234\r\n\r\n' + self.client.checkMessage(m) + self.failUnless((self.client.state == 'RESPONSE'), msg='Failed to detect file transfer response') + + def testFileInfo(self): + d = Deferred() + d.addCallback(self.fileInfo) + self.client.cookies['external'][1234] = (d, None) + m = msn.MSNMessage() + 
m.setHeader('Content-Type', 'text/x-msmsgsinvite; charset=UTF-8') + m.message += 'Invitation-Command: ACCEPT\r\n' + m.message += 'Invitation-Cookie: 1234\r\n' + m.message += 'IP-Address: 192.168.0.1\r\n' + m.message += 'Port: 6891\r\n' + m.message += 'AuthCookie: 4321\r\n\r\n' + self.client.checkMessage(m) + self.failUnless((self.client.state == 'INFO'), msg='Failed to detect file transfer info') + + def fileResponse(self, (accept, cookie, info)): + if accept and cookie == 1234: self.client.state = 'RESPONSE' + + def fileInfo(self, (accept, ip, port, aCookie, info)): + if accept and ip == '192.168.0.1' and port == 6891 and aCookie == 4321: self.client.state = 'INFO' + + +class FileTransferTestCase(unittest.TestCase): + """ + test FileSend against FileReceive + """ + + def setUp(self): + self.input = 'a' * 7000 + self.output = StringIOWithoutClosing() + + + def tearDown(self): + self.input = None + self.output = None + + + def test_fileTransfer(self): + """ + Test L{FileSend} against L{FileReceive} using a loopback transport. + """ + auth = 1234 + sender = msn.FileSend(StringIO.StringIO(self.input)) + sender.auth = auth + sender.fileSize = 7000 + client = msn.FileReceive(auth, "foo@bar.com", self.output) + client.fileSize = 7000 + def check(ignored): + self.assertTrue( + client.completed and sender.completed, + msg="send failed to complete") + self.assertEqual( + self.input, self.output.getvalue(), + msg="saved file does not match original") + d = loopback.loopbackAsync(sender, client) + d.addCallback(check) + return d + + +if msn is None: + for testClass in [PassportTests, NotificationTests, + MessageHandlingTests, FileTransferTestCase]: + testClass.skip = ( + "MSN requires an HTTP client but none is available, " + "skipping tests.") diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_oscar.py b/vendor/Twisted-10.0.0/twisted/words/test/test_oscar.py new file mode 100644 index 000000000000..7fdcf40238bb --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_oscar.py @@ -0,0 +1,24 @@ +# Copyright (c) 2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.protocols.oscar}. +""" + +from twisted.trial.unittest import TestCase + +from twisted.words.protocols.oscar import encryptPasswordMD5 + + +class PasswordTests(TestCase): + """ + Tests for L{encryptPasswordMD5}. + """ + def test_encryptPasswordMD5(self): + """ + L{encryptPasswordMD5} hashes the given password and key and returns a + string suitable to use to authenticate against an OSCAR server. + """ + self.assertEqual( + encryptPasswordMD5('foo', 'bar').encode('hex'), + 'd73475c370a7b18c6c20386bcf1339f2') diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_service.py b/vendor/Twisted-10.0.0/twisted/words/test/test_service.py new file mode 100644 index 000000000000..aa79a64311d4 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_service.py @@ -0,0 +1,992 @@ +# Copyright (c) 2009 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.service}. 
+""" + +import time + +from twisted.trial import unittest +from twisted.test import proto_helpers + +from twisted.cred import portal, credentials, checkers +from twisted.words import ewords, service +from twisted.words.protocols import irc +from twisted.spread import pb +from twisted.internet.defer import Deferred, DeferredList, maybeDeferred, succeed +from twisted.internet.defer import deferredGenerator as dG, waitForDeferred as wFD +from twisted.internet import address, reactor + +class RealmTestCase(unittest.TestCase): + def _entityCreationTest(self, kind): + # Kind is "user" or "group" + realm = service.InMemoryWordsRealm("realmname") + + name = u'test' + kind.lower() + create = getattr(realm, 'create' + kind.title()) + get = getattr(realm, 'get' + kind.title()) + flag = 'create' + kind.title() + 'OnRequest' + dupExc = getattr(ewords, 'Duplicate' + kind.title()) + noSuchExc = getattr(ewords, 'NoSuch' + kind.title()) + + # Creating should succeed + d = wFD(create(name)) + yield d + p = d.getResult() + self.assertEquals(p.name, name) + + # Creating the same user again should not + d = wFD(create(name)) + yield d + self.assertRaises(dupExc, d.getResult) + + # Getting a non-existent user should succeed if createUserOnRequest is True + setattr(realm, flag, True) + d = wFD(get(u"new" + kind.lower())) + yield d + p = d.getResult() + self.assertEquals(p.name, "new" + kind.lower()) + + # Getting that user again should return the same object + d = wFD(get(u"new" + kind.lower())) + yield d + newp = d.getResult() + self.assertIdentical(p, newp) + + # Getting a non-existent user should fail if createUserOnRequest is False + setattr(realm, flag, False) + d = wFD(get(u"another" + kind.lower())) + yield d + self.assertRaises(noSuchExc, d.getResult) + _entityCreationTest = dG(_entityCreationTest) + + + def testUserCreation(self): + return self._entityCreationTest("User") + + + def testGroupCreation(self): + return self._entityCreationTest("Group") + + + def testUserRetrieval(self): + realm = service.InMemoryWordsRealm("realmname") + + # Make a user to play around with + d = wFD(realm.createUser(u"testuser")) + yield d + user = d.getResult() + + # Make sure getting the user returns the same object + d = wFD(realm.getUser(u"testuser")) + yield d + retrieved = d.getResult() + self.assertIdentical(user, retrieved) + + # Make sure looking up the user also returns the same object + d = wFD(realm.lookupUser(u"testuser")) + yield d + lookedUp = d.getResult() + self.assertIdentical(retrieved, lookedUp) + + # Make sure looking up a user who does not exist fails + d = wFD(realm.lookupUser(u"nosuchuser")) + yield d + self.assertRaises(ewords.NoSuchUser, d.getResult) + testUserRetrieval = dG(testUserRetrieval) + + + def testUserAddition(self): + realm = service.InMemoryWordsRealm("realmname") + + # Create and manually add a user to the realm + p = service.User("testuser") + d = wFD(realm.addUser(p)) + yield d + user = d.getResult() + self.assertIdentical(p, user) + + # Make sure getting that user returns the same object + d = wFD(realm.getUser(u"testuser")) + yield d + retrieved = d.getResult() + self.assertIdentical(user, retrieved) + + # Make sure looking up that user returns the same object + d = wFD(realm.lookupUser(u"testuser")) + yield d + lookedUp = d.getResult() + self.assertIdentical(retrieved, lookedUp) + testUserAddition = dG(testUserAddition) + + + def testGroupRetrieval(self): + realm = service.InMemoryWordsRealm("realmname") + + d = wFD(realm.createGroup(u"testgroup")) + yield d + group = 
d.getResult() + + d = wFD(realm.getGroup(u"testgroup")) + yield d + retrieved = d.getResult() + + self.assertIdentical(group, retrieved) + + d = wFD(realm.getGroup(u"nosuchgroup")) + yield d + self.assertRaises(ewords.NoSuchGroup, d.getResult) + testGroupRetrieval = dG(testGroupRetrieval) + + + def testGroupAddition(self): + realm = service.InMemoryWordsRealm("realmname") + + p = service.Group("testgroup") + d = wFD(realm.addGroup(p)) + yield d + d.getResult() + + d = wFD(realm.getGroup(u"testGroup")) + yield d + group = d.getResult() + + self.assertIdentical(p, group) + testGroupAddition = dG(testGroupAddition) + + + def testGroupUsernameCollision(self): + """ + Try creating a group with the same name as an existing user and + assert that it succeeds, since users and groups should not be in the + same namespace and collisions should be impossible. + """ + realm = service.InMemoryWordsRealm("realmname") + + d = wFD(realm.createUser(u"test")) + yield d + user = d.getResult() + + d = wFD(realm.createGroup(u"test")) + yield d + group = d.getResult() + testGroupUsernameCollision = dG(testGroupUsernameCollision) + + + def testEnumeration(self): + realm = service.InMemoryWordsRealm("realmname") + d = wFD(realm.createGroup(u"groupone")) + yield d + d.getResult() + + d = wFD(realm.createGroup(u"grouptwo")) + yield d + d.getResult() + + groups = wFD(realm.itergroups()) + yield groups + groups = groups.getResult() + + n = [g.name for g in groups] + n.sort() + self.assertEquals(n, ["groupone", "grouptwo"]) + testEnumeration = dG(testEnumeration) + + +class TestGroup(object): + def __init__(self, name, size, topic): + self.name = name + self.size = lambda: size + self.meta = {'topic': topic} + + +class TestUser(object): + def __init__(self, name, groups, signOn, lastMessage): + self.name = name + self.itergroups = lambda: iter([TestGroup(g, 3, 'Hello') for g in groups]) + self.signOn = signOn + self.lastMessage = lastMessage + + +class TestPortal(object): + def __init__(self): + self.logins = [] + + + def login(self, credentials, mind, *interfaces): + d = Deferred() + self.logins.append((credentials, mind, interfaces, d)) + return d + + +class TestCaseUserAgg(object): + def __init__(self, user, realm, factory, address=address.IPv4Address('TCP', '127.0.0.1', 54321)): + self.user = user + self.transport = proto_helpers.StringTransportWithDisconnection() + self.protocol = factory.buildProtocol(address) + self.transport.protocol = self.protocol + self.user.mind = self.protocol + self.protocol.makeConnection(self.transport) + + + def write(self, stuff): + if isinstance(stuff, unicode): + stuff = stuff.encode('utf-8') + self.protocol.dataReceived(stuff) + + +class IRCProtocolTestCase(unittest.TestCase): + STATIC_USERS = [ + u'useruser', u'otheruser', u'someguy', u'firstuser', u'username', + u'userone', u'usertwo', u'userthree', u'someuser'] + + + def setUp(self): + self.realm = service.InMemoryWordsRealm("realmname") + self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse() + self.portal = portal.Portal(self.realm, [self.checker]) + self.factory = service.IRCFactory(self.realm, self.portal) + + c = [] + for nick in self.STATIC_USERS: + c.append(self.realm.createUser(nick)) + self.checker.addUser(nick.encode('ascii'), nick + "_password") + return DeferredList(c) + + + def _assertGreeting(self, user): + """ + The user has been greeted with the four messages that are (usually) + considered to start an IRC session. + + Asserts that the required responses were received. 
+ """ + # Make sure we get 1-4 at least + response = self._response(user) + expected = [irc.RPL_WELCOME, irc.RPL_YOURHOST, irc.RPL_CREATED, + irc.RPL_MYINFO] + for (prefix, command, args) in response: + if command in expected: + expected.remove(command) + self.failIf(expected, "Missing responses for %r" % (expected,)) + + + def _login(self, user, nick, password=None): + if password is None: + password = nick + "_password" + user.write('PASS %s\r\n' % (password,)) + user.write('NICK %s extrainfo\r\n' % (nick,)) + + + def _loggedInUser(self, name): + d = wFD(self.realm.lookupUser(name)) + yield d + user = d.getResult() + agg = TestCaseUserAgg(user, self.realm, self.factory) + self._login(agg, name) + yield agg + _loggedInUser = dG(_loggedInUser) + + + def _response(self, user, messageType=None): + """ + Extracts the user's response, and returns a list of parsed lines. + If messageType is defined, only messages of that type will be returned. + """ + response = user.transport.value().splitlines() + user.transport.clear() + result = [] + for message in map(irc.parsemsg, response): + if messageType is None or message[1] == messageType: + result.append(message) + return result + + + def testPASSLogin(self): + user = wFD(self._loggedInUser(u'firstuser')) + yield user + user = user.getResult() + self._assertGreeting(user) + testPASSLogin = dG(testPASSLogin) + + + def test_nickServLogin(self): + """ + Sending NICK without PASS will prompt the user for their password. + When the user sends their password to NickServ, it will respond with a + Greeting. + """ + firstuser = wFD(self.realm.lookupUser(u'firstuser')) + yield firstuser + firstuser = firstuser.getResult() + + user = TestCaseUserAgg(firstuser, self.realm, self.factory) + user.write('NICK firstuser extrainfo\r\n') + response = self._response(user, 'PRIVMSG') + self.assertEquals(len(response), 1) + self.assertEquals(response[0][0], service.NICKSERV) + self.assertEquals(response[0][1], 'PRIVMSG') + self.assertEquals(response[0][2], ['firstuser', 'Password?']) + user.transport.clear() + + user.write('PRIVMSG nickserv firstuser_password\r\n') + self._assertGreeting(user) + test_nickServLogin = dG(test_nickServLogin) + + + def testFailedLogin(self): + firstuser = wFD(self.realm.lookupUser(u'firstuser')) + yield firstuser + firstuser = firstuser.getResult() + + user = TestCaseUserAgg(firstuser, self.realm, self.factory) + self._login(user, "firstuser", "wrongpass") + response = self._response(user, "PRIVMSG") + self.assertEquals(len(response), 1) + self.assertEquals(response[0][2], ['firstuser', 'Login failed. 
Goodbye.']) + testFailedLogin = dG(testFailedLogin) + + + def testLogout(self): + logout = [] + firstuser = wFD(self.realm.lookupUser(u'firstuser')) + yield firstuser + firstuser = firstuser.getResult() + + user = TestCaseUserAgg(firstuser, self.realm, self.factory) + self._login(user, "firstuser") + user.protocol.logout = lambda: logout.append(True) + user.write('QUIT\r\n') + self.assertEquals(logout, [True]) + testLogout = dG(testLogout) + + + def testJoin(self): + firstuser = wFD(self.realm.lookupUser(u'firstuser')) + yield firstuser + firstuser = firstuser.getResult() + + somechannel = wFD(self.realm.createGroup(u"somechannel")) + yield somechannel + somechannel = somechannel.getResult() + + somechannel.meta['topic'] = 'some random topic' + + # Bring in one user, make sure he gets into the channel sanely + user = TestCaseUserAgg(firstuser, self.realm, self.factory) + self._login(user, "firstuser") + user.transport.clear() + user.write('JOIN #somechannel\r\n') + + response = self._response(user) + self.assertEquals(len(response), 5) + + # Join message + self.assertEquals(response[0][0], 'firstuser!firstuser@realmname') + self.assertEquals(response[0][1], 'JOIN') + self.assertEquals(response[0][2], ['#somechannel']) + + # User list + self.assertEquals(response[1][1], '353') + self.assertEquals(response[2][1], '366') + + # Topic (or lack thereof, as the case may be) + self.assertEquals(response[3][1], '332') + self.assertEquals(response[4][1], '333') + + + # Hook up another client! It is a CHAT SYSTEM!!!!!!! + other = wFD(self._loggedInUser(u'otheruser')) + yield other + other = other.getResult() + + other.transport.clear() + user.transport.clear() + other.write('JOIN #somechannel\r\n') + + # At this point, both users should be in the channel + response = self._response(other) + + event = self._response(user) + self.assertEquals(len(event), 1) + self.assertEquals(event[0][0], 'otheruser!otheruser@realmname') + self.assertEquals(event[0][1], 'JOIN') + self.assertEquals(event[0][2], ['#somechannel']) + + self.assertEquals(response[1][0], 'realmname') + self.assertEquals(response[1][1], '353') + self.assertEquals(response[1][2], ['otheruser', '=', '#somechannel', 'firstuser otheruser']) + testJoin = dG(testJoin) + + + def test_joinTopicless(self): + """ + When a user joins a group without a topic, no topic information is + sent to that user. 
+ """ + firstuser = wFD(self.realm.lookupUser(u'firstuser')) + yield firstuser + firstuser = firstuser.getResult() + + somechannel = wFD(self.realm.createGroup(u"somechannel")) + yield somechannel + somechannel = somechannel.getResult() + + # Bring in one user, make sure he gets into the channel sanely + user = TestCaseUserAgg(firstuser, self.realm, self.factory) + self._login(user, "firstuser") + user.transport.clear() + user.write('JOIN #somechannel\r\n') + + response = self._response(user) + responseCodes = [r[1] for r in response] + self.assertNotIn('332', responseCodes) + self.assertNotIn('333', responseCodes) + test_joinTopicless = dG(test_joinTopicless) + + + def testLeave(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + somechannel = wFD(self.realm.createGroup(u"somechannel")) + yield somechannel + somechannel = somechannel.getResult() + + user.write('JOIN #somechannel\r\n') + user.transport.clear() + + other = wFD(self._loggedInUser(u'otheruser')) + yield other + other = other.getResult() + + other.write('JOIN #somechannel\r\n') + + user.transport.clear() + other.transport.clear() + + user.write('PART #somechannel\r\n') + + response = self._response(user) + event = self._response(other) + + self.assertEquals(len(response), 1) + self.assertEquals(response[0][0], 'useruser!useruser@realmname') + self.assertEquals(response[0][1], 'PART') + self.assertEquals(response[0][2], ['#somechannel', 'leaving']) + self.assertEquals(response, event) + + # Now again, with a part message + user.write('JOIN #somechannel\r\n') + + user.transport.clear() + other.transport.clear() + + user.write('PART #somechannel :goodbye stupidheads\r\n') + + response = self._response(user) + event = self._response(other) + + self.assertEquals(len(response), 1) + self.assertEquals(response[0][0], 'useruser!useruser@realmname') + self.assertEquals(response[0][1], 'PART') + self.assertEquals(response[0][2], ['#somechannel', 'goodbye stupidheads']) + self.assertEquals(response, event) + testLeave = dG(testLeave) + + + def testGetTopic(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + group = service.Group("somechannel") + group.meta["topic"] = "This is a test topic." + group.meta["topic_author"] = "some_fellow" + group.meta["topic_date"] = 77777777 + + add = wFD(self.realm.addGroup(group)) + yield add + add.getResult() + + user.transport.clear() + user.write("JOIN #somechannel\r\n") + + response = self._response(user) + + self.assertEquals(response[3][0], 'realmname') + self.assertEquals(response[3][1], '332') + + # XXX Sigh. irc.parsemsg() is not as correct as one might hope. 
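+ # 332 is RPL_TOPIC (the topic text); 333 reports who set the topic and
+ # when, matching the topic_author and topic_date metadata set above.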
+ self.assertEquals(response[3][2], ['useruser', '#somechannel', 'This is a test topic.']) + self.assertEquals(response[4][1], '333') + self.assertEquals(response[4][2], ['useruser', '#somechannel', 'some_fellow', '77777777']) + + user.transport.clear() + + user.write('TOPIC #somechannel\r\n') + + response = self._response(user) + + self.assertEquals(response[0][1], '332') + self.assertEquals(response[0][2], ['useruser', '#somechannel', 'This is a test topic.']) + self.assertEquals(response[1][1], '333') + self.assertEquals(response[1][2], ['useruser', '#somechannel', 'some_fellow', '77777777']) + testGetTopic = dG(testGetTopic) + + + def testSetTopic(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + add = wFD(self.realm.createGroup(u"somechannel")) + yield add + somechannel = add.getResult() + + user.write("JOIN #somechannel\r\n") + + other = wFD(self._loggedInUser(u'otheruser')) + yield other + other = other.getResult() + + other.write("JOIN #somechannel\r\n") + + user.transport.clear() + other.transport.clear() + + other.write('TOPIC #somechannel :This is the new topic.\r\n') + + response = self._response(other) + event = self._response(user) + + self.assertEquals(response, event) + + self.assertEquals(response[0][0], 'otheruser!otheruser@realmname') + self.assertEquals(response[0][1], 'TOPIC') + self.assertEquals(response[0][2], ['#somechannel', 'This is the new topic.']) + + other.transport.clear() + + somechannel.meta['topic_date'] = 12345 + other.write('TOPIC #somechannel\r\n') + + response = self._response(other) + self.assertEquals(response[0][1], '332') + self.assertEquals(response[0][2], ['otheruser', '#somechannel', 'This is the new topic.']) + self.assertEquals(response[1][1], '333') + self.assertEquals(response[1][2], ['otheruser', '#somechannel', 'otheruser', '12345']) + + other.transport.clear() + other.write('TOPIC #asdlkjasd\r\n') + + response = self._response(other) + self.assertEquals(response[0][1], '403') + testSetTopic = dG(testSetTopic) + + + def testGroupMessage(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + add = wFD(self.realm.createGroup(u"somechannel")) + yield add + somechannel = add.getResult() + + user.write("JOIN #somechannel\r\n") + + other = wFD(self._loggedInUser(u'otheruser')) + yield other + other = other.getResult() + + other.write("JOIN #somechannel\r\n") + + user.transport.clear() + other.transport.clear() + + user.write('PRIVMSG #somechannel :Hello, world.\r\n') + + response = self._response(user) + event = self._response(other) + + self.failIf(response) + self.assertEquals(len(event), 1) + self.assertEquals(event[0][0], 'useruser!useruser@realmname') + self.assertEquals(event[0][1], 'PRIVMSG', -1) + self.assertEquals(event[0][2], ['#somechannel', 'Hello, world.']) + testGroupMessage = dG(testGroupMessage) + + + def testPrivateMessage(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + other = wFD(self._loggedInUser(u'otheruser')) + yield other + other = other.getResult() + + user.transport.clear() + other.transport.clear() + + user.write('PRIVMSG otheruser :Hello, monkey.\r\n') + + response = self._response(user) + event = self._response(other) + + self.failIf(response) + self.assertEquals(len(event), 1) + self.assertEquals(event[0][0], 'useruser!useruser@realmname') + self.assertEquals(event[0][1], 'PRIVMSG') + self.assertEquals(event[0][2], ['otheruser', 'Hello, monkey.']) + + user.write('PRIVMSG 
nousernamedthis :Hello, monkey.\r\n') + + response = self._response(user) + + self.assertEquals(len(response), 1) + self.assertEquals(response[0][0], 'realmname') + self.assertEquals(response[0][1], '401') + self.assertEquals(response[0][2], ['useruser', 'nousernamedthis', 'No such nick/channel.']) + testPrivateMessage = dG(testPrivateMessage) + + + def testOper(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + user.transport.clear() + user.write('OPER user pass\r\n') + response = self._response(user) + + self.assertEquals(len(response), 1) + self.assertEquals(response[0][1], '491') + testOper = dG(testOper) + + + def testGetUserMode(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + user.transport.clear() + user.write('MODE useruser\r\n') + + response = self._response(user) + self.assertEquals(len(response), 1) + self.assertEquals(response[0][0], 'realmname') + self.assertEquals(response[0][1], '221') + self.assertEquals(response[0][2], ['useruser', '+']) + testGetUserMode = dG(testGetUserMode) + + + def testSetUserMode(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + user.transport.clear() + user.write('MODE useruser +abcd\r\n') + + response = self._response(user) + self.assertEquals(len(response), 1) + self.assertEquals(response[0][1], '472') + testSetUserMode = dG(testSetUserMode) + + + def testGetGroupMode(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + add = wFD(self.realm.createGroup(u"somechannel")) + yield add + somechannel = add.getResult() + + user.write('JOIN #somechannel\r\n') + + user.transport.clear() + user.write('MODE #somechannel\r\n') + + response = self._response(user) + self.assertEquals(len(response), 1) + self.assertEquals(response[0][1], '324') + testGetGroupMode = dG(testGetGroupMode) + + + def testSetGroupMode(self): + user = wFD(self._loggedInUser(u'useruser')) + yield user + user = user.getResult() + + group = wFD(self.realm.createGroup(u"groupname")) + yield group + group = group.getResult() + + user.write('JOIN #groupname\r\n') + + user.transport.clear() + user.write('MODE #groupname +abcd\r\n') + + response = self._response(user) + self.assertEquals(len(response), 1) + self.assertEquals(response[0][1], '472') + testSetGroupMode = dG(testSetGroupMode) + + + def testWho(self): + group = service.Group('groupname') + add = wFD(self.realm.addGroup(group)) + yield add + add.getResult() + + users = [] + for nick in u'userone', u'usertwo', u'userthree': + u = wFD(self._loggedInUser(nick)) + yield u + u = u.getResult() + users.append(u) + users[-1].write('JOIN #groupname\r\n') + for user in users: + user.transport.clear() + + users[0].write('WHO #groupname\r\n') + + r = self._response(users[0]) + self.failIf(self._response(users[1])) + self.failIf(self._response(users[2])) + + wantusers = ['userone', 'usertwo', 'userthree'] + for (prefix, code, stuff) in r[:-1]: + self.assertEquals(prefix, 'realmname') + self.assertEquals(code, '352') + + (myname, group, theirname, theirhost, theirserver, theirnick, flag, extra) = stuff + self.assertEquals(myname, 'userone') + self.assertEquals(group, '#groupname') + self.failUnless(theirname in wantusers) + self.assertEquals(theirhost, 'realmname') + self.assertEquals(theirserver, 'realmname') + wantusers.remove(theirnick) + self.assertEquals(flag, 'H') + self.assertEquals(extra, '0 ' + theirnick) + self.failIf(wantusers) + + prefix, code, stuff = 
r[-1] + self.assertEquals(prefix, 'realmname') + self.assertEquals(code, '315') + myname, channel, extra = stuff + self.assertEquals(myname, 'userone') + self.assertEquals(channel, '#groupname') + self.assertEquals(extra, 'End of /WHO list.') + testWho = dG(testWho) + + + def testList(self): + user = wFD(self._loggedInUser(u"someuser")) + yield user + user = user.getResult() + user.transport.clear() + + somegroup = wFD(self.realm.createGroup(u"somegroup")) + yield somegroup + somegroup = somegroup.getResult() + somegroup.size = lambda: succeed(17) + somegroup.meta['topic'] = 'this is the topic woo' + + # Test one group + user.write('LIST #somegroup\r\n') + + r = self._response(user) + self.assertEquals(len(r), 2) + resp, end = r + + self.assertEquals(resp[0], 'realmname') + self.assertEquals(resp[1], '322') + self.assertEquals(resp[2][0], 'someuser') + self.assertEquals(resp[2][1], 'somegroup') + self.assertEquals(resp[2][2], '17') + self.assertEquals(resp[2][3], 'this is the topic woo') + + self.assertEquals(end[0], 'realmname') + self.assertEquals(end[1], '323') + self.assertEquals(end[2][0], 'someuser') + self.assertEquals(end[2][1], 'End of /LIST') + + user.transport.clear() + # Test all groups + + user.write('LIST\r\n') + r = self._response(user) + self.assertEquals(len(r), 2) + + fg1, end = r + + self.assertEquals(fg1[1], '322') + self.assertEquals(fg1[2][1], 'somegroup') + self.assertEquals(fg1[2][2], '17') + self.assertEquals(fg1[2][3], 'this is the topic woo') + + self.assertEquals(end[1], '323') + testList = dG(testList) + + + def testWhois(self): + user = wFD(self._loggedInUser(u'someguy')) + yield user + user = user.getResult() + + otherguy = service.User("otherguy") + otherguy.itergroups = lambda: iter([ + service.Group('groupA'), + service.Group('groupB')]) + otherguy.signOn = 10 + otherguy.lastMessage = time.time() - 15 + + add = wFD(self.realm.addUser(otherguy)) + yield add + add.getResult() + + user.transport.clear() + user.write('WHOIS otherguy\r\n') + r = self._response(user) + + self.assertEquals(len(r), 5) + wuser, wserver, idle, channels, end = r + + self.assertEquals(wuser[0], 'realmname') + self.assertEquals(wuser[1], '311') + self.assertEquals(wuser[2][0], 'someguy') + self.assertEquals(wuser[2][1], 'otherguy') + self.assertEquals(wuser[2][2], 'otherguy') + self.assertEquals(wuser[2][3], 'realmname') + self.assertEquals(wuser[2][4], '*') + self.assertEquals(wuser[2][5], 'otherguy') + + self.assertEquals(wserver[0], 'realmname') + self.assertEquals(wserver[1], '312') + self.assertEquals(wserver[2][0], 'someguy') + self.assertEquals(wserver[2][1], 'otherguy') + self.assertEquals(wserver[2][2], 'realmname') + self.assertEquals(wserver[2][3], 'Hi mom!') + + self.assertEquals(idle[0], 'realmname') + self.assertEquals(idle[1], '317') + self.assertEquals(idle[2][0], 'someguy') + self.assertEquals(idle[2][1], 'otherguy') + self.assertEquals(idle[2][2], '15') + self.assertEquals(idle[2][3], '10') + self.assertEquals(idle[2][4], "seconds idle, signon time") + + self.assertEquals(channels[0], 'realmname') + self.assertEquals(channels[1], '319') + self.assertEquals(channels[2][0], 'someguy') + self.assertEquals(channels[2][1], 'otherguy') + self.assertEquals(channels[2][2], '#groupA #groupB') + + self.assertEquals(end[0], 'realmname') + self.assertEquals(end[1], '318') + self.assertEquals(end[2][0], 'someguy') + self.assertEquals(end[2][1], 'otherguy') + self.assertEquals(end[2][2], 'End of WHOIS list.') + testWhois = dG(testWhois) + + +class TestMind(service.PBMind): + def 
__init__(self, *a, **kw): + self.joins = [] + self.parts = [] + self.messages = [] + self.meta = [] + + def remote_userJoined(self, user, group): + self.joins.append((user, group)) + + + def remote_userLeft(self, user, group, reason): + self.parts.append((user, group, reason)) + + + def remote_receive(self, sender, recipient, message): + self.messages.append((sender, recipient, message)) + + + def remote_groupMetaUpdate(self, group, meta): + self.meta.append((group, meta)) +pb.setUnjellyableForClass(TestMind, service.PBMindReference) + + +class PBProtocolTestCase(unittest.TestCase): + def setUp(self): + self.realm = service.InMemoryWordsRealm("realmname") + self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse() + self.portal = portal.Portal( + self.realm, [self.checker]) + self.serverFactory = pb.PBServerFactory(self.portal) + self.serverFactory.protocol = self._protocolFactory + self.serverFactory.unsafeTracebacks = True + self.clientFactory = pb.PBClientFactory() + self.clientFactory.unsafeTracebacks = True + self.serverPort = reactor.listenTCP(0, self.serverFactory) + self.clientConn = reactor.connectTCP( + '127.0.0.1', + self.serverPort.getHost().port, + self.clientFactory) + + + def _protocolFactory(self, *args, **kw): + self._serverProtocol = pb.Broker(0) + return self._serverProtocol + + + def tearDown(self): + d3 = Deferred() + self._serverProtocol.notifyOnDisconnect(lambda: d3.callback(None)) + return DeferredList([ + maybeDeferred(self.serverPort.stopListening), + maybeDeferred(self.clientConn.disconnect), d3]) + + + def _loggedInAvatar(self, name, password, mind): + creds = credentials.UsernamePassword(name, password) + self.checker.addUser(name.encode('ascii'), password) + d = self.realm.createUser(name) + d.addCallback(lambda ign: self.clientFactory.login(creds, mind)) + return d + + + def testGroups(self): + mindone = TestMind() + one = wFD(self._loggedInAvatar(u"one", "p1", mindone)) + yield one + one = one.getResult() + + mindtwo = TestMind() + two = wFD(self._loggedInAvatar(u"two", "p2", mindtwo)) + yield two + two = two.getResult() + + add = wFD(self.realm.createGroup(u"foobar")) + yield add + add.getResult() + + groupone = wFD(one.join(u"foobar")) + yield groupone + groupone = groupone.getResult() + + grouptwo = wFD(two.join(u"foobar")) + yield grouptwo + grouptwo = grouptwo.getResult() + + msg = wFD(groupone.send({"text": "hello, monkeys"})) + yield msg + msg = msg.getResult() + + leave = wFD(groupone.leave()) + yield leave + leave = leave.getResult() + testGroups = dG(testGroups) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_tap.py b/vendor/Twisted-10.0.0/twisted/words/test/test_tap.py new file mode 100644 index 000000000000..142d681df4ed --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_tap.py @@ -0,0 +1,78 @@ +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.cred import credentials, error +from twisted.words import tap +from twisted.trial import unittest + + + +class WordsTap(unittest.TestCase): + """ + Ensures that the twisted.words.tap API works. + """ + + PASSWD_TEXT = "admin:admin\njoe:foo\n" + admin = credentials.UsernamePassword('admin', 'admin') + joeWrong = credentials.UsernamePassword('joe', 'bar') + + + def setUp(self): + """ + Create a file with two users. + """ + self.filename = self.mktemp() + self.file = open(self.filename, 'w') + self.file.write(self.PASSWD_TEXT) + self.file.flush() + + + def tearDown(self): + """ + Close the dummy user database. 
+ """ + self.file.close() + + + def test_hostname(self): + """ + Tests that the --hostname parameter gets passed to Options. + """ + opt = tap.Options() + opt.parseOptions(['--hostname', 'myhost']) + self.assertEquals(opt['hostname'], 'myhost') + + + def test_passwd(self): + """ + Tests the --passwd command for backwards-compatibility. + """ + opt = tap.Options() + opt.parseOptions(['--passwd', self.file.name]) + self._loginTest(opt) + + + def test_auth(self): + """ + Tests that the --auth command generates a checker. + """ + opt = tap.Options() + opt.parseOptions(['--auth', 'file:'+self.file.name]) + self._loginTest(opt) + + + def _loginTest(self, opt): + """ + This method executes both positive and negative authentication + tests against whatever credentials checker has been stored in + the Options class. + + @param opt: An instance of L{tap.Options}. + """ + self.assertEquals(len(opt['credCheckers']), 1) + checker = opt['credCheckers'][0] + self.assertFailure(checker.requestAvatarId(self.joeWrong), + error.UnauthorizedLogin) + def _gotAvatar(username): + self.assertEquals(username, self.admin.username) + return checker.requestAvatarId(self.admin).addCallback(_gotAvatar) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_toc.py b/vendor/Twisted-10.0.0/twisted/words/test/test_toc.py new file mode 100644 index 000000000000..907db4bedc89 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_toc.py @@ -0,0 +1,340 @@ +# Copyright (c) 2001-2009 Twisted Matrix Laboratories. +# See LICENSE for details. + + +from twisted.trial import unittest + +from twisted.words.protocols import toc +from twisted.internet import protocol, main +from twisted.python import failure +from twisted.test.proto_helpers import StringIOWithoutClosing + +from struct import pack,unpack + + +class DummyTOC(toc.TOC): + """ + used to override authentication, now overrides printing. + """ + def _debug(self,data): + pass +SEQID=1001 +def flap(type,data): + global SEQID + send="*" + send=send+pack("!BHH",type,SEQID,len(data)) + send=send+data + SEQID=SEQID+1 + return send +def readFlap(data): + if data=="": return [None,""] + null,type,seqid,length=unpack("!BBHH",data[:6]) + val=data[6:6+length] + return [[type,val],data[6+length:]] + +class TOCGeneralTestCase(unittest.TestCase): + """ + general testing of TOC functions. 
+ """ + def testTOC(self): + self.runTest() + def runTest(self): + USERS=2 + data=range(USERS) + data[0]=("FLAPON\r\n\r\n",\ + flap(1,"\000\000\000\001\000\001\000\004test"),\ + flap(2,"toc_signon localhost 9999 test 0x100000 english \"penguin 0.1\"\000"),\ + flap(2,"toc_add_buddy test\000"),\ + flap(2,"toc_init_done\000"),\ + flap(2,"toc_send_im test \"hi\"\000"),\ + flap(2,"toc_send_im test2 \"hello\"\000"),\ + flap(2,"toc_set_away \"not here\"\000"),\ + flap(2,"toc_set_idle 602\000"),\ + flap(2,"toc_set_idle 0\000"),\ + flap(2,"toc_set_away\000"),\ + flap(2,"toc_evil test norm\000"),\ + flap(2,"toc_chat_join 4 \"Test Chat\"\000"),\ + flap(2,"toc_chat_send 0 \"hello\"\000"),\ + #flap(2,"toc_chat_leave 0\000")) #,\ + flap(2,"toc_chat_invite 0 \"come\" ooga\000"),\ + #flap(2,"toc_chat_accept 0\000"),\ + flap(5,"\000"),\ + flap(2,"toc_chat_whisper 0 ooga \"boo ga\"\000"),\ + flap(2,"toc_chat_leave 0"),\ + flap(5,"\000")) + data[1]=("FLAPON\r\n\r\n",\ + flap(1,"\000\000\000\001\000\001\000\004ooga"),\ + flap(2,"toc_signon localhost 9999 ooga 0x100000 english \"penguin 0.1\"\000"),\ + flap(2,"toc_add_buddy test\000"),\ + flap(2,"toc_init_done\000"),\ + flap(5,"\000"),\ + flap(5,"\000"),\ + #flap(5,"\000"),\ + #flap(5,"\000"),\ + #flap(5,"\000"),\ + flap(5,"\000"),\ + flap(5,"\000"),\ + flap(5,"\000"),\ + flap(5,"\000"),\ + flap(5,"\000"),\ + flap(5,"\000"),\ + flap(5,"\000"),\ + #flap(5,"\000"),\ + flap(2,"toc_chat_accept 0\000"),\ + flap(2,"toc_chat_send 0 \"hi test\"\000"),\ + flap(5,"\000"),\ + flap(2,"toc_chat_leave 0\000")) + strings=range(USERS) + for i in strings: + strings[i]=StringIOWithoutClosing() + fac=toc.TOCFactory() + dummy=range(USERS) + for i in dummy: + dummy[i]=DummyTOC() + dummy[i].factory=fac + dummy[i].makeConnection(protocol.FileWrapper(strings[i])) + while sum(map(lambda x: x == (), data)) != USERS: + for i in range(USERS): + d=data[i] + if len(d)>0: + k,data[i]=d[0],d[1:] + for j in k: + dummy[i].dataReceived(j) # test by doing a character at a time + else: + dummy[i].connectionLost(failure.Failure(main.CONNECTION_DONE)) + values=range(USERS) + for i in values: + values[i]=strings[i].getvalue() + flaps=map(lambda x:[],range(USERS)) + for value in values: + i=values.index(value) + f,value=readFlap(value) + while f: + flaps[i].append(f) + f,value=readFlap(value) + ts=range(USERS) + for t in ts: + ts[t]=dummy[t].signontime + shouldequal=range(USERS) + shouldequal[0]=[ \ + [1,"\000\000\000\001"],\ + [2,"SIGN_ON:TOC1.0\000"],\ + [2,"NICK:test\000"],\ + [2,"CONFIG:\00"],\ + [2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\ + [2,"IM_IN:test:F:hi\000"],\ + [2,"ERROR:901:test2\000"],\ + #[2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\ + [2,"UPDATE_BUDDY:test:T:0:%s:0: OU\000"%ts[0]],\ + [2,"UPDATE_BUDDY:test:T:0:%s:10: OU\000"%ts[0]],\ + [2,"UPDATE_BUDDY:test:T:0:%s:0: OU\000"%ts[0]],\ + [2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\ + [2,"EVILED:10:test\000"],\ + [2,"UPDATE_BUDDY:test:T:10:%s:0: O\000"%ts[0]],\ + [2,"CHAT_JOIN:0:Test Chat\000"],\ + [2,"CHAT_UPDATE_BUDDY:0:T:test\000"],\ + [2,"CHAT_IN:0:test:F:hello\000"],\ + [2,"CHAT_UPDATE_BUDDY:0:T:ooga\000"],\ + [2,"CHAT_IN:0:ooga:F:hi test\000"],\ + [2,"CHAT_LEFT:0\000"]] + shouldequal[1]=[ \ + [1,"\000\000\000\001"],\ + [2,"SIGN_ON:TOC1.0\000"],\ + [2,"NICK:ooga\000"],\ + [2,"CONFIG:\000"],\ + #[2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\ + [2,"UPDATE_BUDDY:test:T:0:%s:0: OU\000"%ts[0]],\ + [2,"UPDATE_BUDDY:test:T:0:%s:10: OU\000"%ts[0]],\ + [2,"UPDATE_BUDDY:test:T:0:%s:0: OU\000"%ts[0]],\ + 
[2,"UPDATE_BUDDY:test:T:0:%s:0: O\000"%ts[0]],\ + [2,"UPDATE_BUDDY:test:T:10:%s:0: O\000"%ts[0]],\ + [2,"CHAT_INVITE:Test Chat:0:test:come\000"],\ + [2,"CHAT_JOIN:0:Test Chat\000"],\ + [2,"CHAT_UPDATE_BUDDY:0:T:test:ooga\000"],\ + [2,"CHAT_IN:0:ooga:F:hi test\000"],\ + [2,"CHAT_IN:0:test:T:boo ga\000"],\ + [2,"CHAT_UPDATE_BUDDY:0:F:test\000"],\ + [2,"CHAT_LEFT:0\000"]] + if flaps!=shouldequal: + for i in range(len(shouldequal)): + for j in range(len(shouldequal[i])): + if shouldequal[i][j]!=flaps[i][j]: + raise AssertionError("GeneralTest Failed!\nUser %s Line %s\nactual:%s\nshould be:%s"%(i,j,flaps[i][j],shouldequal[i][j])) + raise AssertionError("GeneralTest Failed with incorrect lengths!") +class TOCMultiPacketTestCase(unittest.TestCase): + """ + i saw this problem when using GAIM. It only read the flaps onces per dataReceived, and would basically block if it ever received two packets together in one dataReceived. this tests for that occurance. + """ + def testTOC(self): + self.runTest() + def runTest(self): + packets=["FLAPON\r\n\r\n",\ + flap(1,"\000\000\000\001\000\001\000\004test"),\ + flap(2,"toc_signon null 9999 test 0x100000 english \"penguin 0.1\"\000"),\ + flap(2,"toc_init_done\000"),\ + flap(2,"toc_send_im test hi\000")] + shouldbe=[[1,"\000\000\000\001"],\ + [2,"SIGN_ON:TOC1.0\000"],\ + [2,"NICK:test\000"],\ + [2,"CONFIG:\000"],\ + [2,"IM_IN:test:F:hi\000"]] + data="" + for i in packets: + data=data+i + s=StringIOWithoutClosing() + d=DummyTOC() + fac=toc.TOCFactory() + d.factory=fac + d.makeConnection(protocol.FileWrapper(s)) + d.dataReceived(data) + d.connectionLost(failure.Failure(main.CONNECTION_DONE)) + value=s.getvalue() + flaps=[] + f,value=readFlap(value) + while f: + flaps.append(f) + f,value=readFlap(value) + if flaps!=shouldbe: + for i in range(len(flaps)): + if flaps[i]!=shouldbe[i]:raise AssertionError("MultiPacketTest Failed!\nactual:%s\nshould be:%s"%(flaps[i],shouldbe[i])) + raise AssertionError("MultiPacketTest Failed with incorrect length!, printing both lists\nactual:%s\nshould be:%s"%(flaps,shouldbe)) +class TOCSavedValuesTestCase(unittest.TestCase): + def testTOC(self): + self.runTest() + def runTest(self): + password1=toc.roast("test pass") + password2=toc.roast("pass test") + beforesend=[\ + "FLAPON\r\n\r\n",\ + flap(1,"\000\000\000\001\000\001\000\004test"),\ + flap(2,"toc_signon localhost 9999 test %s english \"penguin 0.1\"\000"%password1),\ + flap(2,"toc_init_done\000"),\ + flap(2,"toc_set_config \"{m 4}\"\000"),\ + flap(2,"toc_format_nickname BOOGA\000"),\ + flap(2,"toc_format_nickname \"T E S T\"\000"),\ + flap(2,"toc_change_passwd \"testpass\" \"pass test\"\000"),\ + flap(2,"toc_change_passwd \"test pass\" \"pass test\"\000")] + beforeexpect=[\ + [1,"\000\000\000\001"],\ + [2,"SIGN_ON:TOC1.0\000"],\ + [2,"NICK:test\000"],\ + [2,"CONFIG:\000"],\ + [2,"ERROR:911\000"],\ + [2,"ADMIN_NICK_STATUS:0\000"],\ + [2,"ERROR:911\000"],\ + [2,"ADMIN_PASSWD_STATUS:0\000"]] + badpasssend=[\ + "FLAPON\r\n\r\n",\ + flap(1,"\000\000\000\001\000\001\000\004test"),\ + flap(2,"toc_signon localhost 9999 test 0x1000 english \"penguin 0.1\"\000"),\ + flap(2,"toc_init_done")] + badpassexpect=[\ + [1,"\000\00\000\001"],\ + [2,"ERROR:980\000"]] + goodpasssend=[\ + "FLAPON\r\n\r\n",\ + flap(1,"\000\000\000\001\000\001\000\004test"),\ + flap(2,"toc_signon localhost 9999 test %s english \"penguin 0.1\"\000"%password2),\ + flap(2,"toc_init_done")] + goodpassexpect=[\ + [1,"\000\000\000\001"],\ + [2,"SIGN_ON:TOC1.0\000"],\ + [2,"NICK:T E S T\000"],\ + [2,"CONFIG:{m 4}\000"]] + 
fac=toc.TOCFactory() + d=DummyTOC() + d.factory=fac + s=StringIOWithoutClosing() + d.makeConnection(protocol.FileWrapper(s)) + for i in beforesend: + d.dataReceived(i) + d.connectionLost(failure.Failure(main.CONNECTION_DONE)) + v=s.getvalue() + flaps=[] + f,v=readFlap(v) + while f: + flaps.append(f) + f,v=readFlap(v) + if flaps!=beforeexpect: + for i in range(len(flaps)): + if flaps[i]!=beforeexpect[i]: + raise AssertionError("SavedValuesTest Before Failed!\nactual:%s\nshould be:%s"%(flaps[i],beforeexpect[i])) + raise AssertionError("SavedValuesTest Before Failed with incorrect length!\nactual:%s\nshould be:%s"%(flaps,beforeexpect)) + d=DummyTOC() + d.factory=fac + s=StringIOWithoutClosing() + d.makeConnection(protocol.FileWrapper(s)) + for i in badpasssend: + d.dataReceived(i) + d.connectionLost(failure.Failure(main.CONNECTION_DONE)) + v=s.getvalue() + flaps=[] + f,v=readFlap(v) + while f: + flaps.append(f) + f,v=readFlap(v) + if flaps!=badpassexpect: + for i in range(len(flaps)): + if flaps[i]!=badpassexpect[i]: + raise AssertionError("SavedValuesTest BadPass Failed!\nactual:%s\nshould be:%s"%(flaps[i],badpassexpect[i])) + raise AssertionError("SavedValuesTest BadPass Failed with incorrect length!\nactual:%s\nshould be:%s"%(flaps,badpassexpect)) + d=DummyTOC() + d.factory=fac + s=StringIOWithoutClosing() + d.makeConnection(protocol.FileWrapper(s)) + for i in goodpasssend: + d.dataReceived(i) + d.connectionLost(failure.Failure(main.CONNECTION_DONE)) + v=s.getvalue() + flaps=[] + f,v=readFlap(v) + while f: + flaps.append(f) + f,v=readFlap(v) + if flaps!=goodpassexpect: + for i in range(len(flaps)): + if flaps[i]!=goodpassexpect[i]: + raise AssertionError("SavedValuesTest GoodPass Failed!\nactual:%s\nshould be:%s"%(flaps[i],goodpassexpect[i])) + raise AssertionError("SavedValuesTest GoodPass Failed with incorrect length!\nactual:%s\nshould be:%s"%(flaps,beforeexpect)) +class TOCPrivacyTestCase(unittest.TestCase): + def runTest(self): + sends=["FLAPON\r\n\r\n",\ + flap(1,"\000\000\000\001\000\001\000\004test"),\ + flap(2,"toc_signon localhost 9999 test 0x00 english penguin\000"),\ + flap(2,"toc_init_done\000"),\ + flap(2,"toc_add_deny\000"),\ + flap(2,"toc_send_im test 1\000"),\ + flap(2,"toc_add_deny test\000"),\ + flap(2,"toc_send_im test 2\000"),\ + flap(2,"toc_add_permit\000"),\ + flap(2,"toc_send_im test 3\000"),\ + flap(2,"toc_add_permit test\000"),\ + flap(2,"toc_send_im test 4\000")] + expect=[[1,"\000\000\000\001"],\ + [2,"SIGN_ON:TOC1.0\000"],\ + [2,"NICK:test\000"],\ + [2,"CONFIG:\000"],\ + [2,"IM_IN:test:F:1\000"],\ + [2,"ERROR:901:test\000"],\ + [2,"ERROR:901:test\000"],\ + [2,"IM_IN:test:F:4\000"]] + d=DummyTOC() + d.factory=toc.TOCFactory() + s=StringIOWithoutClosing() + d.makeConnection(protocol.FileWrapper(s)) + for i in sends: + d.dataReceived(i) + d.connectionLost(failure.Failure(main.CONNECTION_DONE)) + v=s.getvalue() + flaps=[] + f,v=readFlap(v) + while f: + flaps.append(f) + f,v=readFlap(v) + if flaps!=expect: + for i in range(len(flaps)): + if flaps[i]!=expect[i]: + raise AssertionError("PrivacyTest Before Failed!\nactual:%s\nshould be:%s"%(flaps[i],expect[i])) + raise AssertionError("PrivacyTest Before Failed with incorrect length!\nactual:%s\nshould be:%s"%(flaps,expect)) +testCases=[TOCGeneralTestCase,TOCMultiPacketTestCase,TOCSavedValuesTestCase,TOCPrivacyTestCase] + diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_xishutil.py b/vendor/Twisted-10.0.0/twisted/words/test/test_xishutil.py new file mode 100644 index 000000000000..eb0dd4fb47e6 --- /dev/null 
+++ b/vendor/Twisted-10.0.0/twisted/words/test/test_xishutil.py @@ -0,0 +1,345 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Test cases for twisted.words.xish.utility +""" + +from twisted.trial import unittest + +from twisted.python.util import OrderedDict +from twisted.words.xish import utility +from twisted.words.xish.domish import Element +from twisted.words.xish.utility import EventDispatcher + +class CallbackTracker: + """ + Test helper for tracking callbacks. + + Increases a counter on each call to L{call} and stores the object + passed in the call. + """ + + def __init__(self): + self.called = 0 + self.obj = None + + + def call(self, obj): + self.called = self.called + 1 + self.obj = obj + + + +class OrderedCallbackTracker: + """ + Test helper for tracking callbacks and their order. + """ + + def __init__(self): + self.callList = [] + + + def call1(self, object): + self.callList.append(self.call1) + + + def call2(self, object): + self.callList.append(self.call2) + + + def call3(self, object): + self.callList.append(self.call3) + + + +class EventDispatcherTest(unittest.TestCase): + """ + Tests for L{EventDispatcher}. + """ + + def testStuff(self): + d = EventDispatcher() + cb1 = CallbackTracker() + cb2 = CallbackTracker() + cb3 = CallbackTracker() + + d.addObserver("/message/body", cb1.call) + d.addObserver("/message", cb1.call) + d.addObserver("/presence", cb2.call) + d.addObserver("//event/testevent", cb3.call) + + msg = Element(("ns", "message")) + msg.addElement("body") + + pres = Element(("ns", "presence")) + pres.addElement("presence") + + d.dispatch(msg) + self.assertEquals(cb1.called, 2) + self.assertEquals(cb1.obj, msg) + self.assertEquals(cb2.called, 0) + + d.dispatch(pres) + self.assertEquals(cb1.called, 2) + self.assertEquals(cb2.called, 1) + self.assertEquals(cb2.obj, pres) + self.assertEquals(cb3.called, 0) + + d.dispatch(d, "//event/testevent") + self.assertEquals(cb3.called, 1) + self.assertEquals(cb3.obj, d) + + d.removeObserver("/presence", cb2.call) + d.dispatch(pres) + self.assertEquals(cb2.called, 1) + + + def test_addObserverTwice(self): + """ + Test adding two observers for the same query. + + When the event is dispath both of the observers need to be called. + """ + d = EventDispatcher() + cb1 = CallbackTracker() + cb2 = CallbackTracker() + + d.addObserver("//event/testevent", cb1.call) + d.addObserver("//event/testevent", cb2.call) + d.dispatch(d, "//event/testevent") + + self.assertEquals(cb1.called, 1) + self.assertEquals(cb1.obj, d) + self.assertEquals(cb2.called, 1) + self.assertEquals(cb2.obj, d) + + + def test_addObserverInDispatch(self): + """ + Test for registration of an observer during dispatch. + """ + d = EventDispatcher() + msg = Element(("ns", "message")) + cb = CallbackTracker() + + def onMessage(_): + d.addObserver("/message", cb.call) + + d.addOnetimeObserver("/message", onMessage) + + d.dispatch(msg) + self.assertEquals(cb.called, 0) + + d.dispatch(msg) + self.assertEquals(cb.called, 1) + + d.dispatch(msg) + self.assertEquals(cb.called, 2) + + + def test_addOnetimeObserverInDispatch(self): + """ + Test for registration of a onetime observer during dispatch. 
+ """ + d = EventDispatcher() + msg = Element(("ns", "message")) + cb = CallbackTracker() + + def onMessage(msg): + d.addOnetimeObserver("/message", cb.call) + + d.addOnetimeObserver("/message", onMessage) + + d.dispatch(msg) + self.assertEquals(cb.called, 0) + + d.dispatch(msg) + self.assertEquals(cb.called, 1) + + d.dispatch(msg) + self.assertEquals(cb.called, 1) + + + def testOnetimeDispatch(self): + d = EventDispatcher() + msg = Element(("ns", "message")) + cb = CallbackTracker() + + d.addOnetimeObserver("/message", cb.call) + d.dispatch(msg) + self.assertEquals(cb.called, 1) + d.dispatch(msg) + self.assertEquals(cb.called, 1) + + + def testDispatcherResult(self): + d = EventDispatcher() + msg = Element(("ns", "message")) + pres = Element(("ns", "presence")) + cb = CallbackTracker() + + d.addObserver("/presence", cb.call) + result = d.dispatch(msg) + self.assertEquals(False, result) + + result = d.dispatch(pres) + self.assertEquals(True, result) + + + def testOrderedXPathDispatch(self): + d = EventDispatcher() + cb = OrderedCallbackTracker() + d.addObserver("/message/body", cb.call2) + d.addObserver("/message", cb.call3, -1) + d.addObserver("/message/body", cb.call1, 1) + + msg = Element(("ns", "message")) + msg.addElement("body") + d.dispatch(msg) + self.assertEquals(cb.callList, [cb.call1, cb.call2, cb.call3], + "Calls out of order: %s" % + repr([c.__name__ for c in cb.callList])) + + + # Observers are put into CallbackLists that are then put into dictionaries + # keyed by the event trigger. Upon removal of the last observer for a + # particular event trigger, the (now empty) CallbackList and corresponding + # event trigger should be removed from those dictionaries to prevent + # slowdown and memory leakage. + + def test_cleanUpRemoveEventObserver(self): + """ + Test observer clean-up after removeObserver for named events. + """ + + d = EventDispatcher() + cb = CallbackTracker() + + d.addObserver('//event/test', cb.call) + d.dispatch(None, '//event/test') + self.assertEqual(1, cb.called) + d.removeObserver('//event/test', cb.call) + self.assertEqual(0, len(d._eventObservers.pop(0))) + + + def test_cleanUpRemoveXPathObserver(self): + """ + Test observer clean-up after removeObserver for XPath events. + """ + + d = EventDispatcher() + cb = CallbackTracker() + msg = Element((None, "message")) + + d.addObserver('/message', cb.call) + d.dispatch(msg) + self.assertEqual(1, cb.called) + d.removeObserver('/message', cb.call) + self.assertEqual(0, len(d._xpathObservers.pop(0))) + + + def test_cleanUpOnetimeEventObserver(self): + """ + Test observer clean-up after onetime named events. + """ + + d = EventDispatcher() + cb = CallbackTracker() + + d.addOnetimeObserver('//event/test', cb.call) + d.dispatch(None, '//event/test') + self.assertEqual(1, cb.called) + self.assertEqual(0, len(d._eventObservers.pop(0))) + + + def test_cleanUpOnetimeXPathObserver(self): + """ + Test observer clean-up after onetime XPath events. + """ + + d = EventDispatcher() + cb = CallbackTracker() + msg = Element((None, "message")) + + d.addOnetimeObserver('/message', cb.call) + d.dispatch(msg) + self.assertEqual(1, cb.called) + self.assertEqual(0, len(d._xpathObservers.pop(0))) + + + def test_observerRaisingException(self): + """ + Test that exceptions in observers do not bubble up to dispatch. + + The exceptions raised in observers should be logged and other + observers should be called as if nothing happened. 
+ """ + + class OrderedCallbackList(utility.CallbackList): + def __init__(self): + self.callbacks = OrderedDict() + + class TestError(Exception): + pass + + def raiseError(_): + raise TestError() + + d = EventDispatcher() + cb = CallbackTracker() + + originalCallbackList = utility.CallbackList + + try: + utility.CallbackList = OrderedCallbackList + + d.addObserver('//event/test', raiseError) + d.addObserver('//event/test', cb.call) + try: + d.dispatch(None, '//event/test') + except TestError: + self.fail("TestError raised. Should have been logged instead.") + + self.assertEqual(1, len(self.flushLoggedErrors(TestError))) + self.assertEqual(1, cb.called) + finally: + utility.CallbackList = originalCallbackList + + + +class XmlPipeTest(unittest.TestCase): + """ + Tests for L{twisted.words.xish.utility.XmlPipe}. + """ + + def setUp(self): + self.pipe = utility.XmlPipe() + + + def test_sendFromSource(self): + """ + Send an element from the source and observe it from the sink. + """ + def cb(obj): + called.append(obj) + + called = [] + self.pipe.sink.addObserver('/test[@xmlns="testns"]', cb) + element = Element(('testns', 'test')) + self.pipe.source.send(element) + self.assertEquals([element], called) + + + def test_sendFromSink(self): + """ + Send an element from the sink and observe it from the source. + """ + def cb(obj): + called.append(obj) + + called = [] + self.pipe.source.addObserver('/test[@xmlns="testns"]', cb) + element = Element(('testns', 'test')) + self.pipe.sink.send(element) + self.assertEquals([element], called) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_xmlstream.py b/vendor/Twisted-10.0.0/twisted/words/test/test_xmlstream.py new file mode 100644 index 000000000000..743de6876de2 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_xmlstream.py @@ -0,0 +1,201 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.xish.xmlstream}. +""" + +from twisted.internet import protocol +from twisted.trial import unittest +from twisted.words.xish import domish, utility, xmlstream + +class XmlStreamTest(unittest.TestCase): + def setUp(self): + self.outlist = [] + self.xmlstream = xmlstream.XmlStream() + self.xmlstream.transport = self + self.xmlstream.transport.write = self.outlist.append + + + def loseConnection(self): + """ + Stub loseConnection because we are a transport. + """ + self.xmlstream.connectionLost("no reason") + + + def test_send(self): + """ + Sending data should result into it being written to the transport. + """ + self.xmlstream.connectionMade() + self.xmlstream.send("") + self.assertEquals(self.outlist[0], "") + + + def test_receiveRoot(self): + """ + Receiving the starttag of the root element results in stream start. + """ + streamStarted = [] + + def streamStartEvent(rootelem): + streamStarted.append(None) + + self.xmlstream.addObserver(xmlstream.STREAM_START_EVENT, + streamStartEvent) + self.xmlstream.connectionMade() + self.xmlstream.dataReceived("") + self.assertEquals(1, len(streamStarted)) + + + def test_receiveBadXML(self): + """ + Receiving malformed XML should result in in error. 
+ """ + streamError = [] + streamEnd = [] + + def streamErrorEvent(reason): + streamError.append(reason) + + def streamEndEvent(_): + streamEnd.append(None) + + self.xmlstream.addObserver(xmlstream.STREAM_ERROR_EVENT, + streamErrorEvent) + self.xmlstream.addObserver(xmlstream.STREAM_END_EVENT, + streamEndEvent) + self.xmlstream.connectionMade() + + self.xmlstream.dataReceived("") + self.assertEquals(0, len(streamError)) + self.assertEquals(0, len(streamEnd)) + + self.xmlstream.dataReceived("") + self.assertEquals(1, len(streamError)) + self.assertTrue(streamError[0].check(domish.ParserError)) + self.assertEquals(1, len(streamEnd)) + + + +class DummyProtocol(protocol.Protocol, utility.EventDispatcher): + """ + I am a protocol with an event dispatcher without further processing. + + This protocol is only used for testing XmlStreamFactoryMixin to make + sure the bootstrap observers are added to the protocol instance. + """ + + def __init__(self, *args, **kwargs): + self.args = args + self.kwargs = kwargs + self.observers = [] + + utility.EventDispatcher.__init__(self) + + + +class BootstrapMixinTest(unittest.TestCase): + """ + Tests for L{xmlstream.BootstrapMixin}. + + @ivar factory: Instance of the factory or mixin under test. + """ + + def setUp(self): + self.factory = xmlstream.BootstrapMixin() + + + def test_installBootstraps(self): + """ + Dispatching an event should fire registered bootstrap observers. + """ + called = [] + + def cb(data): + called.append(data) + + dispatcher = DummyProtocol() + self.factory.addBootstrap('//event/myevent', cb) + self.factory.installBootstraps(dispatcher) + + dispatcher.dispatch(None, '//event/myevent') + self.assertEquals(1, len(called)) + + + def test_addAndRemoveBootstrap(self): + """ + Test addition and removal of a bootstrap event handler. + """ + + called = [] + + def cb(data): + called.append(data) + + self.factory.addBootstrap('//event/myevent', cb) + self.factory.removeBootstrap('//event/myevent', cb) + + dispatcher = DummyProtocol() + self.factory.installBootstraps(dispatcher) + + dispatcher.dispatch(None, '//event/myevent') + self.assertFalse(called) + + + +class GenericXmlStreamFactoryTestsMixin(BootstrapMixinTest): + """ + Generic tests for L{XmlStream} factories. + """ + + def setUp(self): + self.factory = xmlstream.XmlStreamFactory() + + + def test_buildProtocolInstallsBootstraps(self): + """ + The protocol factory installs bootstrap event handlers on the protocol. + """ + called = [] + + def cb(data): + called.append(data) + + self.factory.addBootstrap('//event/myevent', cb) + + xs = self.factory.buildProtocol(None) + xs.dispatch(None, '//event/myevent') + + self.assertEquals(1, len(called)) + + + def test_buildProtocolStoresFactory(self): + """ + The protocol factory is saved in the protocol. + """ + xs = self.factory.buildProtocol(None) + self.assertIdentical(self.factory, xs.factory) + + + +class XmlStreamFactoryMixinTest(GenericXmlStreamFactoryTestsMixin): + """ + Tests for L{xmlstream.XmlStreamFactoryMixin}. + """ + + def setUp(self): + self.factory = xmlstream.XmlStreamFactoryMixin(None, test=None) + self.factory.protocol = DummyProtocol + + + def test_buildProtocolFactoryArguments(self): + """ + Arguments passed to the factory should be passed to protocol on + instantiation. 
+ """ + xs = self.factory.buildProtocol(None) + + self.assertEquals((None,), xs.args) + self.assertEquals({'test': None}, xs.kwargs) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_xmpproutertap.py b/vendor/Twisted-10.0.0/twisted/words/test/test_xmpproutertap.py new file mode 100644 index 000000000000..7885c73345c7 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_xmpproutertap.py @@ -0,0 +1,86 @@ +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Tests for L{twisted.words.xmpproutertap}. +""" + +from twisted.application import internet +from twisted.trial import unittest +from twisted.words import xmpproutertap as tap +from twisted.words.protocols.jabber import component + +class XMPPRouterTapTest(unittest.TestCase): + + def test_port(self): + """ + The port option is recognised as a parameter. + """ + opt = tap.Options() + opt.parseOptions(['--port', '7001']) + self.assertEquals(opt['port'], '7001') + + + def test_portDefault(self): + """ + The port option has '5347' as default value + """ + opt = tap.Options() + opt.parseOptions([]) + self.assertEquals(opt['port'], 'tcp:5347:interface=127.0.0.1') + + + def test_secret(self): + """ + The secret option is recognised as a parameter. + """ + opt = tap.Options() + opt.parseOptions(['--secret', 'hushhush']) + self.assertEquals(opt['secret'], 'hushhush') + + + def test_secretDefault(self): + """ + The secret option has 'secret' as default value + """ + opt = tap.Options() + opt.parseOptions([]) + self.assertEquals(opt['secret'], 'secret') + + + def test_verbose(self): + """ + The verbose option is recognised as a flag. + """ + opt = tap.Options() + opt.parseOptions(['--verbose']) + self.assertTrue(opt['verbose']) + + + def test_makeService(self): + """ + The service gets set up with a router and factory. + """ + opt = tap.Options() + opt.parseOptions([]) + s = tap.makeService(opt) + self.assertIsInstance(s, internet.TCPServer) + self.assertEquals('127.0.0.1', s.kwargs['interface']) + self.assertEquals(2, len(s.args)) + self.assertEquals(5347, s.args[0]) + factory = s.args[1] + self.assertIsInstance(factory, component.XMPPComponentServerFactory) + self.assertIsInstance(factory.router, component.Router) + self.assertEquals('secret', factory.secret) + self.assertFalse(factory.logTraffic) + + + def test_makeServiceVerbose(self): + """ + The verbose flag enables traffic logging. + """ + opt = tap.Options() + opt.parseOptions(['--verbose']) + s = tap.makeService(opt) + factory = s.args[1] + self.assertTrue(factory.logTraffic) diff --git a/vendor/Twisted-10.0.0/twisted/words/test/test_xpath.py b/vendor/Twisted-10.0.0/twisted/words/test/test_xpath.py new file mode 100644 index 000000000000..ad9ef67ee6c5 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/test/test_xpath.py @@ -0,0 +1,260 @@ +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +from twisted.trial import unittest +import sys, os + +from twisted.words.xish.domish import Element +from twisted.words.xish.xpath import XPathQuery +from twisted.words.xish import xpath + +class XPathTest(unittest.TestCase): + def setUp(self): + # Build element: + # + # somecontent + # + # + # DEF + # + # + # somemorecontent + # + # + # + # ABC + # + # + # + # + # JKL + # + # + # + # MNO + # + # + # + self.e = Element(("testns", "foo")) + self.e["attrib1"] = "value1" + self.e["attrib3"] = "user@host/resource" + self.e.addContent("somecontent") + self.bar1 = self.e.addElement("bar") + self.subfoo = self.bar1.addElement("foo") + self.gar1 = self.subfoo.addElement("gar") + self.gar1.addContent("DEF") + self.e.addContent("somemorecontent") + self.bar2 = self.e.addElement("bar") + self.bar2["attrib2"] = "value2" + self.bar3 = self.bar2.addElement("bar") + self.subfoo2 = self.bar3.addElement("foo") + self.gar2 = self.bar3.addElement("gar") + self.gar2.addContent("ABC") + self.bar4 = self.e.addElement("bar") + self.bar5 = self.e.addElement("bar") + self.bar5["attrib4"] = "value4" + self.bar5["attrib5"] = "value5" + self.subfoo3 = self.bar5.addElement("foo") + self.gar3 = self.bar5.addElement("gar") + self.gar3.addContent("JKL") + self.bar6 = self.e.addElement("bar") + self.bar6["attrib4"] = "value4" + self.bar6["attrib5"] = "value4" + self.subfoo4 = self.bar6.addElement("foo") + self.gar4 = self.bar6.addElement("gar") + self.gar4.addContent("MNO") + self.bar7 = self.e.addElement("bar") + self.bar7["attrib4"] = "value4" + self.bar7["attrib5"] = "value6" + + def test_staticMethods(self): + """ + Test basic operation of the static methods. + """ + self.assertEquals(xpath.matches("/foo/bar", self.e), + True) + self.assertEquals(xpath.queryForNodes("/foo/bar", self.e), + [self.bar1, self.bar2, self.bar4, + self.bar5, self.bar6, self.bar7]) + self.assertEquals(xpath.queryForString("/foo", self.e), + "somecontent") + self.assertEquals(xpath.queryForStringList("/foo", self.e), + ["somecontent", "somemorecontent"]) + + def test_locationFooBar(self): + """ + Test matching foo with child bar. + """ + xp = XPathQuery("/foo/bar") + self.assertEquals(xp.matches(self.e), 1) + + def test_locationFooBarFoo(self): + """ + Test finding foos at the second level. + """ + xp = XPathQuery("/foo/bar/foo") + self.assertEquals(xp.matches(self.e), 1) + self.assertEquals(xp.queryForNodes(self.e), [self.subfoo, + self.subfoo3, + self.subfoo4]) + + def test_locationNoBar3(self): + """ + Test not finding bar3. + """ + xp = XPathQuery("/foo/bar3") + self.assertEquals(xp.matches(self.e), 0) + + def test_locationAllChilds(self): + """ + Test finding childs of foo. + """ + xp = XPathQuery("/foo/*") + self.assertEquals(xp.matches(self.e), True) + self.assertEquals(xp.queryForNodes(self.e), [self.bar1, self.bar2, + self.bar4, self.bar5, + self.bar6, self.bar7]) + + def test_attribute(self): + """ + Test matching foo with attribute. + """ + xp = XPathQuery("/foo[@attrib1]") + self.assertEquals(xp.matches(self.e), True) + + def test_attributeWithValueAny(self): + """ + Test find nodes with attribute having value. + """ + xp = XPathQuery("/foo/*[@attrib2='value2']") + self.assertEquals(xp.matches(self.e), True) + self.assertEquals(xp.queryForNodes(self.e), [self.bar2]) + + def test_position(self): + """ + Test finding element at position. 
+ """ + xp = XPathQuery("/foo/bar[2]") + self.assertEquals(xp.matches(self.e), 1) + self.assertEquals(xp.queryForNodes(self.e), [self.bar1]) + + test_position.todo = "XPath queries with position are not working." + + def test_namespaceFound(self): + """ + Test matching node with namespace. + """ + xp = XPathQuery("/foo[@xmlns='testns']/bar") + self.assertEquals(xp.matches(self.e), 1) + + def test_namespaceNotFound(self): + """ + Test not matching node with wrong namespace. + """ + xp = XPathQuery("/foo[@xmlns='badns']/bar2") + self.assertEquals(xp.matches(self.e), 0) + + def test_attributeWithValue(self): + """ + Test matching node with attribute having value. + """ + xp = XPathQuery("/foo[@attrib1='value1']") + self.assertEquals(xp.matches(self.e), 1) + + def test_queryForString(self): + """ + Test for queryForString and queryForStringList. + """ + xp = XPathQuery("/foo") + self.assertEquals(xp.queryForString(self.e), "somecontent") + self.assertEquals(xp.queryForStringList(self.e), + ["somecontent", "somemorecontent"]) + + def test_queryForNodes(self): + """ + Test finding nodes. + """ + xp = XPathQuery("/foo/bar") + self.assertEquals(xp.queryForNodes(self.e), [self.bar1, self.bar2, + self.bar4, self.bar5, + self.bar6, self.bar7]) + + def test_textCondition(self): + """ + Test matching a node with given text. + """ + xp = XPathQuery("/foo[text() = 'somecontent']") + self.assertEquals(xp.matches(self.e), True) + + def test_textNotOperator(self): + """ + Test for not operator. + """ + xp = XPathQuery("/foo[not(@nosuchattrib)]") + self.assertEquals(xp.matches(self.e), True) + + def test_anyLocationAndText(self): + """ + Test finding any nodes named gar and getting their text contents. + """ + xp = XPathQuery("//gar") + self.assertEquals(xp.matches(self.e), True) + self.assertEquals(xp.queryForNodes(self.e), [self.gar1, self.gar2, + self.gar3, self.gar4]) + self.assertEquals(xp.queryForStringList(self.e), ["DEF", "ABC", + "JKL", "MNO"]) + + def test_anyLocation(self): + """ + Test finding any nodes named bar. + """ + xp = XPathQuery("//bar") + self.assertEquals(xp.matches(self.e), True) + self.assertEquals(xp.queryForNodes(self.e), [self.bar1, self.bar2, + self.bar3, self.bar4, + self.bar5, self.bar6, + self.bar7]) + + def test_anyLocationQueryForString(self): + """ + L{XPathQuery.queryForString} should raise a L{NotImplementedError} + for any location. + """ + xp = XPathQuery("//bar") + self.assertRaises(NotImplementedError, xp.queryForString, None) + + def test_andOperator(self): + """ + Test boolean and operator in condition. + """ + xp = XPathQuery("//bar[@attrib4='value4' and @attrib5='value5']") + self.assertEquals(xp.matches(self.e), True) + self.assertEquals(xp.queryForNodes(self.e), [self.bar5]) + + def test_orOperator(self): + """ + Test boolean or operator in condition. + """ + xp = XPathQuery("//bar[@attrib5='value4' or @attrib5='value5']") + self.assertEquals(xp.matches(self.e), True) + self.assertEquals(xp.queryForNodes(self.e), [self.bar5, self.bar6]) + + def test_booleanOperatorsParens(self): + """ + Test multiple boolean operators in condition with parens. + """ + xp = XPathQuery("""//bar[@attrib4='value4' and + (@attrib5='value4' or @attrib5='value6')]""") + self.assertEquals(xp.matches(self.e), True) + self.assertEquals(xp.queryForNodes(self.e), [self.bar6, self.bar7]) + + def test_booleanOperatorsNoParens(self): + """ + Test multiple boolean operators in condition without parens. 
+ """ + xp = XPathQuery("""//bar[@attrib5='value4' or + @attrib5='value5' or + @attrib5='value6']""") + self.assertEquals(xp.matches(self.e), True) + self.assertEquals(xp.queryForNodes(self.e), [self.bar5, self.bar6, self.bar7]) diff --git a/vendor/Twisted-10.0.0/twisted/words/toctap.py b/vendor/Twisted-10.0.0/twisted/words/toctap.py new file mode 100644 index 000000000000..ce79bb89d800 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/toctap.py @@ -0,0 +1,20 @@ + +# Copyright (c) 2001-2004 Twisted Matrix Laboratories. +# See LICENSE for details. + + +""" +Support module for making TOC servers with twistd. +""" + +from twisted.words.protocols import toc +from twisted.python import usage +from twisted.application import strports + +class Options(usage.Options): + synopsis = "[-p ]" + optParameters = [["port", "p", "5190"]] + longdesc = "Makes a TOC server." + +def makeService(config): + return strports.service(config['port'], toc.TOCFactory()) diff --git a/vendor/Twisted-10.0.0/twisted/words/topfiles/NEWS b/vendor/Twisted-10.0.0/twisted/words/topfiles/NEWS new file mode 100644 index 000000000000..aad5ef0d4066 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/topfiles/NEWS @@ -0,0 +1,230 @@ +Ticket numbers in this file can be looked up by visiting +http://twistedmatrix.com/trac/ticket/ + +Twisted Words 10.0.0 (2010-03-01) +================================= + +Features +-------- + - twisted.words.protocols.irc.IRCClient.irc_MODE now takes ISUPPORT + parameters into account when parsing mode messages with arguments + that take parameters (#3296) + +Bugfixes +-------- + - When twisted.words.protocols.irc.IRCClient's versionNum and + versionEnv attributes are set to None, they will no longer be + included in the client's response to CTCP VERSION queries. 
(#3660) + + - twisted.words.protocols.jabber.xmlstream.hashPassword now only + accepts unicode as input (#3741, #3742, #3847) + +Other +----- + - #2503, #4066, #4261 + + +Twisted Words 9.0.0 (2009-11-24) +================================ + +Features +-------- + - IRCClient.describe is a new method meant to replace IRCClient.me to send + CTCP ACTION messages with less confusing behavior (#3910) + - The XMPP client protocol implementation now supports ANONYMOUS SASL + authentication (#4067) + - The IRC client protocol implementation now has better support for the + ISUPPORT server->client message, storing the data in a new + ServerSupportedFeatures object accessible via IRCClient.supported (#3285) + +Fixes +----- + - The twisted.words IRC server now always sends an MOTD, which at least makes + Pidgin able to successfully connect to a twisted.words IRC server (#2385) + - The IRC client will now dispatch "RPL MOTD" messages received before a + "RPL MOTD START" instead of raising an exception (#3676) + - The IRC client protocol implementation no longer updates its 'nickname' + attribute directly; instead, that attribute will be updated when the server + acknowledges the change (#3377) + - The IRC client protocol implementation now supports falling back to another + nickname when a nick change request fails (#3377, #4010) + +Deprecations and Removals +------------------------- + - The TOC protocol implementation is now deprecated, since the protocol itself + has been deprecated and obselete for quite a long time (#3580) + - The gui "im" application has been removed, since it relied on GTK1, which is + hard to find these days (#3699, #3340) + +Other +----- + - #2763, #3540, #3647, #3750, #3895, #3968, #4050 + +Words 8.2.0 (2008-12-16) +======================== + +Feature +------- + - There is now a standalone XMPP router included in twisted.words: it can be + used with the 'twistd xmpp-router' command line (#3407) + - A server factory for Jabber XML Streams has been added (#3435) + - Domish now allows for iterating child elements with specific qualified names + (#2429) + - IRCClient now has a 'back' method which removes the away status (#3366) + - IRCClient now has a 'whois' method (#3133) + +Fixes +----- + - The IRC Client implementation can now deal with compound mode changes (#3230) + - The MSN protocol implementation no longer requires the CVR0 protocol to + be included in the VER command (#3394) + - In the IRC server implementation, topic messages will no longer be sent for + a group which has no topic (#2204) + - An infinite loop (which caused infinite memory usage) in irc.split has been + fixed. This was triggered any time a message that starts with a delimiter + was sent (#3446) + - Jabber's toResponse now generates a valid stanza even when stanzaType is not + specified (#3467) + - The lifetime of authenticator instances in XmlStreamServerFactory is no + longer artificially extended (#3464) + +Other +----- + - #3365 + + +8.1.0 (2008-05-18) +================== + +Features +-------- + - JID objects now have a nice __repr__ (#3156) + - Extending XMPP protocols is now easier (#2178) + +Fixes +----- + - The deprecated mktap API is no longer used (#3127) + - A bug whereby one-time XMPP observers would be enabled permanently was fixed + (#3066) + + +8.0.0 (2008-03-17) +================== + +Features +-------- + - Provide function for creating XMPP response stanzas. (#2614, #2614) + - Log exceptions raised in Xish observers. (#2616) + - Add 'and' and 'or' operators for Xish XPath expressions. 
(#2502) + - Make JIDs hashable. (#2770) + +Fixes +----- + - Respect the hostname and servername parameters to IRCClient.register. (#1649) + - Make EventDispatcher remove empty callback lists. (#1652) + - Use legacy base64 API to support Python 2.3 (#2461) + - Fix support of DIGEST-MD5 challenge parsing with multi-valued directives. + (#2606) + - Fix reuse of dict of prefixes in domish.Element.toXml (#2609) + - Properly process XMPP stream headers (#2615) + - Use proper namespace for XMPP stream errors. (#2630) + - Properly parse XMPP stream errors. (#2771) + - Fix toResponse for XMPP stanzas without an id attribute. (#2773) + - Move XMPP stream header procesing to authenticators. (#2772) + +Misc +---- + - #2617, #2640, #2741, #2063, #2570, #2847 + + +0.5.0 (2007-01-06) +================== + +Features +-------- + - (Jabber) IQ.send now optionally has a 'timeout' parameter which + specifies a time at which to errback the Deferred with a + TimeoutError (#2218) + - (Jabber) SASL authentication, resource binding and session + establishment were added. (#1046) The following were done in + support of this change: + - Rework ConnectAuthenticator to work with initializer objects that + provide a stream initialization step. + - Reimplement iq:auth as an initializer. + - Reimplement TLS negotiation as an initializer. + - Add XMPPAuthenticator as a XMPP 1.0 client authenticator (only), along + with XMPPClientFactory. + - Add support for working with pre-XMPP-1.0 error stanzas. + - Remove hasFeature() from XmlStream as you can test (uri, name) in + xs.features. + - Add sendFooter() and sendStreamError() to XmlStream + +Fixes +----- + - (Jabber) Deferreds from queries which were never resolved before + a lost connection are now errbacked (#2006) + - (Jabber) servers which didn't send a 'realm' directive in + authentication challenges no longer cause the Jabber client to + choke (#2098) + - (MSN) error responses are now properly turned into errbacks (#2019) + - (IRC) A trivial bug in IRCClient which would cause whois(oper=True) + to always raise an exception was fixed (#2089) + - (IM) Bugs in the error handling and already-connecting cases of + AbstractAccount.logOn were fixed (#2086) + +Misc +---- + - #1734, #1735, #1636, #1936, #1883, #1995, #2171, #2165, #2177 + + +0.4.0 (2006-05-21) +================== + +Features +-------- + - Jabber: + - Add support for stream and stanza level errors + - Create new IQ stanza helper that works with deferreds + - Add TLS support for initiating entities to XmlStream + - Fix account registration + - Xish: + - Fix various namespace issues + - Add IElement + - Store namespace declarations in parsed XML for later serialization + - Fix user name/group collision in server service (#1655). + - Correctly recognize MSN capability messages (#861). + +Fixes +----- + - Misc: #1283, #1296, #1302, #1424 + - Fix unicode/str confusion in IRC server service. + + +0.3.0: + - Jabber: + + - Fix digest authentication in Jabber + - Add Jabber xmlstream module that contains the Jabber specific bits that + got factored out of Twisted Xish's xmlstream, and make it suitable for + implementing full XMPP support. + - Xish: + - Fixed serialization in _ListSerializer + - Removed unneeded extra whitespace generated in serialization + - Removed _Serializer in favour of _ListSerializer + - Use unicode objects for representing serialized XML, instead of utf-8 + encoded str objects. 
+ - Properly catch XML parser errors + - Rework and fix element stream test cases + - Strip xmlstream from all Jabber specifics that moved to Twisted Words + - Added exhaustive docstrings to xmlstream. + - Words Service: + - Complete rewrite + - Not backwards compatible + +0.1.0: + - Fix some miscellaneous bugs in OSCAR + - Add QUIT notification for IRC + - Fix message wrapping + - Misc Jabber fixes + - Add stringprep support for Jabber IDs + This only works properly on 2.3.2 or higher diff --git a/vendor/Twisted-10.0.0/twisted/words/topfiles/README b/vendor/Twisted-10.0.0/twisted/words/topfiles/README new file mode 100644 index 000000000000..712466cfb783 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/topfiles/README @@ -0,0 +1,4 @@ +Twisted Words 10.0.0 + +Twisted Words depends on Twisted Core and Twisted Web. The Twisted Web +dependency is only necessary for MSN support. diff --git a/vendor/Twisted-10.0.0/twisted/words/topfiles/setup.py b/vendor/Twisted-10.0.0/twisted/words/topfiles/setup.py new file mode 100644 index 000000000000..fb33c62f88cc --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/topfiles/setup.py @@ -0,0 +1,53 @@ +# Copyright (c) 2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +import sys + +try: + from twisted.python import dist +except ImportError: + raise SystemExit("twisted.python.dist module not found. Make sure you " + "have installed the Twisted core package before " + "attempting to install any other Twisted projects.") + +if __name__ == '__main__': + if sys.version_info[:2] >= (2, 4): + extraMeta = dict( + classifiers=[ + "Development Status :: 4 - Beta", + "Environment :: No Input/Output (Daemon)", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python", + "Topic :: Communications :: Chat", + "Topic :: Communications :: Chat :: AOL Instant Messenger", + "Topic :: Communications :: Chat :: ICQ", + "Topic :: Communications :: Chat :: Internet Relay Chat", + "Topic :: Internet", + "Topic :: Software Development :: Libraries :: Python Modules", + ]) + else: + extraMeta = {} + + dist.setup( + twisted_subproject="words", + scripts=dist.getScripts("words"), + # metadata + name="Twisted Words", + description="Twisted Words contains Instant Messaging implementations.", + author="Twisted Matrix Laboratories", + author_email="twisted-python@twistedmatrix.com", + maintainer="Jp Calderone", + url="http://twistedmatrix.com/trac/wiki/TwistedWords", + license="MIT", + long_description="""\ +Twisted Words contains implementations of many Instant Messaging +protocols, including IRC, Jabber, MSN, OSCAR (AIM & ICQ), TOC (AOL), +and some functionality for creating bots, inter-protocol gateways, and +a client application for many of the protocols. + +In support of Jabber, Twisted Words also contains X-ish, a library for +processing XML with Twisted and Python, with support for a Pythonic DOM and +an XPath-like toolkit. +""", + **extraMeta) diff --git a/vendor/Twisted-10.0.0/twisted/words/xish/__init__.py b/vendor/Twisted-10.0.0/twisted/words/xish/__init__.py new file mode 100644 index 000000000000..747d943010f9 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/xish/__init__.py @@ -0,0 +1,10 @@ +# -*- test-case-name: twisted.words.test -*- +# Copyright (c) 2001-2005 Twisted Matrix Laboratories. +# See LICENSE for details. 
+ + +""" + +Twisted X-ish: XML-ish DOM and XPath-ish engine + +""" diff --git a/vendor/Twisted-10.0.0/twisted/words/xish/domish.py b/vendor/Twisted-10.0.0/twisted/words/xish/domish.py new file mode 100644 index 000000000000..cb8b5d417ef9 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/xish/domish.py @@ -0,0 +1,848 @@ +# -*- test-case-name: twisted.words.test.test_domish -*- +# +# Copyright (c) 2001-2007 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +DOM-like XML processing support. + +This module provides support for parsing XML into DOM-like object structures +and serializing such structures to an XML string representation, optimized +for use in streaming XML applications. +""" + +import types + +from zope.interface import implements, Interface, Attribute + +def _splitPrefix(name): + """ Internal method for splitting a prefixed Element name into its + respective parts """ + ntok = name.split(":", 1) + if len(ntok) == 2: + return ntok + else: + return (None, ntok[0]) + +# Global map of prefixes that always get injected +# into the serializers prefix map (note, that doesn't +# mean they're always _USED_) +G_PREFIXES = { "http://www.w3.org/XML/1998/namespace":"xml" } + +class _ListSerializer: + """ Internal class which serializes an Element tree into a buffer """ + def __init__(self, prefixes=None, prefixesInScope=None): + self.writelist = [] + self.prefixes = {} + if prefixes: + self.prefixes.update(prefixes) + self.prefixes.update(G_PREFIXES) + self.prefixStack = [G_PREFIXES.values()] + (prefixesInScope or []) + self.prefixCounter = 0 + + def getValue(self): + return u"".join(self.writelist) + + def getPrefix(self, uri): + if not self.prefixes.has_key(uri): + self.prefixes[uri] = "xn%d" % (self.prefixCounter) + self.prefixCounter = self.prefixCounter + 1 + return self.prefixes[uri] + + def prefixInScope(self, prefix): + stack = self.prefixStack + for i in range(-1, (len(self.prefixStack)+1) * -1, -1): + if prefix in stack[i]: + return True + return False + + def serialize(self, elem, closeElement=1, defaultUri=''): + # Optimization shortcuts + write = self.writelist.append + + # Shortcut, check to see if elem is actually a chunk o' serialized XML + if isinstance(elem, SerializedXML): + write(elem) + return + + # Shortcut, check to see if elem is actually a string (aka Cdata) + if isinstance(elem, types.StringTypes): + write(escapeToXml(elem)) + return + + # Further optimizations + parent = elem.parent + name = elem.name + uri = elem.uri + defaultUri, currentDefaultUri = elem.defaultUri, defaultUri + + for p, u in elem.localPrefixes.iteritems(): + self.prefixes[u] = p + self.prefixStack.append(elem.localPrefixes.keys()) + + # Inherit the default namespace + if defaultUri is None: + defaultUri = currentDefaultUri + + if uri is None: + uri = defaultUri + + prefix = None + if uri != defaultUri or uri in self.prefixes: + prefix = self.getPrefix(uri) + inScope = self.prefixInScope(prefix) + + # Create the starttag + + if not prefix: + write("<%s" % (name)) + else: + write("<%s:%s" % (prefix, name)) + + if not inScope: + write(" xmlns:%s='%s'" % (prefix, uri)) + self.prefixStack[-1].append(prefix) + inScope = True + + if defaultUri != currentDefaultUri and \ + (uri != defaultUri or not prefix or not inScope): + write(" xmlns='%s'" % (defaultUri)) + + for p, u in elem.localPrefixes.iteritems(): + write(" xmlns:%s='%s'" % (p, u)) + + # Serialize attributes + for k,v in elem.attributes.items(): + # If the attribute name is a tuple, it's a qualified attribute + if 
isinstance(k, types.TupleType): + attr_uri, attr_name = k + attr_prefix = self.getPrefix(attr_uri) + + if not self.prefixInScope(attr_prefix): + write(" xmlns:%s='%s'" % (attr_prefix, attr_uri)) + self.prefixStack[-1].append(attr_prefix) + + write(" %s:%s='%s'" % (attr_prefix, attr_name, + escapeToXml(v, 1))) + else: + write((" %s='%s'" % ( k, escapeToXml(v, 1)))) + + # Shortcut out if this is only going to return + # the element (i.e. no children) + if closeElement == 0: + write(">") + return + + # Serialize children + if len(elem.children) > 0: + write(">") + for c in elem.children: + self.serialize(c, defaultUri=defaultUri) + # Add closing tag + if not prefix: + write("" % (name)) + else: + write("" % (prefix, name)) + else: + write("/>") + + self.prefixStack.pop() + + +SerializerClass = _ListSerializer + +def escapeToXml(text, isattrib = 0): + """ Escape text to proper XML form, per section 2.3 in the XML specification. + + @type text: L{str} + @param text: Text to escape + + @type isattrib: L{bool} + @param isattrib: Triggers escaping of characters necessary for use as + attribute values + """ + text = text.replace("&", "&") + text = text.replace("<", "<") + text = text.replace(">", ">") + if isattrib == 1: + text = text.replace("'", "'") + text = text.replace("\"", """) + return text + +def unescapeFromXml(text): + text = text.replace("<", "<") + text = text.replace(">", ">") + text = text.replace("'", "'") + text = text.replace(""", "\"") + text = text.replace("&", "&") + return text + +def generateOnlyInterface(list, int): + """ Filters items in a list by class + """ + for n in list: + if int.providedBy(n): + yield n + +def generateElementsQNamed(list, name, uri): + """ Filters Element items in a list with matching name and URI. """ + for n in list: + if IElement.providedBy(n) and n.name == name and n.uri == uri: + yield n + +def generateElementsNamed(list, name): + """ Filters Element items in a list with matching name, regardless of URI. + """ + for n in list: + if IElement.providedBy(n) and n.name == name: + yield n + + +class SerializedXML(unicode): + """ Marker class for pre-serialized XML in the DOM. """ + pass + + +class Namespace: + """ Convenience object for tracking namespace declarations. """ + def __init__(self, uri): + self._uri = uri + def __getattr__(self, n): + return (self._uri, n) + def __getitem__(self, n): + return (self._uri, n) + +class IElement(Interface): + """ + Interface to XML element nodes. + + See L{Element} for a detailed example of its general use. + + Warning: this Interface is not yet complete! + """ + + uri = Attribute(""" Element's namespace URI """) + name = Attribute(""" Element's local name """) + defaultUri = Attribute(""" Default namespace URI of child elements """) + attributes = Attribute(""" Dictionary of element attributes """) + children = Attribute(""" List of child nodes """) + parent = Attribute(""" Reference to element's parent element """) + localPrefixes = Attribute(""" Dictionary of local prefixes """) + + def toXml(prefixes=None, closeElement=1, defaultUri='', + prefixesInScope=None): + """ Serializes object to a (partial) XML document + + @param prefixes: dictionary that maps namespace URIs to suggested + prefix names. + @type prefixes: L{dict} + @param closeElement: flag that determines whether to include the + closing tag of the element in the serialized + string. A value of C{0} only generates the + element's start tag. A value of C{1} yields a + complete serialization. 
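For reference, a minimal sketch of the escaping helpers above, which perform standard XML entity escaping per section 2.3 of the XML specification (assuming the vendored package is importable as twisted.words.xish.domish; the literal strings and expected results shown in comments are illustrative):

    from twisted.words.xish.domish import escapeToXml, unescapeFromXml

    # Content escaping: &, < and > become entity references.
    escaped = escapeToXml("1 < 2 & 2 > 1")
    # expected: "1 &lt; 2 &amp; 2 &gt; 1"

    # Attribute escaping additionally handles single and double quotes.
    attr = escapeToXml("it's \"quoted\"", isattrib=1)
    # expected: "it&apos;s &quot;quoted&quot;"

    assert unescapeFromXml(escaped) == "1 < 2 & 2 > 1"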
+ @type closeElement: L{int} + @param defaultUri: Initial default namespace URI. This is most useful + for partial rendering, where the logical parent + element (of which the starttag was already + serialized) declares a default namespace that should + be inherited. + @type defaultUri: L{str} + @param prefixesInScope: list of prefixes that are assumed to be + declared by ancestors. + @type prefixesInScope: L{list} + @return: (partial) serialized XML + @rtype: L{unicode} + """ + + def addElement(name, defaultUri = None, content = None): + """ Create an element and add as child. + + The new element is added to this element as a child, and will have + this element as its parent. + + @param name: element name. This can be either a L{unicode} object that + contains the local name, or a tuple of (uri, local_name) + for a fully qualified name. In the former case, + the namespace URI is inherited from this element. + @type name: L{unicode} or L{tuple} of (L{unicode}, L{unicode}) + @param defaultUri: default namespace URI for child elements. If + C{None}, this is inherited from this element. + @type defaultUri: L{unicode} + @param content: text contained by the new element. + @type content: L{unicode} + @return: the created element + @rtype: object providing L{IElement} + """ + + def addChild(node): + """ Adds a node as child of this element. + + The C{node} will be added to the list of childs of this element, and + will have this element set as its parent when C{node} provides + L{IElement}. + + @param node: the child node. + @type node: L{unicode} or object implementing L{IElement} + """ + +class Element(object): + """ Represents an XML element node. + + An Element contains a series of attributes (name/value pairs), content + (character data), and other child Element objects. When building a document + with markup (such as HTML or XML), use this object as the starting point. + + Element objects fully support XML Namespaces. The fully qualified name of + the XML Element it represents is stored in the C{uri} and C{name} + attributes, where C{uri} holds the namespace URI. There is also a default + namespace, for child elements. This is stored in the C{defaultUri} + attribute. Note that C{''} means the empty namespace. + + Serialization of Elements through C{toXml()} will use these attributes + for generating proper serialized XML. When both C{uri} and C{defaultUri} + are not None in the Element and all of its descendents, serialization + proceeds as expected: + + >>> from twisted.words.xish import domish + >>> root = domish.Element(('myns', 'root')) + >>> root.addElement('child', content='test') + + >>> root.toXml() + u"test" + + For partial serialization, needed for streaming XML, a special value for + namespace URIs can be used: C{None}. + + Using C{None} as the value for C{uri} means: this element is in whatever + namespace inherited by the closest logical ancestor when the complete XML + document has been serialized. The serialized start tag will have a + non-prefixed name, and no xmlns declaration will be generated. + + Similarly, C{None} for C{defaultUri} means: the default namespace for my + child elements is inherited from the logical ancestors of this element, + when the complete XML document has been serialized. + + To illustrate, an example from a Jabber stream. Assume the start tag of the + root element of the stream has already been serialized, along with several + complete child elements, and sent off, looking like this:: + + + ... 
+ + Now suppose we want to send a complete element represented by an + object C{message} created like: + + >>> message = domish.Element((None, 'message')) + >>> message['to'] = 'user@example.com' + >>> message.addElement('body', content='Hi!') + + >>> message.toXml() + u"Hi!" + + As, you can see, this XML snippet has no xmlns declaration. When sent + off, it inherits the C{jabber:client} namespace from the root element. + Note that this renders the same as using C{''} instead of C{None}: + + >>> presence = domish.Element(('', 'presence')) + >>> presence.toXml() + u"" + + However, if this object has a parent defined, the difference becomes + clear: + + >>> child = message.addElement(('http://example.com/', 'envelope')) + >>> child.addChild(presence) + + >>> message.toXml() + u"Hi!" + + As, you can see, the element is now in the empty namespace, not + in the default namespace of the parent or the streams'. + + @type uri: L{unicode} or None + @ivar uri: URI of this Element's name + + @type name: L{unicode} + @ivar name: Name of this Element + + @type defaultUri: L{unicode} or None + @ivar defaultUri: URI this Element exists within + + @type children: L{list} + @ivar children: List of child Elements and content + + @type parent: L{Element} + @ivar parent: Reference to the parent Element, if any. + + @type attributes: L{dict} + @ivar attributes: Dictionary of attributes associated with this Element. + + @type localPrefixes: L{dict} + @ivar localPrefixes: Dictionary of namespace declarations on this + element. The key is the prefix to bind the + namespace uri to. + """ + + implements(IElement) + + _idCounter = 0 + + def __init__(self, qname, defaultUri=None, attribs=None, + localPrefixes=None): + """ + @param qname: Tuple of (uri, name) + @param defaultUri: The default URI of the element; defaults to the URI + specified in L{qname} + @param attribs: Dictionary of attributes + @param localPrefixes: Dictionary of namespace declarations on this + element. The key is the prefix to bind the + namespace uri to. 
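A minimal sketch of building and serializing an element tree with this class, restating the doctest examples in the docstring (assuming the vendored twisted.words.xish package is importable; expected serializations are shown as comments):

    from twisted.words.xish import domish

    root = domish.Element(('myns', 'root'))       # qualified name: (uri, name)
    root.addElement('child', content='test')      # child inherits the default namespace
    root.toXml()
    # -> u"<root xmlns='myns'><child>test</child></root>"

    message = domish.Element((None, 'message'))   # namespace decided by the ancestors
    message['to'] = 'user@example.com'
    message.addElement('body', content='Hi!')
    message.toXml()
    # -> u"<message to='user@example.com'><body>Hi!</body></message>"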
+ """ + self.localPrefixes = localPrefixes or {} + self.uri, self.name = qname + if defaultUri is None and \ + self.uri not in self.localPrefixes.itervalues(): + self.defaultUri = self.uri + else: + self.defaultUri = defaultUri + self.attributes = attribs or {} + self.children = [] + self.parent = None + + def __getattr__(self, key): + # Check child list for first Element with a name matching the key + for n in self.children: + if IElement.providedBy(n) and n.name == key: + return n + + # Tweak the behaviour so that it's more friendly about not + # finding elements -- we need to document this somewhere :) + if key.startswith('_'): + raise AttributeError(key) + else: + return None + + def __getitem__(self, key): + return self.attributes[self._dqa(key)] + + def __delitem__(self, key): + del self.attributes[self._dqa(key)]; + + def __setitem__(self, key, value): + self.attributes[self._dqa(key)] = value + + def __str__(self): + """ Retrieve the first CData (content) node + """ + for n in self.children: + if isinstance(n, types.StringTypes): return n + return "" + + def _dqa(self, attr): + """ Dequalify an attribute key as needed """ + if isinstance(attr, types.TupleType) and not attr[0]: + return attr[1] + else: + return attr + + def getAttribute(self, attribname, default = None): + """ Retrieve the value of attribname, if it exists """ + return self.attributes.get(attribname, default) + + def hasAttribute(self, attrib): + """ Determine if the specified attribute exists """ + return self.attributes.has_key(self._dqa(attrib)) + + def compareAttribute(self, attrib, value): + """ Safely compare the value of an attribute against a provided value. + + C{None}-safe. + """ + return self.attributes.get(self._dqa(attrib), None) == value + + def swapAttributeValues(self, left, right): + """ Swap the values of two attribute. """ + d = self.attributes + l = d[left] + d[left] = d[right] + d[right] = l + + def addChild(self, node): + """ Add a child to this Element. """ + if IElement.providedBy(node): + node.parent = self + self.children.append(node) + return self.children[-1] + + def addContent(self, text): + """ Add some text data to this Element. """ + c = self.children + if len(c) > 0 and isinstance(c[-1], types.StringTypes): + c[-1] = c[-1] + text + else: + c.append(text) + return c[-1] + + def addElement(self, name, defaultUri = None, content = None): + result = None + if isinstance(name, type(())): + if defaultUri is None: + defaultUri = name[0] + self.children.append(Element(name, defaultUri)) + else: + if defaultUri is None: + defaultUri = self.defaultUri + self.children.append(Element((defaultUri, name), defaultUri)) + + result = self.children[-1] + result.parent = self + + if content: + result.children.append(content) + + return result + + def addRawXml(self, rawxmlstring): + """ Add a pre-serialized chunk o' XML as a child of this Element. """ + self.children.append(SerializedXML(rawxmlstring)) + + def addUniqueId(self): + """ Add a unique (across a given Python session) id attribute to this + Element. + """ + self.attributes["id"] = "H_%d" % Element._idCounter + Element._idCounter = Element._idCounter + 1 + + + def elements(self, uri=None, name=None): + """ + Iterate across all children of this Element that are Elements. + + Returns a generator over the child elements. If both the C{uri} and + C{name} parameters are set, the returned generator will only yield + on elements matching the qualified name. + + @param uri: Optional element URI. 
+ @type uri: C{unicode} + @param name: Optional element name. + @type name: C{unicode} + @return: Iterator that yields objects implementing L{IElement}. + """ + if name is None: + return generateOnlyInterface(self.children, IElement) + else: + return generateElementsQNamed(self.children, name, uri) + + + def toXml(self, prefixes=None, closeElement=1, defaultUri='', + prefixesInScope=None): + """ Serialize this Element and all children to a string. """ + s = SerializerClass(prefixes=prefixes, prefixesInScope=prefixesInScope) + s.serialize(self, closeElement=closeElement, defaultUri=defaultUri) + return s.getValue() + + def firstChildElement(self): + for c in self.children: + if IElement.providedBy(c): + return c + return None + + +class ParserError(Exception): + """ Exception thrown when a parsing error occurs """ + pass + +def elementStream(): + """ Preferred method to construct an ElementStream + + Uses Expat-based stream if available, and falls back to Sux if necessary. + """ + try: + es = ExpatElementStream() + return es + except ImportError: + if SuxElementStream is None: + raise Exception("No parsers available :(") + es = SuxElementStream() + return es + +try: + from twisted.web import sux +except: + SuxElementStream = None +else: + class SuxElementStream(sux.XMLParser): + def __init__(self): + self.connectionMade() + self.DocumentStartEvent = None + self.ElementEvent = None + self.DocumentEndEvent = None + self.currElem = None + self.rootElem = None + self.documentStarted = False + self.defaultNsStack = [] + self.prefixStack = [] + + def parse(self, buffer): + try: + self.dataReceived(buffer) + except sux.ParseError, e: + raise ParserError, str(e) + + + def findUri(self, prefix): + # Walk prefix stack backwards, looking for the uri + # matching the specified prefix + stack = self.prefixStack + for i in range(-1, (len(self.prefixStack)+1) * -1, -1): + if prefix in stack[i]: + return stack[i][prefix] + return None + + def gotTagStart(self, name, attributes): + defaultUri = None + localPrefixes = {} + attribs = {} + uri = None + + # Pass 1 - Identify namespace decls + for k, v in attributes.items(): + if k.startswith("xmlns"): + x, p = _splitPrefix(k) + if (x is None): # I.e. 
default declaration + defaultUri = v + else: + localPrefixes[p] = v + del attributes[k] + + # Push namespace decls onto prefix stack + self.prefixStack.append(localPrefixes) + + # Determine default namespace for this element; if there + # is one + if defaultUri is None: + if len(self.defaultNsStack) > 0: + defaultUri = self.defaultNsStack[-1] + else: + defaultUri = '' + + # Fix up name + prefix, name = _splitPrefix(name) + if prefix is None: # This element is in the default namespace + uri = defaultUri + else: + # Find the URI for the prefix + uri = self.findUri(prefix) + + # Pass 2 - Fix up and escape attributes + for k, v in attributes.items(): + p, n = _splitPrefix(k) + if p is None: + attribs[n] = v + else: + attribs[(self.findUri(p)), n] = unescapeFromXml(v) + + # Construct the actual Element object + e = Element((uri, name), defaultUri, attribs, localPrefixes) + + # Save current default namespace + self.defaultNsStack.append(defaultUri) + + # Document already started + if self.documentStarted: + # Starting a new packet + if self.currElem is None: + self.currElem = e + # Adding to existing element + else: + self.currElem = self.currElem.addChild(e) + # New document + else: + self.rootElem = e + self.documentStarted = True + self.DocumentStartEvent(e) + + def gotText(self, data): + if self.currElem != None: + self.currElem.addContent(data) + + def gotCData(self, data): + if self.currElem != None: + self.currElem.addContent(data) + + def gotComment(self, data): + # Ignore comments for the moment + pass + + entities = { "amp" : "&", + "lt" : "<", + "gt" : ">", + "apos": "'", + "quot": "\"" } + + def gotEntityReference(self, entityRef): + # If this is an entity we know about, add it as content + # to the current element + if entityRef in SuxElementStream.entities: + self.currElem.addContent(SuxElementStream.entities[entityRef]) + + def gotTagEnd(self, name): + # Ensure the document hasn't already ended + if self.rootElem is None: + # XXX: Write more legible explanation + raise ParserError, "Element closed after end of document." 
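A minimal parsing sketch for elementStream() and the three document events it exposes (assuming the vendored package is importable; the stream content and the stanzas list are illustrative):

    from twisted.words.xish import domish

    stanzas = []
    stream = domish.elementStream()                # Expat-based if available, else Sux
    stream.DocumentStartEvent = lambda root: None  # root element start tag seen
    stream.ElementEvent = stanzas.append           # one complete child element parsed
    stream.DocumentEndEvent = lambda: None         # root element closed

    stream.parse("<stream xmlns='jabber:client'>")
    stream.parse("<message><body>Hi!</body></message>")
    # stanzas now holds one Element named 'message' in the jabber:client namespace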
+ + # Fix up name + prefix, name = _splitPrefix(name) + if prefix is None: + uri = self.defaultNsStack[-1] + else: + uri = self.findUri(prefix) + + # End of document + if self.currElem is None: + # Ensure element name and uri matches + if self.rootElem.name != name or self.rootElem.uri != uri: + raise ParserError, "Mismatched root elements" + self.DocumentEndEvent() + self.rootElem = None + + # Other elements + else: + # Ensure the tag being closed matches the name of the current + # element + if self.currElem.name != name or self.currElem.uri != uri: + # XXX: Write more legible explanation + raise ParserError, "Malformed element close" + + # Pop prefix and default NS stack + self.prefixStack.pop() + self.defaultNsStack.pop() + + # Check for parent null parent of current elem; + # that's the top of the stack + if self.currElem.parent is None: + self.currElem.parent = self.rootElem + self.ElementEvent(self.currElem) + self.currElem = None + + # Anything else is just some element wrapping up + else: + self.currElem = self.currElem.parent + + +class ExpatElementStream: + def __init__(self): + import pyexpat + self.DocumentStartEvent = None + self.ElementEvent = None + self.DocumentEndEvent = None + self.error = pyexpat.error + self.parser = pyexpat.ParserCreate("UTF-8", " ") + self.parser.StartElementHandler = self._onStartElement + self.parser.EndElementHandler = self._onEndElement + self.parser.CharacterDataHandler = self._onCdata + self.parser.StartNamespaceDeclHandler = self._onStartNamespace + self.parser.EndNamespaceDeclHandler = self._onEndNamespace + self.currElem = None + self.defaultNsStack = [''] + self.documentStarted = 0 + self.localPrefixes = {} + + def parse(self, buffer): + try: + self.parser.Parse(buffer) + except self.error, e: + raise ParserError, str(e) + + def _onStartElement(self, name, attrs): + # Generate a qname tuple from the provided name + qname = name.split(" ") + if len(qname) == 1: + qname = ('', name) + + # Process attributes + for k, v in attrs.items(): + if k.find(" ") != -1: + aqname = k.split(" ") + attrs[(aqname[0], aqname[1])] = v + del attrs[k] + + # Construct the new element + e = Element(qname, self.defaultNsStack[-1], attrs, self.localPrefixes) + self.localPrefixes = {} + + # Document already started + if self.documentStarted == 1: + if self.currElem != None: + self.currElem.children.append(e) + e.parent = self.currElem + self.currElem = e + + # New document + else: + self.documentStarted = 1 + self.DocumentStartEvent(e) + + def _onEndElement(self, _): + # Check for null current elem; end of doc + if self.currElem is None: + self.DocumentEndEvent() + + # Check for parent that is None; that's + # the top of the stack + elif self.currElem.parent is None: + self.ElementEvent(self.currElem) + self.currElem = None + + # Anything else is just some element in the current + # packet wrapping up + else: + self.currElem = self.currElem.parent + + def _onCdata(self, data): + if self.currElem != None: + self.currElem.addContent(data) + + def _onStartNamespace(self, prefix, uri): + # If this is the default namespace, put + # it on the stack + if prefix is None: + self.defaultNsStack.append(uri) + else: + self.localPrefixes[prefix] = uri + + def _onEndNamespace(self, prefix): + # Remove last element on the stack + if prefix is None: + self.defaultNsStack.pop() + +## class FileParser(ElementStream): +## def __init__(self): +## ElementStream.__init__(self) +## self.DocumentStartEvent = self.docStart +## self.ElementEvent = self.elem +## self.DocumentEndEvent = 
self.docEnd +## self.done = 0 + +## def docStart(self, elem): +## self.document = elem + +## def elem(self, elem): +## self.document.addChild(elem) + +## def docEnd(self): +## self.done = 1 + +## def parse(self, filename): +## for l in open(filename).readlines(): +## self.parser.Parse(l) +## assert self.done == 1 +## return self.document + +## def parseFile(filename): +## return FileParser().parse(filename) + + diff --git a/vendor/Twisted-10.0.0/twisted/words/xish/utility.py b/vendor/Twisted-10.0.0/twisted/words/xish/utility.py new file mode 100644 index 000000000000..aae0cfce8a08 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/xish/utility.py @@ -0,0 +1,372 @@ +# -*- test-case-name: twisted.words.test.test_xishutil -*- +# +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +Event Dispatching and Callback utilities. +""" + +from twisted.python import log +from twisted.words.xish import xpath + +class _MethodWrapper(object): + """ + Internal class for tracking method calls. + """ + def __init__(self, method, *args, **kwargs): + self.method = method + self.args = args + self.kwargs = kwargs + + + def __call__(self, *args, **kwargs): + nargs = self.args + args + nkwargs = self.kwargs.copy() + nkwargs.update(kwargs) + self.method(*nargs, **nkwargs) + + + +class CallbackList: + """ + Container for callbacks. + + Event queries are linked to lists of callables. When a matching event + occurs, these callables are called in sequence. One-time callbacks + are removed from the list after the first time the event was triggered. + + Arguments to callbacks are split spread across two sets. The first set, + callback specific, is passed to C{addCallback} and is used for all + subsequent event triggers. The second set is passed to C{callback} and is + event specific. Positional arguments in the second set come after the + positional arguments of the first set. Keyword arguments in the second set + override those in the first set. + + @ivar callbacks: The registered callbacks as mapping from the callable to a + tuple of a wrapper for that callable that keeps the + callback specific arguments and a boolean that signifies + if it is to be called only once. + @type callbacks: C{dict} + """ + + def __init__(self): + self.callbacks = {} + + + def addCallback(self, onetime, method, *args, **kwargs): + """ + Add callback. + + The arguments passed are used as callback specific arguments. + + @param onetime: If C{True}, this callback is called at most once. + @type onetime: C{bool} + @param method: The callback callable to be added. + @param args: Positional arguments to the callable. + @type args: C{list} + @param kwargs: Keyword arguments to the callable. + @type kwargs: C{dict} + """ + + if not method in self.callbacks: + self.callbacks[method] = (_MethodWrapper(method, *args, **kwargs), + onetime) + + + def removeCallback(self, method): + """ + Remove callback. + + @param method: The callable to be removed. + """ + + if method in self.callbacks: + del self.callbacks[method] + + + def callback(self, *args, **kwargs): + """ + Call all registered callbacks. + + The passed arguments are event specific and augment and override + the callback specific arguments as described above. + + @note: Exceptions raised by callbacks are trapped and logged. They will + not propagate up to make sure other callbacks will still be + called, and the event dispatching allways succeeds. + + @param args: Positional arguments to the callable. 
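A minimal sketch of how the two argument sets are combined (assuming the vendored twisted.words.xish.utility module is importable; the observer and its arguments are illustrative):

    from twisted.words.xish.utility import CallbackList

    calls = []
    def observer(tag, obj, verbose=False):
        calls.append((tag, obj, verbose))

    cbl = CallbackList()
    cbl.addCallback(False, observer, 'presence', verbose=True)  # callback-specific set
    cbl.callback('stanza')                                      # event-specific set
    # observer was invoked as observer('presence', 'stanza', verbose=True)
    assert calls == [('presence', 'stanza', True)]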
+ @type args: C{list} + @param kwargs: Keyword arguments to the callable. + @type kwargs: C{dict} + """ + + for key, (methodwrapper, onetime) in self.callbacks.items(): + try: + methodwrapper(*args, **kwargs) + except: + log.err() + + if onetime: + del self.callbacks[key] + + + def isEmpty(self): + """ + Return if list of registered callbacks is empty. + + @rtype: C{bool} + """ + + return len(self.callbacks) == 0 + + + +class EventDispatcher: + """ + Event dispatching service. + + The C{EventDispatcher} allows observers to be registered for certain events + that are dispatched. There are two types of events: XPath events and Named + events. + + Every dispatch is triggered by calling L{dispatch} with a data object and, + for named events, the name of the event. + + When an XPath type event is dispatched, the associated object is assumed to + be an L{Element} instance, which is + matched against all registered XPath queries. For every match, the + respective observer will be called with the data object. + + A named event will simply call each registered observer for that particular + event name, with the data object. Unlike XPath type events, the data object + is not restricted to L{Element}, but can + be anything. + + When registering observers, the event that is to be observed is specified + using an L{xpath.XPathQuery} instance or a string. In the latter case, the + string can also contain the string representation of an XPath expression. + To distinguish these from named events, each named event should start with + a special prefix that is stored in C{self.prefix}. It defaults to + C{//event/}. + + Observers registered using L{addObserver} are persistent: after the + observer has been triggered by a dispatch, it remains registered for a + possible next dispatch. If instead L{addOnetimeObserver} was used to + observe an event, the observer is removed from the list of observers after + the first observed event. + + Obsevers can also prioritized, by providing an optional C{priority} + parameter to the L{addObserver} and L{addOnetimeObserver} methods. Higher + priority observers are then called before lower priority observers. + + Finally, observers can be unregistered by using L{removeObserver}. + """ + + def __init__(self, eventprefix="//event/"): + self.prefix = eventprefix + self._eventObservers = {} + self._xpathObservers = {} + self._dispatchDepth = 0 # Flag indicating levels of dispatching + # in progress + self._updateQueue = [] # Queued updates for observer ops + + + def _getEventAndObservers(self, event): + if isinstance(event, xpath.XPathQuery): + # Treat as xpath + observers = self._xpathObservers + else: + if self.prefix == event[:len(self.prefix)]: + # Treat as event + observers = self._eventObservers + else: + # Treat as xpath + event = xpath.internQuery(event) + observers = self._xpathObservers + + return event, observers + + + def addOnetimeObserver(self, event, observerfn, priority=0, *args, **kwargs): + """ + Register a one-time observer for an event. + + Like L{addObserver}, but is only triggered at most once. See there + for a description of the parameters. + """ + self._addObserver(True, event, observerfn, priority, *args, **kwargs) + + + def addObserver(self, event, observerfn, priority=0, *args, **kwargs): + """ + Register an observer for an event. + + Each observer will be registered with a certain priority. Higher + priority observers get called before lower priority observers. + + @param event: Name or XPath query for the event to be monitored. 
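A minimal sketch of registering observers for both event types and dispatching to them (assuming the vendored package is importable; the named event '//event/myapp/ping' is illustrative):

    from twisted.words.xish import domish
    from twisted.words.xish.utility import EventDispatcher

    seen = []
    dispatcher = EventDispatcher()

    # Named event: the name starts with the dispatcher prefix (default '//event/').
    dispatcher.addObserver('//event/myapp/ping', seen.append)

    # XPath event: matched against every dispatched Element.
    dispatcher.addObserver('/message/body', seen.append)

    dispatcher.dispatch('any object at all', '//event/myapp/ping')

    msg = domish.Element((None, 'message'))
    msg.addElement('body', content='Hi!')
    dispatcher.dispatch(msg)            # matches the '/message/body' observer
    assert len(seen) == 2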
+ @type event: C{str} or L{xpath.XPathQuery}. + @param observerfn: Function to be called when the specified event + has been triggered. This callable takes + one parameter: the data object that triggered + the event. When specified, the C{*args} and + C{**kwargs} parameters to addObserver are being used + as additional parameters to the registered observer + callable. + @param priority: (Optional) priority of this observer in relation to + other observer that match the same event. Defaults to + C{0}. + @type priority: C{int} + """ + self._addObserver(False, event, observerfn, priority, *args, **kwargs) + + + def _addObserver(self, onetime, event, observerfn, priority, *args, **kwargs): + # If this is happening in the middle of the dispatch, queue + # it up for processing after the dispatch completes + if self._dispatchDepth > 0: + self._updateQueue.append(lambda:self._addObserver(onetime, event, observerfn, priority, *args, **kwargs)) + return + + event, observers = self._getEventAndObservers(event) + + if priority not in observers: + cbl = CallbackList() + observers[priority] = {event: cbl} + else: + priorityObservers = observers[priority] + if event not in priorityObservers: + cbl = CallbackList() + observers[priority][event] = cbl + else: + cbl = priorityObservers[event] + + cbl.addCallback(onetime, observerfn, *args, **kwargs) + + + def removeObserver(self, event, observerfn): + """ + Remove callable as observer for an event. + + The observer callable is removed for all priority levels for the + specified event. + + @param event: Event for which the observer callable was registered. + @type event: C{str} or L{xpath.XPathQuery} + @param observerfn: Observer callable to be unregistered. + """ + + # If this is happening in the middle of the dispatch, queue + # it up for processing after the dispatch completes + if self._dispatchDepth > 0: + self._updateQueue.append(lambda:self.removeObserver(event, observerfn)) + return + + event, observers = self._getEventAndObservers(event) + + emptyLists = [] + for priority, priorityObservers in observers.iteritems(): + for query, callbacklist in priorityObservers.iteritems(): + if event == query: + callbacklist.removeCallback(observerfn) + if callbacklist.isEmpty(): + emptyLists.append((priority, query)) + + for priority, query in emptyLists: + del observers[priority][query] + + + def dispatch(self, obj, event=None): + """ + Dispatch an event. + + When C{event} is C{None}, an XPath type event is triggered, and + C{obj} is assumed to be an instance of + L{Element}. Otherwise, C{event} + holds the name of the named event being triggered. In the latter case, + C{obj} can be anything. + + @param obj: The object to be dispatched. + @param event: Optional event name. 
+ @type event: C{str} + """ + + foundTarget = False + + self._dispatchDepth += 1 + + if event != None: + # Named event + observers = self._eventObservers + match = lambda query, obj: query == event + else: + # XPath event + observers = self._xpathObservers + match = lambda query, obj: query.matches(obj) + + priorities = observers.keys() + priorities.sort() + priorities.reverse() + + emptyLists = [] + for priority in priorities: + for query, callbacklist in observers[priority].iteritems(): + if match(query, obj): + callbacklist.callback(obj) + foundTarget = True + if callbacklist.isEmpty(): + emptyLists.append((priority, query)) + + for priority, query in emptyLists: + del observers[priority][query] + + self._dispatchDepth -= 1 + + # If this is a dispatch within a dispatch, don't + # do anything with the updateQueue -- it needs to + # wait until we've back all the way out of the stack + if self._dispatchDepth == 0: + # Deal with pending update operations + for f in self._updateQueue: + f() + self._updateQueue = [] + + return foundTarget + + + +class XmlPipe(object): + """ + XML stream pipe. + + Connects two objects that communicate stanzas through an XML stream like + interface. Each of the ends of the pipe (sink and source) can be used to + send XML stanzas to the other side, or add observers to process XML stanzas + that were sent from the other side. + + XML pipes are usually used in place of regular XML streams that are + transported over TCP. This is the reason for the use of the names source + and sink for both ends of the pipe. The source side corresponds with the + entity that initiated the TCP connection, whereas the sink corresponds with + the entity that accepts that connection. In this object, though, the source + and sink are treated equally. + + Unlike Jabber + L{XmlStream}s, the sink + and source objects are assumed to represent an eternal connected and + initialized XML stream. As such, events corresponding to connection, + disconnection, initialization and stream errors are not dispatched or + processed. + + @since: 8.2 + @ivar source: Source XML stream. + @ivar sink: Sink XML stream. + """ + + def __init__(self): + self.source = EventDispatcher() + self.sink = EventDispatcher() + self.source.send = lambda obj: self.sink.dispatch(obj) + self.sink.send = lambda obj: self.source.dispatch(obj) diff --git a/vendor/Twisted-10.0.0/twisted/words/xish/xmlstream.py b/vendor/Twisted-10.0.0/twisted/words/xish/xmlstream.py new file mode 100644 index 000000000000..90f4c8e5bc83 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/xish/xmlstream.py @@ -0,0 +1,261 @@ +# -*- test-case-name: twisted.words.test.test_xmlstream -*- +# +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +XML Stream processing. + +An XML Stream is defined as a connection over which two XML documents are +exchanged during the lifetime of the connection, one for each direction. The +unit of interaction is a direct child element of the root element (stanza). + +The most prominent use of XML Streams is Jabber, but this module is generically +usable. See Twisted Words for Jabber specific protocol support. 
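A minimal sketch of wiring both ends of the pipe together in memory (assuming the vendored package is importable):

    from twisted.words.xish import domish
    from twisted.words.xish.utility import XmlPipe

    pipe = XmlPipe()
    received = []
    pipe.sink.addObserver('/message', received.append)   # observe stanzas reaching the sink

    msg = domish.Element((None, 'message'))
    pipe.source.send(msg)        # sent on the source side, dispatched on the sink side
    assert received == [msg]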
+ +Maintainer: Ralph Meijer +""" + +from twisted.python import failure +from twisted.internet import protocol +from twisted.words.xish import domish, utility + +STREAM_CONNECTED_EVENT = intern("//event/stream/connected") +STREAM_START_EVENT = intern("//event/stream/start") +STREAM_END_EVENT = intern("//event/stream/end") +STREAM_ERROR_EVENT = intern("//event/stream/error") + +class XmlStream(protocol.Protocol, utility.EventDispatcher): + """ Generic Streaming XML protocol handler. + + This protocol handler will parse incoming data as XML and dispatch events + accordingly. Incoming stanzas can be handled by registering observers using + XPath-like expressions that are matched against each stanza. See + L{utility.EventDispatcher} for details. + """ + def __init__(self): + utility.EventDispatcher.__init__(self) + self.stream = None + self.rawDataOutFn = None + self.rawDataInFn = None + + def _initializeStream(self): + """ Sets up XML Parser. """ + self.stream = domish.elementStream() + self.stream.DocumentStartEvent = self.onDocumentStart + self.stream.ElementEvent = self.onElement + self.stream.DocumentEndEvent = self.onDocumentEnd + + ### -------------------------------------------------------------- + ### + ### Protocol events + ### + ### -------------------------------------------------------------- + + def connectionMade(self): + """ Called when a connection is made. + + Sets up the XML parser and dispatches the L{STREAM_CONNECTED_EVENT} + event indicating the connection has been established. + """ + self._initializeStream() + self.dispatch(self, STREAM_CONNECTED_EVENT) + + def dataReceived(self, data): + """ Called whenever data is received. + + Passes the data to the XML parser. This can result in calls to the + DOM handlers. If a parse error occurs, the L{STREAM_ERROR_EVENT} event + is called to allow for cleanup actions, followed by dropping the + connection. + """ + try: + if self.rawDataInFn: + self.rawDataInFn(data) + self.stream.parse(data) + except domish.ParserError: + self.dispatch(failure.Failure(), STREAM_ERROR_EVENT) + self.transport.loseConnection() + + def connectionLost(self, reason): + """ Called when the connection is shut down. + + Dispatches the L{STREAM_END_EVENT}. + """ + self.dispatch(self, STREAM_END_EVENT) + self.stream = None + + ### -------------------------------------------------------------- + ### + ### DOM events + ### + ### -------------------------------------------------------------- + + def onDocumentStart(self, rootElement): + """ Called whenever the start tag of a root element has been received. + + Dispatches the L{STREAM_START_EVENT}. + """ + self.dispatch(self, STREAM_START_EVENT) + + def onElement(self, element): + """ Called whenever a direct child element of the root element has + been received. + + Dispatches the received element. + """ + self.dispatch(element) + + def onDocumentEnd(self): + """ Called whenever the end tag of the root element has been received. + + Closes the connection. This causes C{connectionLost} being called. + """ + self.transport.loseConnection() + + def setDispatchFn(self, fn): + """ Set another function to handle elements. """ + self.stream.ElementEvent = fn + + def resetDispatchFn(self): + """ Set the default function (C{onElement}) to handle elements. """ + self.stream.ElementEvent = self.onElement + + def send(self, obj): + """ Send data over the stream. + + Sends the given C{obj} over the connection. C{obj} may be instances of + L{domish.Element}, L{unicode} and L{str}. 
The first two will be + properly serialized and/or encoded. L{str} objects must be in UTF-8 + encoding. + + Note: because it is easy to make mistakes in maintaining a properly + encoded L{str} object, it is advised to use L{unicode} objects + everywhere when dealing with XML Streams. + + @param obj: Object to be sent over the stream. + @type obj: L{domish.Element}, L{domish} or L{str} + + """ + if domish.IElement.providedBy(obj): + obj = obj.toXml() + + if isinstance(obj, unicode): + obj = obj.encode('utf-8') + + if self.rawDataOutFn: + self.rawDataOutFn(obj) + + self.transport.write(obj) + + + +class BootstrapMixin(object): + """ + XmlStream factory mixin to install bootstrap event observers. + + This mixin is for factories providing + L{IProtocolFactory} to make + sure bootstrap event observers are set up on protocols, before incoming + data is processed. Such protocols typically derive from + L{utility.EventDispatcher}, like L{XmlStream}. + + You can set up bootstrap event observers using C{addBootstrap}. The + C{event} and C{fn} parameters correspond with the C{event} and + C{observerfn} arguments to L{utility.EventDispatcher.addObserver}. + + @since: 8.2. + @ivar bootstraps: The list of registered bootstrap event observers. + @type bootstrap: C{list} + """ + + def __init__(self): + self.bootstraps = [] + + + def installBootstraps(self, dispatcher): + """ + Install registered bootstrap observers. + + @param dispatcher: Event dispatcher to add the observers to. + @type dispatcher: L{utility.EventDispatcher} + """ + for event, fn in self.bootstraps: + dispatcher.addObserver(event, fn) + + + def addBootstrap(self, event, fn): + """ + Add a bootstrap event handler. + + @param event: The event to register an observer for. + @type event: C{str} or L{xpath.XPathQuery} + @param fn: The observer callable to be registered. + """ + self.bootstraps.append((event, fn)) + + + def removeBootstrap(self, event, fn): + """ + Remove a bootstrap event handler. + + @param event: The event the observer is registered for. + @type event: C{str} or L{xpath.XPathQuery} + @param fn: The registered observer callable. + """ + self.bootstraps.remove((event, fn)) + + + +class XmlStreamFactoryMixin(BootstrapMixin): + """ + XmlStream factory mixin that takes care of event handlers. + + All positional and keyword arguments passed to create this factory are + passed on as-is to the protocol. + + @ivar args: Positional arguments passed to the protocol upon instantiation. + @type args: C{tuple}. + @ivar kwargs: Keyword arguments passed to the protocol upon instantiation. + @type kwargs: C{dict}. + """ + + def __init__(self, *args, **kwargs): + BootstrapMixin.__init__(self) + self.args = args + self.kwargs = kwargs + + + def buildProtocol(self, addr): + """ + Create an instance of XmlStream. + + The returned instance will have bootstrap event observers registered + and will proceed to handle input on an incoming connection. + """ + xs = self.protocol(*self.args, **self.kwargs) + xs.factory = self + self.installBootstraps(xs) + return xs + + + +class XmlStreamFactory(XmlStreamFactoryMixin, + protocol.ReconnectingClientFactory): + """ + Factory for XmlStream protocol objects as a reconnection client. + """ + + protocol = XmlStream + + def buildProtocol(self, addr): + """ + Create a protocol instance. + + Overrides L{XmlStreamFactoryMixin.buildProtocol} to work with + a L{ReconnectingClientFactory}. 
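A minimal client-side sketch combining the reconnecting factory with bootstrap observers (assuming the vendored package is importable; the host, port and opening stream header are illustrative and not a complete Jabber handshake):

    from twisted.internet import reactor
    from twisted.words.xish import xmlstream

    stanzas = []

    def onConnected(xs):
        # Open our side of the stream once the TCP connection is established.
        xs.send("<stream:stream xmlns='jabber:client' "
                "xmlns:stream='http://etherx.jabber.org/streams' to='example.org'>")

    factory = xmlstream.XmlStreamFactory()
    factory.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, onConnected)
    factory.addBootstrap('/message/body', stanzas.append)   # collect incoming messages

    reactor.connectTCP('example.org', 5222, factory)
    reactor.run()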
As this is called upon having an + connection established, we are resetting the delay for reconnection + attempts when the connection is lost again. + """ + self.resetDelay() + return XmlStreamFactoryMixin.buildProtocol(self, addr) diff --git a/vendor/Twisted-10.0.0/twisted/words/xish/xpath.py b/vendor/Twisted-10.0.0/twisted/words/xish/xpath.py new file mode 100644 index 000000000000..9505c38d932b --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/xish/xpath.py @@ -0,0 +1,333 @@ +# -*- test-case-name: twisted.words.test.test_xpath -*- +# +# Copyright (c) 2001-2007 Twisted Matrix Laboratories. +# See LICENSE for details. + +""" +XPath query support. + +This module provides L{XPathQuery} to match +L{domish.Element} instances against +XPath-like expressions. +""" + +try: + import cStringIO as StringIO +except ImportError: + import StringIO + +class LiteralValue(str): + def value(self, elem): + return self + + +class IndexValue: + def __init__(self, index): + self.index = int(index) - 1 + + def value(self, elem): + return elem.children[self.index] + + +class AttribValue: + def __init__(self, attribname): + self.attribname = attribname + if self.attribname == "xmlns": + self.value = self.value_ns + + def value_ns(self, elem): + return elem.uri + + def value(self, elem): + if self.attribname in elem.attributes: + return elem.attributes[self.attribname] + else: + return None + + +class CompareValue: + def __init__(self, lhs, op, rhs): + self.lhs = lhs + self.rhs = rhs + if op == "=": + self.value = self._compareEqual + else: + self.value = self._compareNotEqual + + def _compareEqual(self, elem): + return self.lhs.value(elem) == self.rhs.value(elem) + + def _compareNotEqual(self, elem): + return self.lhs.value(elem) != self.rhs.value(elem) + + +class BooleanValue: + """ + Provide boolean XPath expression operators. + + @ivar lhs: Left hand side expression of the operator. + @ivar op: The operator. One of C{'and'}, C{'or'}. + @ivar rhs: Right hand side expression of the operator. + @ivar value: Reference to the method that will calculate the value of + this expression given an element. + """ + def __init__(self, lhs, op, rhs): + self.lhs = lhs + self.rhs = rhs + if op == "and": + self.value = self._booleanAnd + else: + self.value = self._booleanOr + + def _booleanAnd(self, elem): + """ + Calculate boolean and of the given expressions given an element. + + @param elem: The element to calculate the value of the expression from. + """ + return self.lhs.value(elem) and self.rhs.value(elem) + + def _booleanOr(self, elem): + """ + Calculate boolean or of the given expressions given an element. + + @param elem: The element to calculate the value of the expression from. 
+ """ + return self.lhs.value(elem) or self.rhs.value(elem) + + +def Function(fname): + """ + Internal method which selects the function object + """ + klassname = "_%s_Function" % fname + c = globals()[klassname]() + return c + + +class _not_Function: + def __init__(self): + self.baseValue = None + + def setParams(self, baseValue): + self.baseValue = baseValue + + def value(self, elem): + return not self.baseValue.value(elem) + + +class _text_Function: + def setParams(self): + pass + + def value(self, elem): + return str(elem) + + +class _Location: + def __init__(self): + self.predicates = [] + self.elementName = None + self.childLocation = None + + def matchesPredicates(self, elem): + if self.elementName != None and self.elementName != elem.name: + return 0 + + for p in self.predicates: + if not p.value(elem): + return 0 + + return 1 + + def matches(self, elem): + if not self.matchesPredicates(elem): + return 0 + + if self.childLocation != None: + for c in elem.elements(): + if self.childLocation.matches(c): + return 1 + else: + return 1 + + return 0 + + def queryForString(self, elem, resultbuf): + if not self.matchesPredicates(elem): + return + + if self.childLocation != None: + for c in elem.elements(): + self.childLocation.queryForString(c, resultbuf) + else: + resultbuf.write(str(elem)) + + def queryForNodes(self, elem, resultlist): + if not self.matchesPredicates(elem): + return + + if self.childLocation != None: + for c in elem.elements(): + self.childLocation.queryForNodes(c, resultlist) + else: + resultlist.append(elem) + + def queryForStringList(self, elem, resultlist): + if not self.matchesPredicates(elem): + return + + if self.childLocation != None: + for c in elem.elements(): + self.childLocation.queryForStringList(c, resultlist) + else: + for c in elem.children: + if isinstance(c, (str, unicode)): + resultlist.append(c) + + +class _AnyLocation: + def __init__(self): + self.predicates = [] + self.elementName = None + self.childLocation = None + + def matchesPredicates(self, elem): + for p in self.predicates: + if not p.value(elem): + return 0 + return 1 + + def listParents(self, elem, parentlist): + if elem.parent != None: + self.listParents(elem.parent, parentlist) + parentlist.append(elem.name) + + def isRootMatch(self, elem): + if (self.elementName == None or self.elementName == elem.name) and \ + self.matchesPredicates(elem): + if self.childLocation != None: + for c in elem.elements(): + if self.childLocation.matches(c): + return True + else: + return True + return False + + def findFirstRootMatch(self, elem): + if (self.elementName == None or self.elementName == elem.name) and \ + self.matchesPredicates(elem): + # Thus far, the name matches and the predicates match, + # now check into the children and find the first one + # that matches the rest of the structure + # the rest of the structure + if self.childLocation != None: + for c in elem.elements(): + if self.childLocation.matches(c): + return c + return None + else: + # No children locations; this is a match! + return elem + else: + # Ok, predicates or name didn't match, so we need to start + # down each child and treat it as the root and try + # again + for c in elem.elements(): + if self.matches(c): + return c + # No children matched... 
+ return None + + def matches(self, elem): + if self.isRootMatch(elem): + return True + else: + # Ok, initial element isn't an exact match, walk + # down each child and treat it as the root and try + # again + for c in elem.elements(): + if self.matches(c): + return True + # No children matched... + return False + + def queryForString(self, elem, resultbuf): + raise NotImplementedError( + "queryForString is not implemented for any location") + + def queryForNodes(self, elem, resultlist): + # First check to see if _this_ element is a root + if self.isRootMatch(elem): + resultlist.append(elem) + + # Now check each child + for c in elem.elements(): + self.queryForNodes(c, resultlist) + + + def queryForStringList(self, elem, resultlist): + if self.isRootMatch(elem): + for c in elem.children: + if isinstance(c, (str, unicode)): + resultlist.append(c) + for c in elem.elements(): + self.queryForStringList(c, resultlist) + + +class XPathQuery: + def __init__(self, queryStr): + self.queryStr = queryStr + from twisted.words.xish.xpathparser import parse + self.baseLocation = parse('XPATH', queryStr) + + def __hash__(self): + return self.queryStr.__hash__() + + def matches(self, elem): + return self.baseLocation.matches(elem) + + def queryForString(self, elem): + result = StringIO.StringIO() + self.baseLocation.queryForString(elem, result) + return result.getvalue() + + def queryForNodes(self, elem): + result = [] + self.baseLocation.queryForNodes(elem, result) + if len(result) == 0: + return None + else: + return result + + def queryForStringList(self, elem): + result = [] + self.baseLocation.queryForStringList(elem, result) + if len(result) == 0: + return None + else: + return result + + +__internedQueries = {} + +def internQuery(queryString): + if queryString not in __internedQueries: + __internedQueries[queryString] = XPathQuery(queryString) + return __internedQueries[queryString] + + +def matches(xpathstr, elem): + return internQuery(xpathstr).matches(elem) + + +def queryForStringList(xpathstr, elem): + return internQuery(xpathstr).queryForStringList(elem) + + +def queryForString(xpathstr, elem): + return internQuery(xpathstr).queryForString(elem) + + +def queryForNodes(xpathstr, elem): + return internQuery(xpathstr).queryForNodes(elem) diff --git a/vendor/Twisted-10.0.0/twisted/words/xish/xpathparser.g b/vendor/Twisted-10.0.0/twisted/words/xish/xpathparser.g new file mode 100644 index 000000000000..4e51636655bd --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/xish/xpathparser.g @@ -0,0 +1,375 @@ +# Copyright (c) 2001-2007 Twisted Matrix Laboratories. +# See LICENSE for details. + +# DO NOT EDIT xpathparser.py! +# +# It is generated from xpathparser.g using Yapps. Make needed changes there. +# This also means that the generated Python may not conform to Twisted's coding +# standards. + +# HOWTO Generate me: +# +# 1.) Grab a copy of yapps2, version 2.1.1: +# http://theory.stanford.edu/~amitp/Yapps/ +# +# Note: Do NOT use the package in debian/ubuntu as it has incompatible +# modifications. +# +# 2.) Generate the grammar: +# +# yapps2 xpathparser.g xpathparser.py.proto +# +# 3.) Edit the output to depend on the embedded runtime, not yappsrt. +# +# sed -e '/^import yapps/d' -e '/^[^#]/s/yappsrt\.//g' \ +# xpathparser.py.proto > xpathparser.py + +""" +XPath Parser. + +Besides the parser code produced by Yapps, this module also defines the +parse-time exception classes, a scanner class, a base class for parsers +produced by Yapps, and a context class that keeps track of the parse stack. 
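A minimal sketch of the module-level query helpers defined above (assuming the vendored package is importable; expected results are shown as comments):

    from twisted.words.xish import domish, xpath

    msg = domish.Element((None, 'message'))
    msg['type'] = 'chat'
    msg.addElement('body', content='Hi!')

    xpath.matches("/message[@type='chat']/body", msg)    # True
    xpath.queryForString('/message/body', msg)            # 'Hi!'
    xpath.queryForNodes('/message/body', msg)             # [the body Element]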
+These have been copied from the Yapps runtime. +""" + +import sys, re + +class SyntaxError(Exception): + """When we run into an unexpected token, this is the exception to use""" + def __init__(self, charpos=-1, msg="Bad Token", context=None): + Exception.__init__(self) + self.charpos = charpos + self.msg = msg + self.context = context + + def __str__(self): + if self.charpos < 0: return 'SyntaxError' + else: return 'SyntaxError@char%s(%s)' % (repr(self.charpos), self.msg) + +class NoMoreTokens(Exception): + """Another exception object, for when we run out of tokens""" + pass + +class Scanner: + """Yapps scanner. + + The Yapps scanner can work in context sensitive or context + insensitive modes. The token(i) method is used to retrieve the + i-th token. It takes a restrict set that limits the set of tokens + it is allowed to return. In context sensitive mode, this restrict + set guides the scanner. In context insensitive mode, there is no + restriction (the set is always the full set of tokens). + + """ + + def __init__(self, patterns, ignore, input): + """Initialize the scanner. + + @param patterns: [(terminal, uncompiled regex), ...] or C{None} + @param ignore: [terminal,...] + @param input: string + + If patterns is C{None}, we assume that the subclass has defined + C{self.patterns} : [(terminal, compiled regex), ...]. Note that the + patterns parameter expects uncompiled regexes, whereas the + C{self.patterns} field expects compiled regexes. + """ + self.tokens = [] # [(begin char pos, end char pos, token name, matched text), ...] + self.restrictions = [] + self.input = input + self.pos = 0 + self.ignore = ignore + self.first_line_number = 1 + + if patterns is not None: + # Compile the regex strings into regex objects + self.patterns = [] + for terminal, regex in patterns: + self.patterns.append( (terminal, re.compile(regex)) ) + + def get_token_pos(self): + """Get the current token position in the input text.""" + return len(self.tokens) + + def get_char_pos(self): + """Get the current char position in the input text.""" + return self.pos + + def get_prev_char_pos(self, i=None): + """Get the previous position (one token back) in the input text.""" + if self.pos == 0: return 0 + if i is None: i = -1 + return self.tokens[i][0] + + def get_line_number(self): + """Get the line number of the current position in the input text.""" + # TODO: make this work at any token/char position + return self.first_line_number + self.get_input_scanned().count('\n') + + def get_column_number(self): + """Get the column number of the current position in the input text.""" + s = self.get_input_scanned() + i = s.rfind('\n') # may be -1, but that's okay in this case + return len(s) - (i+1) + + def get_input_scanned(self): + """Get the portion of the input that has been tokenized.""" + return self.input[:self.pos] + + def get_input_unscanned(self): + """Get the portion of the input that has not yet been tokenized.""" + return self.input[self.pos:] + + def token(self, i, restrict=None): + """Get the i'th token in the input. + + If C{i} is one past the end, then scan for another token. + + @param i: token index + + @param restrict: [token, ...] or C{None}; if restrict is + C{None}, then any token is allowed. You may call + token(i) more than once. However, the restrict set + may never be larger than what was passed in on the + first call to token(i). + """ + if i == len(self.tokens): + self.scan(restrict) + if i < len(self.tokens): + # Make sure the restriction is more restricted. 
This + # invariant is needed to avoid ruining tokenization at + # position i+1 and higher. + if restrict and self.restrictions[i]: + for r in restrict: + if r not in self.restrictions[i]: + raise NotImplementedError("Unimplemented: restriction set changed") + return self.tokens[i] + raise NoMoreTokens() + + def __repr__(self): + """Print the last 10 tokens that have been scanned in""" + output = '' + for t in self.tokens[-10:]: + output = '%s\n (@%s) %s = %s' % (output,t[0],t[2],repr(t[3])) + return output + + def scan(self, restrict): + """Should scan another token and add it to the list, self.tokens, + and add the restriction to self.restrictions""" + # Keep looking for a token, ignoring any in self.ignore + while 1: + # Search the patterns for the longest match, with earlier + # tokens in the list having preference + best_match = -1 + best_pat = '(error)' + for p, regexp in self.patterns: + # First check to see if we're ignoring this token + if restrict and p not in restrict and p not in self.ignore: + continue + m = regexp.match(self.input, self.pos) + if m and len(m.group(0)) > best_match: + # We got a match that's better than the previous one + best_pat = p + best_match = len(m.group(0)) + + # If we didn't find anything, raise an error + if best_pat == '(error)' and best_match < 0: + msg = 'Bad Token' + if restrict: + msg = 'Trying to find one of '+', '.join(restrict) + raise SyntaxError(self.pos, msg) + + # If we found something that isn't to be ignored, return it + if best_pat not in self.ignore: + # Create a token with this data + token = (self.pos, self.pos+best_match, best_pat, + self.input[self.pos:self.pos+best_match]) + self.pos = self.pos + best_match + # Only add this token if it's not in the list + # (to prevent looping) + if not self.tokens or token != self.tokens[-1]: + self.tokens.append(token) + self.restrictions.append(restrict) + return + else: + # This token should be ignored .. + self.pos = self.pos + best_match + +class Parser: + """Base class for Yapps-generated parsers. + + """ + + def __init__(self, scanner): + self._scanner = scanner + self._pos = 0 + + def _peek(self, *types): + """Returns the token type for lookahead; if there are any args + then the list of args is the set of token types to allow""" + tok = self._scanner.token(self._pos, types) + return tok[2] + + def _scan(self, type): + """Returns the matched text, and moves to the next token""" + tok = self._scanner.token(self._pos, [type]) + if tok[2] != type: + raise SyntaxError(tok[0], 'Trying to find '+type+' :'+ ' ,'.join(self._scanner.restrictions[self._pos])) + self._pos = 1 + self._pos + return tok[3] + +class Context: + """Class to represent the parser's call stack. + + Every rule creates a Context that links to its parent rule. The + contexts can be used for debugging. + + """ + + def __init__(self, parent, scanner, tokenpos, rule, args=()): + """Create a new context. 
+ + @param parent: Context object or C{None} + @param scanner: Scanner object + @param tokenpos: scanner token position + @type tokenpos: L{int} + @param rule: name of the rule + @type rule: L{str} + @param args: tuple listing parameters to the rule + + """ + self.parent = parent + self.scanner = scanner + self.tokenpos = tokenpos + self.rule = rule + self.args = args + + def __str__(self): + output = '' + if self.parent: output = str(self.parent) + ' > ' + output += self.rule + return output + +def print_line_with_pointer(text, p): + """Print the line of 'text' that includes position 'p', + along with a second line with a single caret (^) at position p""" + + # TODO: separate out the logic for determining the line/character + # location from the logic for determining how to display an + # 80-column line to stderr. + + # Now try printing part of the line + text = text[max(p-80, 0):p+80] + p = p - max(p-80, 0) + + # Strip to the left + i = text[:p].rfind('\n') + j = text[:p].rfind('\r') + if i < 0 or (0 <= j < i): i = j + if 0 <= i < p: + p = p - i - 1 + text = text[i+1:] + + # Strip to the right + i = text.find('\n', p) + j = text.find('\r', p) + if i < 0 or (0 <= j < i): i = j + if i >= 0: + text = text[:i] + + # Now shorten the text + while len(text) > 70 and p > 60: + # Cut off 10 chars + text = "..." + text[10:] + p = p - 7 + + # Now print the string, along with an indicator + print >>sys.stderr, '> ',text + print >>sys.stderr, '> ',' '*p + '^' + +def print_error(input, err, scanner): + """Print error messages, the parser stack, and the input text -- for human-readable error messages.""" + # NOTE: this function assumes 80 columns :-( + # Figure out the line number + line_number = scanner.get_line_number() + column_number = scanner.get_column_number() + print >>sys.stderr, '%d:%d: %s' % (line_number, column_number, err.msg) + + context = err.context + if not context: + print_line_with_pointer(input, err.charpos) + + while context: + # TODO: add line number + print >>sys.stderr, 'while parsing %s%s:' % (context.rule, tuple(context.args)) + print_line_with_pointer(input, context.scanner.get_prev_char_pos(context.tokenpos)) + context = context.parent + +def wrap_error_reporter(parser, rule): + try: + return getattr(parser, rule)() + except SyntaxError, e: + input = parser._scanner.input + print_error(input, e, parser._scanner) + except NoMoreTokens: + print >>sys.stderr, 'Could not complete parsing; stopped around here:' + print >>sys.stderr, parser._scanner + + +from twisted.words.xish.xpath import AttribValue, BooleanValue, CompareValue +from twisted.words.xish.xpath import Function, IndexValue, LiteralValue +from twisted.words.xish.xpath import _AnyLocation, _Location + +%% +parser XPathParser: + ignore: "\\s+" + token INDEX: "[0-9]+" + token WILDCARD: "\*" + token IDENTIFIER: "[a-zA-Z][a-zA-Z0-9_\-]*" + token ATTRIBUTE: "\@[a-zA-Z][a-zA-Z0-9_\-]*" + token FUNCNAME: "[a-zA-Z][a-zA-Z0-9_]*" + token CMP_EQ: "\=" + token CMP_NE: "\!\=" + token STR_DQ: '"([^"]|(\\"))*?"' + token STR_SQ: "'([^']|(\\'))*?'" + token OP_AND: "and" + token OP_OR: "or" + token END: "$" + + rule XPATH: PATH {{ result = PATH; current = result }} + ( PATH {{ current.childLocation = PATH; current = current.childLocation }} ) * END + {{ return result }} + + rule PATH: ("/" {{ result = _Location() }} | "//" {{ result = _AnyLocation() }} ) + ( IDENTIFIER {{ result.elementName = IDENTIFIER }} | WILDCARD {{ result.elementName = None }} ) + ( "\[" PREDICATE {{ result.predicates.append(PREDICATE) }} "\]")* + {{ return 
result }} + + rule PREDICATE: EXPR {{ return EXPR }} | + INDEX {{ return IndexValue(INDEX) }} + + rule EXPR: FACTOR {{ e = FACTOR }} + ( BOOLOP FACTOR {{ e = BooleanValue(e, BOOLOP, FACTOR) }} )* + {{ return e }} + + rule BOOLOP: ( OP_AND {{ return OP_AND }} | OP_OR {{ return OP_OR }} ) + + rule FACTOR: TERM {{ return TERM }} + | "\(" EXPR "\)" {{ return EXPR }} + + rule TERM: VALUE {{ t = VALUE }} + [ CMP VALUE {{ t = CompareValue(t, CMP, VALUE) }} ] + {{ return t }} + + rule VALUE: "@" IDENTIFIER {{ return AttribValue(IDENTIFIER) }} | + FUNCNAME {{ f = Function(FUNCNAME); args = [] }} + "\(" [ VALUE {{ args.append(VALUE) }} + ( + "," VALUE {{ args.append(VALUE) }} + )* + ] "\)" {{ f.setParams(*args); return f }} | + STR {{ return LiteralValue(STR[1:len(STR)-1]) }} + + rule CMP: (CMP_EQ {{ return CMP_EQ }} | CMP_NE {{ return CMP_NE }}) + rule STR: (STR_DQ {{ return STR_DQ }} | STR_SQ {{ return STR_SQ }}) diff --git a/vendor/Twisted-10.0.0/twisted/words/xish/xpathparser.py b/vendor/Twisted-10.0.0/twisted/words/xish/xpathparser.py new file mode 100644 index 000000000000..c6b235c48aac --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/xish/xpathparser.py @@ -0,0 +1,508 @@ +# Copyright (c) 2001-2007 Twisted Matrix Laboratories. +# See LICENSE for details. + +# DO NOT EDIT xpathparser.py! +# +# It is generated from xpathparser.g using Yapps. Make needed changes there. +# This also means that the generated Python may not conform to Twisted's coding +# standards. + +# HOWTO Generate me: +# +# 1.) Grab a copy of yapps2, version 2.1.1: +# http://theory.stanford.edu/~amitp/Yapps/ +# +# Note: Do NOT use the package in debian/ubuntu as it has incompatible +# modifications. +# +# 2.) Generate the grammar: +# +# yapps2 xpathparser.g xpathparser.py.proto +# +# 3.) Edit the output to depend on the embedded runtime, not yappsrt. +# +# sed -e '/^import yapps/d' -e '/^[^#]/s/yappsrt\.//g' \ +# xpathparser.py.proto > xpathparser.py + +""" +XPath Parser. + +Besides the parser code produced by Yapps, this module also defines the +parse-time exception classes, a scanner class, a base class for parsers +produced by Yapps, and a context class that keeps track of the parse stack. +These have been copied from the Yapps runtime. +""" + +import sys, re + +class SyntaxError(Exception): + """When we run into an unexpected token, this is the exception to use""" + def __init__(self, charpos=-1, msg="Bad Token", context=None): + Exception.__init__(self) + self.charpos = charpos + self.msg = msg + self.context = context + + def __str__(self): + if self.charpos < 0: return 'SyntaxError' + else: return 'SyntaxError@char%s(%s)' % (repr(self.charpos), self.msg) + +class NoMoreTokens(Exception): + """Another exception object, for when we run out of tokens""" + pass + +class Scanner: + """Yapps scanner. + + The Yapps scanner can work in context sensitive or context + insensitive modes. The token(i) method is used to retrieve the + i-th token. It takes a restrict set that limits the set of tokens + it is allowed to return. In context sensitive mode, this restrict + set guides the scanner. In context insensitive mode, there is no + restriction (the set is always the full set of tokens). + + """ + + def __init__(self, patterns, ignore, input): + """Initialize the scanner. + + @param patterns: [(terminal, uncompiled regex), ...] or C{None} + @param ignore: [terminal,...] + @param input: string + + If patterns is C{None}, we assume that the subclass has defined + C{self.patterns} : [(terminal, compiled regex), ...]. 
Note that the + patterns parameter expects uncompiled regexes, whereas the + C{self.patterns} field expects compiled regexes. + """ + self.tokens = [] # [(begin char pos, end char pos, token name, matched text), ...] + self.restrictions = [] + self.input = input + self.pos = 0 + self.ignore = ignore + self.first_line_number = 1 + + if patterns is not None: + # Compile the regex strings into regex objects + self.patterns = [] + for terminal, regex in patterns: + self.patterns.append( (terminal, re.compile(regex)) ) + + def get_token_pos(self): + """Get the current token position in the input text.""" + return len(self.tokens) + + def get_char_pos(self): + """Get the current char position in the input text.""" + return self.pos + + def get_prev_char_pos(self, i=None): + """Get the previous position (one token back) in the input text.""" + if self.pos == 0: return 0 + if i is None: i = -1 + return self.tokens[i][0] + + def get_line_number(self): + """Get the line number of the current position in the input text.""" + # TODO: make this work at any token/char position + return self.first_line_number + self.get_input_scanned().count('\n') + + def get_column_number(self): + """Get the column number of the current position in the input text.""" + s = self.get_input_scanned() + i = s.rfind('\n') # may be -1, but that's okay in this case + return len(s) - (i+1) + + def get_input_scanned(self): + """Get the portion of the input that has been tokenized.""" + return self.input[:self.pos] + + def get_input_unscanned(self): + """Get the portion of the input that has not yet been tokenized.""" + return self.input[self.pos:] + + def token(self, i, restrict=None): + """Get the i'th token in the input. + + If C{i} is one past the end, then scan for another token. + + @param i: token index + + @param restrict: [token, ...] or C{None}; if restrict is + C{None}, then any token is allowed. You may call + token(i) more than once. However, the restrict set + may never be larger than what was passed in on the + first call to token(i). + """ + if i == len(self.tokens): + self.scan(restrict) + if i < len(self.tokens): + # Make sure the restriction is more restricted. This + # invariant is needed to avoid ruining tokenization at + # position i+1 and higher. 
+ if restrict and self.restrictions[i]: + for r in restrict: + if r not in self.restrictions[i]: + raise NotImplementedError("Unimplemented: restriction set changed") + return self.tokens[i] + raise NoMoreTokens() + + def __repr__(self): + """Print the last 10 tokens that have been scanned in""" + output = '' + for t in self.tokens[-10:]: + output = '%s\n (@%s) %s = %s' % (output,t[0],t[2],repr(t[3])) + return output + + def scan(self, restrict): + """Should scan another token and add it to the list, self.tokens, + and add the restriction to self.restrictions""" + # Keep looking for a token, ignoring any in self.ignore + while 1: + # Search the patterns for the longest match, with earlier + # tokens in the list having preference + best_match = -1 + best_pat = '(error)' + for p, regexp in self.patterns: + # First check to see if we're ignoring this token + if restrict and p not in restrict and p not in self.ignore: + continue + m = regexp.match(self.input, self.pos) + if m and len(m.group(0)) > best_match: + # We got a match that's better than the previous one + best_pat = p + best_match = len(m.group(0)) + + # If we didn't find anything, raise an error + if best_pat == '(error)' and best_match < 0: + msg = 'Bad Token' + if restrict: + msg = 'Trying to find one of '+', '.join(restrict) + raise SyntaxError(self.pos, msg) + + # If we found something that isn't to be ignored, return it + if best_pat not in self.ignore: + # Create a token with this data + token = (self.pos, self.pos+best_match, best_pat, + self.input[self.pos:self.pos+best_match]) + self.pos = self.pos + best_match + # Only add this token if it's not in the list + # (to prevent looping) + if not self.tokens or token != self.tokens[-1]: + self.tokens.append(token) + self.restrictions.append(restrict) + return + else: + # This token should be ignored .. + self.pos = self.pos + best_match + +class Parser: + """Base class for Yapps-generated parsers. + + """ + + def __init__(self, scanner): + self._scanner = scanner + self._pos = 0 + + def _peek(self, *types): + """Returns the token type for lookahead; if there are any args + then the list of args is the set of token types to allow""" + tok = self._scanner.token(self._pos, types) + return tok[2] + + def _scan(self, type): + """Returns the matched text, and moves to the next token""" + tok = self._scanner.token(self._pos, [type]) + if tok[2] != type: + raise SyntaxError(tok[0], 'Trying to find '+type+' :'+ ' ,'.join(self._scanner.restrictions[self._pos])) + self._pos = 1 + self._pos + return tok[3] + +class Context: + """Class to represent the parser's call stack. + + Every rule creates a Context that links to its parent rule. The + contexts can be used for debugging. + + """ + + def __init__(self, parent, scanner, tokenpos, rule, args=()): + """Create a new context. 
+ + @param parent: Context object or C{None} + @param scanner: Scanner object + @param tokenpos: scanner token position + @type tokenpos: L{int} + @param rule: name of the rule + @type rule: L{str} + @param args: tuple listing parameters to the rule + + """ + self.parent = parent + self.scanner = scanner + self.tokenpos = tokenpos + self.rule = rule + self.args = args + + def __str__(self): + output = '' + if self.parent: output = str(self.parent) + ' > ' + output += self.rule + return output + +def print_line_with_pointer(text, p): + """Print the line of 'text' that includes position 'p', + along with a second line with a single caret (^) at position p""" + + # TODO: separate out the logic for determining the line/character + # location from the logic for determining how to display an + # 80-column line to stderr. + + # Now try printing part of the line + text = text[max(p-80, 0):p+80] + p = p - max(p-80, 0) + + # Strip to the left + i = text[:p].rfind('\n') + j = text[:p].rfind('\r') + if i < 0 or (0 <= j < i): i = j + if 0 <= i < p: + p = p - i - 1 + text = text[i+1:] + + # Strip to the right + i = text.find('\n', p) + j = text.find('\r', p) + if i < 0 or (0 <= j < i): i = j + if i >= 0: + text = text[:i] + + # Now shorten the text + while len(text) > 70 and p > 60: + # Cut off 10 chars + text = "..." + text[10:] + p = p - 7 + + # Now print the string, along with an indicator + print >>sys.stderr, '> ',text + print >>sys.stderr, '> ',' '*p + '^' + +def print_error(input, err, scanner): + """Print error messages, the parser stack, and the input text -- for human-readable error messages.""" + # NOTE: this function assumes 80 columns :-( + # Figure out the line number + line_number = scanner.get_line_number() + column_number = scanner.get_column_number() + print >>sys.stderr, '%d:%d: %s' % (line_number, column_number, err.msg) + + context = err.context + if not context: + print_line_with_pointer(input, err.charpos) + + while context: + # TODO: add line number + print >>sys.stderr, 'while parsing %s%s:' % (context.rule, tuple(context.args)) + print_line_with_pointer(input, context.scanner.get_prev_char_pos(context.tokenpos)) + context = context.parent + +def wrap_error_reporter(parser, rule): + try: + return getattr(parser, rule)() + except SyntaxError, e: + input = parser._scanner.input + print_error(input, e, parser._scanner) + except NoMoreTokens: + print >>sys.stderr, 'Could not complete parsing; stopped around here:' + print >>sys.stderr, parser._scanner + + +from twisted.words.xish.xpath import AttribValue, BooleanValue, CompareValue +from twisted.words.xish.xpath import Function, IndexValue, LiteralValue +from twisted.words.xish.xpath import _AnyLocation, _Location + + +# Begin -- grammar generated by Yapps +import sys, re + +class XPathParserScanner(Scanner): + patterns = [ + ('","', re.compile(',')), + ('"@"', re.compile('@')), + ('"\\)"', re.compile('\\)')), + ('"\\("', re.compile('\\(')), + ('"\\]"', re.compile('\\]')), + ('"\\["', re.compile('\\[')), + ('"//"', re.compile('//')), + ('"/"', re.compile('/')), + ('\\s+', re.compile('\\s+')), + ('INDEX', re.compile('[0-9]+')), + ('WILDCARD', re.compile('\\*')), + ('IDENTIFIER', re.compile('[a-zA-Z][a-zA-Z0-9_\\-]*')), + ('ATTRIBUTE', re.compile('\\@[a-zA-Z][a-zA-Z0-9_\\-]*')), + ('FUNCNAME', re.compile('[a-zA-Z][a-zA-Z0-9_]*')), + ('CMP_EQ', re.compile('\\=')), + ('CMP_NE', re.compile('\\!\\=')), + ('STR_DQ', re.compile('"([^"]|(\\"))*?"')), + ('STR_SQ', re.compile("'([^']|(\\'))*?'")), + ('OP_AND', re.compile('and')), + ('OP_OR', 
re.compile('or')), + ('END', re.compile('$')), + ] + def __init__(self, str): + Scanner.__init__(self,None,['\\s+'],str) + +class XPathParser(Parser): + Context = Context + def XPATH(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'XPATH', []) + PATH = self.PATH(_context) + result = PATH; current = result + while self._peek('END', '"/"', '"//"') != 'END': + PATH = self.PATH(_context) + current.childLocation = PATH; current = current.childLocation + if self._peek() not in ['END', '"/"', '"//"']: + raise SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['END', '"/"', '"//"'])) + END = self._scan('END') + return result + + def PATH(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'PATH', []) + _token = self._peek('"/"', '"//"') + if _token == '"/"': + self._scan('"/"') + result = _Location() + else: # == '"//"' + self._scan('"//"') + result = _AnyLocation() + _token = self._peek('IDENTIFIER', 'WILDCARD') + if _token == 'IDENTIFIER': + IDENTIFIER = self._scan('IDENTIFIER') + result.elementName = IDENTIFIER + else: # == 'WILDCARD' + WILDCARD = self._scan('WILDCARD') + result.elementName = None + while self._peek('"\\["', 'END', '"/"', '"//"') == '"\\["': + self._scan('"\\["') + PREDICATE = self.PREDICATE(_context) + result.predicates.append(PREDICATE) + self._scan('"\\]"') + if self._peek() not in ['"\\["', 'END', '"/"', '"//"']: + raise SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['"\\["', 'END', '"/"', '"//"'])) + return result + + def PREDICATE(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'PREDICATE', []) + _token = self._peek('INDEX', '"\\("', '"@"', 'FUNCNAME', 'STR_DQ', 'STR_SQ') + if _token != 'INDEX': + EXPR = self.EXPR(_context) + return EXPR + else: # == 'INDEX' + INDEX = self._scan('INDEX') + return IndexValue(INDEX) + + def EXPR(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'EXPR', []) + FACTOR = self.FACTOR(_context) + e = FACTOR + while self._peek('OP_AND', 'OP_OR', '"\\)"', '"\\]"') in ['OP_AND', 'OP_OR']: + BOOLOP = self.BOOLOP(_context) + FACTOR = self.FACTOR(_context) + e = BooleanValue(e, BOOLOP, FACTOR) + if self._peek() not in ['OP_AND', 'OP_OR', '"\\)"', '"\\]"']: + raise SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['OP_AND', 'OP_OR', '"\\)"', '"\\]"'])) + return e + + def BOOLOP(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'BOOLOP', []) + _token = self._peek('OP_AND', 'OP_OR') + if _token == 'OP_AND': + OP_AND = self._scan('OP_AND') + return OP_AND + else: # == 'OP_OR' + OP_OR = self._scan('OP_OR') + return OP_OR + + def FACTOR(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'FACTOR', []) + _token = self._peek('"\\("', '"@"', 'FUNCNAME', 'STR_DQ', 'STR_SQ') + if _token != '"\\("': + TERM = self.TERM(_context) + return TERM + else: # == '"\\("' + self._scan('"\\("') + EXPR = self.EXPR(_context) + self._scan('"\\)"') + return EXPR + + def TERM(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'TERM', []) + VALUE = self.VALUE(_context) + t = VALUE + if self._peek('CMP_EQ', 'CMP_NE', 'OP_AND', 'OP_OR', '"\\)"', '"\\]"') in ['CMP_EQ', 'CMP_NE']: + CMP = self.CMP(_context) + VALUE = self.VALUE(_context) + t = CompareValue(t, CMP, VALUE) + return t + + def VALUE(self, 
_parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'VALUE', []) + _token = self._peek('"@"', 'FUNCNAME', 'STR_DQ', 'STR_SQ') + if _token == '"@"': + self._scan('"@"') + IDENTIFIER = self._scan('IDENTIFIER') + return AttribValue(IDENTIFIER) + elif _token == 'FUNCNAME': + FUNCNAME = self._scan('FUNCNAME') + f = Function(FUNCNAME); args = [] + self._scan('"\\("') + if self._peek('"\\)"', '"@"', 'FUNCNAME', '","', 'STR_DQ', 'STR_SQ') not in ['"\\)"', '","']: + VALUE = self.VALUE(_context) + args.append(VALUE) + while self._peek('","', '"\\)"') == '","': + self._scan('","') + VALUE = self.VALUE(_context) + args.append(VALUE) + if self._peek() not in ['","', '"\\)"']: + raise SyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['","', '"\\)"'])) + self._scan('"\\)"') + f.setParams(*args); return f + else: # in ['STR_DQ', 'STR_SQ'] + STR = self.STR(_context) + return LiteralValue(STR[1:len(STR)-1]) + + def CMP(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'CMP', []) + _token = self._peek('CMP_EQ', 'CMP_NE') + if _token == 'CMP_EQ': + CMP_EQ = self._scan('CMP_EQ') + return CMP_EQ + else: # == 'CMP_NE' + CMP_NE = self._scan('CMP_NE') + return CMP_NE + + def STR(self, _parent=None): + _context = self.Context(_parent, self._scanner, self._pos, 'STR', []) + _token = self._peek('STR_DQ', 'STR_SQ') + if _token == 'STR_DQ': + STR_DQ = self._scan('STR_DQ') + return STR_DQ + else: # == 'STR_SQ' + STR_SQ = self._scan('STR_SQ') + return STR_SQ + + +def parse(rule, text): + P = XPathParser(XPathParserScanner(text)) + return wrap_error_reporter(P, rule) + +if __name__ == '__main__': + from sys import argv, stdin + if len(argv) >= 2: + if len(argv) >= 3: + f = open(argv[2],'r') + else: + f = stdin + print parse(argv[1], f.read()) + else: print >>sys.stderr, 'Args: []' +# End -- grammar generated by Yapps diff --git a/vendor/Twisted-10.0.0/twisted/words/xmpproutertap.py b/vendor/Twisted-10.0.0/twisted/words/xmpproutertap.py new file mode 100644 index 000000000000..2c66c2a9f001 --- /dev/null +++ b/vendor/Twisted-10.0.0/twisted/words/xmpproutertap.py @@ -0,0 +1,30 @@ +# -*- test-case-name: twisted.words.test.test_xmpproutertap -*- +# +# Copyright (c) 2001-2008 Twisted Matrix Laboratories. +# See LICENSE for details. + +from twisted.application import strports +from twisted.python import usage +from twisted.words.protocols.jabber import component + +class Options(usage.Options): + optParameters = [ + ('port', None, 'tcp:5347:interface=127.0.0.1', + 'Port components connect to'), + ('secret', None, 'secret', 'Router secret'), + ] + + optFlags = [ + ('verbose', 'v', 'Log traffic'), + ] + + + +def makeService(config): + router = component.Router() + factory = component.XMPPComponentServerFactory(router, config['secret']) + + if config['verbose']: + factory.logTraffic = True + + return strports.service(config['port'], factory) diff --git a/vendor/amqplib/__init__.py b/vendor/amqplib/__init__.py new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/vendor/amqplib/__init__.py @@ -0,0 +1 @@ + diff --git a/vendor/amqplib/client_0_8/__init__.py b/vendor/amqplib/client_0_8/__init__.py new file mode 100644 index 000000000000..4306dea326a4 --- /dev/null +++ b/vendor/amqplib/client_0_8/__init__.py @@ -0,0 +1,35 @@ +""" +AMQP Client implementing the 0-8 spec. 
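+
+A rough usage sketch follows; it is illustrative only, and the Connection
+keyword arguments shown are assumptions about connection.py rather than a
+definitive signature:
+
+    from amqplib import client_0_8 as amqp
+
+    conn = amqp.Connection(host='localhost', userid='guest',
+                           password='guest', virtual_host='/')
+    ch = conn.channel()
+    ch.basic_publish(amqp.Message('hello world'),
+                     exchange='amq.direct', routing_key='test')
+    ch.close()
+    conn.close()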
+ +""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + +# +# Pull in the public items from the various sub-modules +# +from basic_message import * +from connection import * +from exceptions import * + +__all__ = [ + 'Connection', + 'Channel', # here mainly so it shows in in pydoc + 'Message', + 'AMQPException', + 'AMQPConnectionException', + 'AMQPChannelException', + ] diff --git a/vendor/amqplib/client_0_8/abstract_channel.py b/vendor/amqplib/client_0_8/abstract_channel.py new file mode 100644 index 000000000000..74c8b3fa1d8f --- /dev/null +++ b/vendor/amqplib/client_0_8/abstract_channel.py @@ -0,0 +1,114 @@ +""" +Code common to Connection and Channel objects. + +""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + +from serialization import AMQPWriter + +__all__ = [ + 'AbstractChannel', + ] + + +class AbstractChannel(object): + """ + Superclass for both the Connection, which is treated + as channel 0, and other user-created Channel objects. + + The subclasses must have a _METHOD_MAP class property, mapping + between AMQP method signatures and Python methods. + + """ + def __init__(self, connection, channel_id): + self.connection = connection + self.channel_id = channel_id + connection.channels[channel_id] = self + self.method_queue = [] # Higher level queue for methods + self.auto_decode = False + + + def __enter__(self): + """ + Support for Python >= 2.5 'with' statements. + + """ + return self + + + def __exit__(self, type, value, traceback): + """ + Support for Python >= 2.5 'with' statements. + + """ + self.close() + + + def _send_method(self, method_sig, args='', content=None): + """ + Send a method for our channel. 
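+
+        For illustration, method_sig is a (class_id, method_id) pair;
+        Channel.close() in channel.py, for example, sends its arguments
+        with
+
+            self._send_method((20, 40), args)   # (20, 40) is Channel.close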
+ + """ + if isinstance(args, AMQPWriter): + args = args.getvalue() + + self.connection.method_writer.write_method(self.channel_id, + method_sig, args, content) + + + def close(self): + """ + Close this Channel or Connection + + """ + raise NotImplementedError('Must be overriden in subclass') + + + + def wait(self, allowed_methods=None): + """ + Wait for a method that matches our allowed_methods parameter (the + default value of None means match any method), and dispatch to it. + + """ + method_sig, args, content = self.connection._wait_method( + self.channel_id, allowed_methods) + + if content \ + and self.auto_decode \ + and hasattr(content, 'content_encoding'): + try: + content.body = content.body.decode(content.content_encoding) + except Exception: + pass + + amqp_method = self._METHOD_MAP.get(method_sig, None) + + if amqp_method is None: + raise Exception('Unknown AMQP method (%d, %d)' % method_sig) + + if content is None: + return amqp_method(self, args) + else: + return amqp_method(self, args, content) + + + # + # Placeholder, the concrete implementations will have to + # supply their own versions of _METHOD_MAP + # + _METHOD_MAP = {} diff --git a/vendor/amqplib/client_0_8/basic_message.py b/vendor/amqplib/client_0_8/basic_message.py new file mode 100644 index 000000000000..c970634e6329 --- /dev/null +++ b/vendor/amqplib/client_0_8/basic_message.py @@ -0,0 +1,137 @@ +""" +Messages for AMQP + +""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + + +from serialization import GenericContent + +__all__ = [ + 'Message', + ] + + +class Message(GenericContent): + """ + A Message for use with the Channnel.basic_* methods. + + """ + # + # Instances of this class have these attributes, which + # are passed back and forth as message properties between + # client and server + # + PROPERTIES = [ + ('content_type', 'shortstr'), + ('content_encoding', 'shortstr'), + ('application_headers', 'table'), + ('delivery_mode', 'octet'), + ('priority', 'octet'), + ('correlation_id', 'shortstr'), + ('reply_to', 'shortstr'), + ('expiration', 'shortstr'), + ('message_id', 'shortstr'), + ('timestamp', 'timestamp'), + ('type', 'shortstr'), + ('user_id', 'shortstr'), + ('app_id', 'shortstr'), + ('cluster_id', 'shortstr') + ] + + def __init__(self, body='', children=None, **properties): + """ + Expected arg types + + body: string + children: (not supported) + + Keyword properties may include: + + content_type: shortstr + MIME content type + + content_encoding: shortstr + MIME content encoding + + application_headers: table + Message header field table, a dict with string keys, + and string | int | Decimal | datetime | dict values. 
+ + delivery_mode: octet + Non-persistent (1) or persistent (2) + + priority: octet + The message priority, 0 to 9 + + correlation_id: shortstr + The application correlation identifier + + reply_to: shortstr + The destination to reply to + + expiration: shortstr + Message expiration specification + + message_id: shortstr + The application message identifier + + timestamp: datetime.datetime + The message timestamp + + type: shortstr + The message type name + + user_id: shortstr + The creating user id + + app_id: shortstr + The creating application id + + cluster_id: shortstr + Intra-cluster routing identifier + + Unicode bodies are encoded according to the 'content_encoding' + argument. If that's None, it's set to 'UTF-8' automatically. + + example: + + msg = Message('hello world', + content_type='text/plain', + application_headers={'foo': 7}) + + """ + if isinstance(body, unicode): + if properties.get('content_encoding', None) is None: + properties['content_encoding'] = 'UTF-8' + self.body = body.encode(properties['content_encoding']) + else: + self.body = body + + super(Message, self).__init__(**properties) + + + def __eq__(self, other): + """ + Check if the properties and bodies of this Message and another + Message are the same. + + Received messages may contain a 'delivery_info' attribute, + which isn't compared. + + """ + return super(Message, self).__eq__(other) and (self.body == other.body) diff --git a/vendor/amqplib/client_0_8/channel.py b/vendor/amqplib/client_0_8/channel.py new file mode 100644 index 000000000000..8de0b220ace0 --- /dev/null +++ b/vendor/amqplib/client_0_8/channel.py @@ -0,0 +1,2602 @@ +""" +AMQP 0-8 Channels + +""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + +import logging +from Queue import Queue + +from abstract_channel import AbstractChannel +from exceptions import * +from serialization import AMQPWriter + +__all__ = [ + 'Channel', # here mainly so it shows in in pydoc + ] + +AMQP_LOGGER = logging.getLogger('amqplib') + + +class Channel(AbstractChannel): + """ + work with channels + + The channel class provides methods for a client to establish a + virtual connection - a channel - to a server and for both peers to + operate the virtual connection thereafter. + + GRAMMAR: + + channel = open-channel *use-channel close-channel + open-channel = C:OPEN S:OPEN-OK + use-channel = C:FLOW S:FLOW-OK + / S:FLOW C:FLOW-OK + / S:ALERT + / functional-class + close-channel = C:CLOSE S:CLOSE-OK + / S:CLOSE C:CLOSE-OK + + """ + def __init__(self, connection, channel_id=None, auto_decode=True): + """ + Create a channel bound to a connection and using the specified + numeric channel_id, and open on the server. 
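+
+        A minimal sketch ('conn' stands for an already-open Connection
+        and is illustrative only):
+
+            ch = Channel(conn)    # channel_id is assigned automatically
+            # ... use the channel ...
+            ch.close()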
+ + The 'auto_decode' parameter (defaults to True), indicates + whether the library should attempt to decode the body + of Messages to a Unicode string if there's a 'content_encoding' + property for the message. If there's no 'content_encoding' + property, or the decode raises an Exception, the plain string + is left as the message body. + + """ + if channel_id is None: + channel_id = connection._get_free_channel_id() + AMQP_LOGGER.debug('using channel_id: %d' % channel_id) + + super(Channel, self).__init__(connection, channel_id) + + self.default_ticket = 0 + self.is_open = False + self.active = True # Flow control + self.alerts = Queue() + self.returned_messages = Queue() + self.callbacks = {} + self.auto_decode = auto_decode + + self._x_open() + + + def _do_close(self): + """ + Tear down this object, after we've agreed to close with the server. + + """ + AMQP_LOGGER.debug('Closed channel #%d' % self.channel_id) + self.is_open = False + del self.connection.channels[self.channel_id] + self.channel_id = self.connection = None + self.callbacks = {} + + + ################# + + def _alert(self, args): + """ + This method allows the server to send a non-fatal warning to + the client. This is used for methods that are normally + asynchronous and thus do not have confirmations, and for which + the server may detect errors that need to be reported. Fatal + errors are handled as channel or connection exceptions; non- + fatal errors are sent through this method. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + details: table + + detailed information for warning + + A set of fields that provide more information about + the problem. The meaning of these fields are defined + on a per-reply-code basis (TO BE DEFINED). + + """ + reply_code = args.read_short() + reply_text = args.read_shortstr() + details = args.read_table() + + self.alerts.put((reply_code, reply_text, details)) + + + def close(self, reply_code=0, reply_text='', method_sig=(0, 0)): + """ + request a channel close + + This method indicates that the sender wants to close the + channel. This may be due to internal conditions (e.g. a forced + shut-down) or due to an error handling a specific method, i.e. + an exception. When a close is due to an exception, the sender + provides the class and method id of the method which caused + the exception. + + RULE: + + After sending this method any received method except + Channel.Close-OK MUST be discarded. + + RULE: + + The peer sending this method MAY use a counter or timeout + to detect failure of the other peer to respond correctly + with Channel.Close-OK.. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + class_id: short + + failing method class + + When the close is provoked by a method exception, this + is the class of the method. + + method_id: short + + failing method ID + + When the close is provoked by a method exception, this + is the ID of the method. 
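+
+        A minimal usage sketch ('ch' stands for an open Channel and is
+        illustrative only):
+
+            ch.close()    # sends Channel.close and waits for close_ok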
+ + """ + if not self.is_open: + # already closed + return + + args = AMQPWriter() + args.write_short(reply_code) + args.write_shortstr(reply_text) + args.write_short(method_sig[0]) # class_id + args.write_short(method_sig[1]) # method_id + self._send_method((20, 40), args) + return self.wait(allowed_methods=[ + (20, 41), # Channel.close_ok + ]) + + + def _close(self, args): + """ + request a channel close + + This method indicates that the sender wants to close the + channel. This may be due to internal conditions (e.g. a forced + shut-down) or due to an error handling a specific method, i.e. + an exception. When a close is due to an exception, the sender + provides the class and method id of the method which caused + the exception. + + RULE: + + After sending this method any received method except + Channel.Close-OK MUST be discarded. + + RULE: + + The peer sending this method MAY use a counter or timeout + to detect failure of the other peer to respond correctly + with Channel.Close-OK.. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + class_id: short + + failing method class + + When the close is provoked by a method exception, this + is the class of the method. + + method_id: short + + failing method ID + + When the close is provoked by a method exception, this + is the ID of the method. + + """ + reply_code = args.read_short() + reply_text = args.read_shortstr() + class_id = args.read_short() + method_id = args.read_short() + +# self.close_ok() + + +# def close_ok(self): +# """ +# confirm a channel close +# +# This method confirms a Channel.Close method and tells the +# recipient that it is safe to release resources for the channel +# and close the socket. +# +# RULE: +# +# A peer that detects a socket closure without having +# received a Channel.Close-Ok handshake method SHOULD log +# the error. +# +# """ + self._send_method((20, 41)) + self._do_close() + + raise AMQPChannelException(reply_code, reply_text, + (class_id, method_id)) + + + def _close_ok(self, args): + """ + confirm a channel close + + This method confirms a Channel.Close method and tells the + recipient that it is safe to release resources for the channel + and close the socket. + + RULE: + + A peer that detects a socket closure without having + received a Channel.Close-Ok handshake method SHOULD log + the error. + + """ + self._do_close() + + + def flow(self, active): + """ + enable/disable flow from peer + + This method asks the peer to pause or restart the flow of + content data. This is a simple flow-control mechanism that a + peer can use to avoid oveflowing its queues or otherwise + finding itself receiving more messages than it can process. + Note that this method is not intended for window control. The + peer that receives a request to stop sending content should + finish sending the current content, if any, and then wait + until it receives a Flow restart method. + + RULE: + + When a new channel is opened, it is active. Some + applications assume that channels are inactive until + started. To emulate this behaviour a client MAY open the + channel, then pause it. + + RULE: + + When sending content data in multiple frames, a peer + SHOULD monitor the channel for incoming methods and + respond to a Channel.Flow as rapidly as possible. 
+ + RULE: + + A peer MAY use the Channel.Flow method to throttle + incoming content data for internal reasons, for example, + when exchangeing data over a slower connection. + + RULE: + + The peer that requests a Channel.Flow method MAY + disconnect and/or ban a peer that does not respect the + request. + + PARAMETERS: + active: boolean + + start/stop content frames + + If True, the peer starts sending content frames. If + False, the peer stops sending content frames. + + """ + args = AMQPWriter() + args.write_bit(active) + self._send_method((20, 20), args) + return self.wait(allowed_methods=[ + (20, 21), # Channel.flow_ok + ]) + + + def _flow(self, args): + """ + enable/disable flow from peer + + This method asks the peer to pause or restart the flow of + content data. This is a simple flow-control mechanism that a + peer can use to avoid oveflowing its queues or otherwise + finding itself receiving more messages than it can process. + Note that this method is not intended for window control. The + peer that receives a request to stop sending content should + finish sending the current content, if any, and then wait + until it receives a Flow restart method. + + RULE: + + When a new channel is opened, it is active. Some + applications assume that channels are inactive until + started. To emulate this behaviour a client MAY open the + channel, then pause it. + + RULE: + + When sending content data in multiple frames, a peer + SHOULD monitor the channel for incoming methods and + respond to a Channel.Flow as rapidly as possible. + + RULE: + + A peer MAY use the Channel.Flow method to throttle + incoming content data for internal reasons, for example, + when exchangeing data over a slower connection. + + RULE: + + The peer that requests a Channel.Flow method MAY + disconnect and/or ban a peer that does not respect the + request. + + PARAMETERS: + active: boolean + + start/stop content frames + + If True, the peer starts sending content frames. If + False, the peer stops sending content frames. + + """ + self.active = args.read_bit() + + self._x_flow_ok(self.active) + + + def _x_flow_ok(self, active): + """ + confirm a flow method + + Confirms to the peer that a flow command was received and + processed. + + PARAMETERS: + active: boolean + + current flow setting + + Confirms the setting of the processed flow method: + True means the peer will start sending or continue + to send content frames; False means it will not. + + """ + args = AMQPWriter() + args.write_bit(active) + self._send_method((20, 21), args) + + + def _flow_ok(self, args): + """ + confirm a flow method + + Confirms to the peer that a flow command was received and + processed. + + PARAMETERS: + active: boolean + + current flow setting + + Confirms the setting of the processed flow method: + True means the peer will start sending or continue + to send content frames; False means it will not. + + """ + return args.read_bit() + + + def _x_open(self, out_of_band=''): + """ + open a channel for use + + This method opens a virtual connection (a channel). + + RULE: + + This method MUST NOT be called when the channel is already + open. + + PARAMETERS: + out_of_band: shortstr + + out-of-band settings + + Configures out-of-band transfers on this channel. The + syntax and meaning of this field will be formally + defined at a later date. 
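+
+        This method is invoked automatically from __init__ when the
+        channel object is created; application code does not normally
+        call it directly.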
+ + """ + if self.is_open: + return + + args = AMQPWriter() + args.write_shortstr(out_of_band) + self._send_method((20, 10), args) + return self.wait(allowed_methods=[ + (20, 11), # Channel.open_ok + ]) + + + def _open_ok(self, args): + """ + signal that the channel is ready + + This method signals to the client that the channel is ready + for use. + + """ + self.is_open = True + AMQP_LOGGER.debug('Channel open') + + + ############# + # + # Access + # + # + # work with access tickets + # + # The protocol control access to server resources using access + # tickets. A client must explicitly request access tickets before + # doing work. An access ticket grants a client the right to use a + # specific set of resources - called a "realm" - in specific ways. + # + # GRAMMAR: + # + # access = C:REQUEST S:REQUEST-OK + # + # + + def access_request(self, realm, exclusive=False, + passive=False, active=False, write=False, read=False): + """ + request an access ticket + + This method requests an access ticket for an access realm. The + server responds by granting the access ticket. If the client + does not have access rights to the requested realm this causes + a connection exception. Access tickets are a per-channel + resource. + + RULE: + + The realm name MUST start with either "/data" (for + application resources) or "/admin" (for server + administration resources). If the realm starts with any + other path, the server MUST raise a connection exception + with reply code 403 (access refused). + + RULE: + + The server MUST implement the /data realm and MAY + implement the /admin realm. The mapping of resources to + realms is not defined in the protocol - this is a server- + side configuration issue. + + PARAMETERS: + realm: shortstr + + name of requested realm + + RULE: + + If the specified realm is not known to the server, + the server must raise a channel exception with + reply code 402 (invalid path). + + exclusive: boolean + + request exclusive access + + Request exclusive access to the realm. If the server + cannot grant this - because there are other active + tickets for the realm - it raises a channel exception. + + passive: boolean + + request passive access + + Request message passive access to the specified access + realm. Passive access lets a client get information + about resources in the realm but not to make any + changes to them. + + active: boolean + + request active access + + Request message active access to the specified access + realm. Acvtive access lets a client get create and + delete resources in the realm. + + write: boolean + + request write access + + Request write access to the specified access realm. + Write access lets a client publish messages to all + exchanges in the realm. + + read: boolean + + request read access + + Request read access to the specified access realm. + Read access lets a client consume messages from queues + in the realm. + + The most recently requested ticket is used as the channel's + default ticket for any method that requires a ticket. + + """ + args = AMQPWriter() + args.write_shortstr(realm) + args.write_bit(exclusive) + args.write_bit(passive) + args.write_bit(active) + args.write_bit(write) + args.write_bit(read) + self._send_method((30, 10), args) + return self.wait(allowed_methods=[ + (30, 11), # Channel.access_request_ok + ]) + + + def _access_request_ok(self, args): + """ + grant access to server resources + + This method provides the client with an access ticket. 
The + access ticket is valid within the current channel and for the + lifespan of the channel. + + RULE: + + The client MUST NOT use access tickets except within the + same channel as originally granted. + + RULE: + + The server MUST isolate access tickets per channel and + treat an attempt by a client to mix these as a connection + exception. + + PARAMETERS: + ticket: short + + """ + self.default_ticket = args.read_short() + return self.default_ticket + + + ############# + # + # Exchange + # + # + # work with exchanges + # + # Exchanges match and distribute messages across queues. + # Exchanges can be configured in the server or created at runtime. + # + # GRAMMAR: + # + # exchange = C:DECLARE S:DECLARE-OK + # / C:DELETE S:DELETE-OK + # + # RULE: + # + # The server MUST implement the direct and fanout exchange + # types, and predeclare the corresponding exchanges named + # amq.direct and amq.fanout in each virtual host. The server + # MUST also predeclare a direct exchange to act as the default + # exchange for content Publish methods and for default queue + # bindings. + # + # RULE: + # + # The server SHOULD implement the topic exchange type, and + # predeclare the corresponding exchange named amq.topic in + # each virtual host. + # + # RULE: + # + # The server MAY implement the system exchange type, and + # predeclare the corresponding exchanges named amq.system in + # each virtual host. If the client attempts to bind a queue to + # the system exchange, the server MUST raise a connection + # exception with reply code 507 (not allowed). + # + # RULE: + # + # The default exchange MUST be defined as internal, and be + # inaccessible to the client except by specifying an empty + # exchange name in a content Publish method. That is, the + # server MUST NOT let clients make explicit bindings to this + # exchange. + # + # + + def exchange_declare(self, exchange, type, passive=False, durable=False, + auto_delete=True, internal=False, nowait=False, + arguments=None, ticket=None): + """ + declare exchange, create if needed + + This method creates an exchange if it does not already exist, + and if the exchange exists, verifies that it is of the correct + and expected class. + + RULE: + + The server SHOULD support a minimum of 16 exchanges per + virtual host and ideally, impose no limit except as + defined by available resources. + + PARAMETERS: + exchange: shortstr + + RULE: + + Exchange names starting with "amq." are reserved + for predeclared and standardised exchanges. If + the client attempts to create an exchange starting + with "amq.", the server MUST raise a channel + exception with reply code 403 (access refused). + + type: shortstr + + exchange type + + Each exchange belongs to one of a set of exchange + types implemented by the server. The exchange types + define the functionality of the exchange - i.e. how + messages are routed through it. It is not valid or + meaningful to attempt to change the type of an + existing exchange. + + RULE: + + If the exchange already exists with a different + type, the server MUST raise a connection exception + with a reply code 507 (not allowed). + + RULE: + + If the server does not support the requested + exchange type it MUST raise a connection exception + with a reply code 503 (command invalid). + + passive: boolean + + do not create exchange + + If set, the server will not create the exchange. The + client can use this to check whether an exchange + exists without modifying the server state. 
+ + RULE: + + If set, and the exchange does not already exist, + the server MUST raise a channel exception with + reply code 404 (not found). + + durable: boolean + + request a durable exchange + + If set when creating a new exchange, the exchange will + be marked as durable. Durable exchanges remain active + when a server restarts. Non-durable exchanges + (transient exchanges) are purged if/when a server + restarts. + + RULE: + + The server MUST support both durable and transient + exchanges. + + RULE: + + The server MUST ignore the durable field if the + exchange already exists. + + auto_delete: boolean + + auto-delete when unused + + If set, the exchange is deleted when all queues have + finished using it. + + RULE: + + The server SHOULD allow for a reasonable delay + between the point when it determines that an + exchange is not being used (or no longer used), + and the point when it deletes the exchange. At + the least it must allow a client to create an + exchange and then bind a queue to it, with a small + but non-zero delay between these two actions. + + RULE: + + The server MUST ignore the auto-delete field if + the exchange already exists. + + internal: boolean + + create internal exchange + + If set, the exchange may not be used directly by + publishers, but only when bound to other exchanges. + Internal exchanges are used to construct wiring that + is not visible to applications. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + arguments: table + + arguments for declaration + + A set of arguments for the declaration. The syntax and + semantics of these arguments depends on the server + implementation. This field is ignored if passive is + True. + + ticket: short + + When a client defines a new exchange, this belongs to + the access realm of the ticket used. All further work + done with that exchange must be done with an access + ticket for the same realm. + + RULE: + + The client MUST provide a valid access ticket + giving "active" access to the realm in which the + exchange exists or will be created, or "passive" + access if the if-exists flag is set. + + """ + if arguments is None: + arguments = {} + + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + args.write_shortstr(exchange) + args.write_shortstr(type) + args.write_bit(passive) + args.write_bit(durable) + args.write_bit(auto_delete) + args.write_bit(internal) + args.write_bit(nowait) + args.write_table(arguments) + self._send_method((40, 10), args) + + if not nowait: + return self.wait(allowed_methods=[ + (40, 11), # Channel.exchange_declare_ok + ]) + + + def _exchange_declare_ok(self, args): + """ + confirms an exchange declaration + + This method confirms a Declare method and confirms the name of + the exchange, essential for automatically-named exchanges. + + """ + pass + + + def exchange_delete(self, exchange, if_unused=False, + nowait=False, ticket=None): + """ + delete an exchange + + This method deletes an exchange. When an exchange is deleted + all queue bindings on the exchange are cancelled. + + PARAMETERS: + exchange: shortstr + + RULE: + + The exchange MUST exist. Attempting to delete a + non-existing exchange causes a channel exception. 
+ + if_unused: boolean + + delete only if unused + + If set, the server will only delete the exchange if it + has no queue bindings. If the exchange has queue + bindings the server does not delete it but raises a + channel exception instead. + + RULE: + + If set, the server SHOULD delete the exchange but + only if it has no queue bindings. + + RULE: + + If set, the server SHOULD raise a channel + exception if the exchange is in use. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + ticket: short + + RULE: + + The client MUST provide a valid access ticket + giving "active" access rights to the exchange's + access realm. + + """ + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + args.write_shortstr(exchange) + args.write_bit(if_unused) + args.write_bit(nowait) + self._send_method((40, 20), args) + + if not nowait: + return self.wait(allowed_methods=[ + (40, 21), # Channel.exchange_delete_ok + ]) + + + def _exchange_delete_ok(self, args): + """ + confirm deletion of an exchange + + This method confirms the deletion of an exchange. + + """ + pass + + + ############# + # + # Queue + # + # + # work with queues + # + # Queues store and forward messages. Queues can be configured in + # the server or created at runtime. Queues must be attached to at + # least one exchange in order to receive messages from publishers. + # + # GRAMMAR: + # + # queue = C:DECLARE S:DECLARE-OK + # / C:BIND S:BIND-OK + # / C:PURGE S:PURGE-OK + # / C:DELETE S:DELETE-OK + # + # RULE: + # + # A server MUST allow any content class to be sent to any + # queue, in any mix, and queue and delivery these content + # classes independently. Note that all methods that fetch + # content off queues are specific to a given content class. + # + # + + def queue_bind(self, queue, exchange, routing_key='', + nowait=False, arguments=None, ticket=None): + """ + bind queue to an exchange + + This method binds a queue to an exchange. Until a queue is + bound it will not receive any messages. In a classic + messaging model, store-and-forward queues are bound to a dest + exchange and subscription queues are bound to a dest_wild + exchange. + + RULE: + + A server MUST allow ignore duplicate bindings - that is, + two or more bind methods for a specific queue, with + identical arguments - without treating these as an error. + + RULE: + + If a bind fails, the server MUST raise a connection + exception. + + RULE: + + The server MUST NOT allow a durable queue to bind to a + transient exchange. If the client attempts this the server + MUST raise a channel exception. + + RULE: + + Bindings for durable queues are automatically durable and + the server SHOULD restore such bindings after a server + restart. + + RULE: + + If the client attempts to an exchange that was declared as + internal, the server MUST raise a connection exception + with reply code 530 (not allowed). + + RULE: + + The server SHOULD support at least 4 bindings per queue, + and ideally, impose no limit except as defined by + available resources. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to bind. If the queue + name is empty, refers to the current queue for the + channel, which is the last declared queue. 
+ + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + RULE: + + If the queue does not exist the server MUST raise + a channel exception with reply code 404 (not + found). + + exchange: shortstr + + The name of the exchange to bind to. + + RULE: + + If the exchange does not exist the server MUST + raise a channel exception with reply code 404 (not + found). + + routing_key: shortstr + + message routing key + + Specifies the routing key for the binding. The + routing key is used for routing messages depending on + the exchange configuration. Not all exchanges use a + routing key - refer to the specific exchange + documentation. If the routing key is empty and the + queue name is empty, the routing key will be the + current queue for the channel, which is the last + declared queue. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + arguments: table + + arguments for binding + + A set of arguments for the binding. The syntax and + semantics of these arguments depends on the exchange + class. + + ticket: short + + The client provides a valid access ticket giving + "active" access rights to the queue's access realm. + + """ + if arguments is None: + arguments = {} + + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + args.write_shortstr(queue) + args.write_shortstr(exchange) + args.write_shortstr(routing_key) + args.write_bit(nowait) + args.write_table(arguments) + self._send_method((50, 20), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 21), # Channel.queue_bind_ok + ]) + + + def _queue_bind_ok(self, args): + """ + confirm bind successful + + This method confirms that the bind was successful. + + """ + pass + + + def queue_declare(self, queue='', passive=False, durable=False, + exclusive=False, auto_delete=True, nowait=False, + arguments=None, ticket=None): + """ + declare queue, create if needed + + This method creates or checks a queue. When creating a new + queue the client can specify various properties that control + the durability of the queue and its contents, and the level of + sharing for the queue. + + RULE: + + The server MUST create a default binding for a newly- + created queue to the default exchange, which is an + exchange of type 'direct'. + + RULE: + + The server SHOULD support a minimum of 256 queues per + virtual host and ideally, impose no limit except as + defined by available resources. + + PARAMETERS: + queue: shortstr + + RULE: + + The queue name MAY be empty, in which case the + server MUST create a new queue with a unique + generated name and return this to the client in + the Declare-Ok method. + + RULE: + + Queue names starting with "amq." are reserved for + predeclared and standardised server queues. If + the queue name starts with "amq." and the passive + option is False, the server MUST raise a connection + exception with reply code 403 (access refused). + + passive: boolean + + do not create queue + + If set, the server will not create the queue. The + client can use this to check whether a queue exists + without modifying the server state. 
+ + RULE: + + If set, and the queue does not already exist, the + server MUST respond with a reply code 404 (not + found) and raise a channel exception. + + durable: boolean + + request a durable queue + + If set when creating a new queue, the queue will be + marked as durable. Durable queues remain active when + a server restarts. Non-durable queues (transient + queues) are purged if/when a server restarts. Note + that durable queues do not necessarily hold persistent + messages, although it does not make sense to send + persistent messages to a transient queue. + + RULE: + + The server MUST recreate the durable queue after a + restart. + + RULE: + + The server MUST support both durable and transient + queues. + + RULE: + + The server MUST ignore the durable field if the + queue already exists. + + exclusive: boolean + + request an exclusive queue + + Exclusive queues may only be consumed from by the + current connection. Setting the 'exclusive' flag + always implies 'auto-delete'. + + RULE: + + The server MUST support both exclusive (private) + and non-exclusive (shared) queues. + + RULE: + + The server MUST raise a channel exception if + 'exclusive' is specified and the queue already + exists and is owned by a different connection. + + auto_delete: boolean + + auto-delete queue when unused + + If set, the queue is deleted when all consumers have + finished using it. Last consumer can be cancelled + either explicitly or because its channel is closed. If + there was no consumer ever on the queue, it won't be + deleted. + + RULE: + + The server SHOULD allow for a reasonable delay + between the point when it determines that a queue + is not being used (or no longer used), and the + point when it deletes the queue. At the least it + must allow a client to create a queue and then + create a consumer to read from it, with a small + but non-zero delay between these two actions. The + server should equally allow for clients that may + be disconnected prematurely, and wish to re- + consume from the same queue without losing + messages. We would recommend a configurable + timeout, with a suitable default value being one + minute. + + RULE: + + The server MUST ignore the auto-delete field if + the queue already exists. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + arguments: table + + arguments for declaration + + A set of arguments for the declaration. The syntax and + semantics of these arguments depends on the server + implementation. This field is ignored if passive is + True. + + ticket: short + + When a client defines a new queue, this belongs to the + access realm of the ticket used. All further work + done with that queue must be done with an access + ticket for the same realm. + + The client provides a valid access ticket giving + "active" access to the realm in which the queue exists + or will be created, or "passive" access if the if- + exists flag is set. 
+ + Returns a tuple containing 3 items: + the name of the queue (essential for automatically-named queues) + message count + consumer count + + """ + if arguments is None: + arguments = {} + + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + args.write_shortstr(queue) + args.write_bit(passive) + args.write_bit(durable) + args.write_bit(exclusive) + args.write_bit(auto_delete) + args.write_bit(nowait) + args.write_table(arguments) + self._send_method((50, 10), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 11), # Channel.queue_declare_ok + ]) + + + def _queue_declare_ok(self, args): + """ + confirms a queue definition + + This method confirms a Declare method and confirms the name of + the queue, essential for automatically-named queues. + + PARAMETERS: + queue: shortstr + + Reports the name of the queue. If the server generated + a queue name, this field contains that name. + + message_count: long + + number of messages in queue + + Reports the number of messages in the queue, which + will be zero for newly-created queues. + + consumer_count: long + + number of consumers + + Reports the number of active consumers for the queue. + Note that consumers can suspend activity + (Channel.Flow) in which case they do not appear in + this count. + + """ + queue = args.read_shortstr() + message_count = args.read_long() + consumer_count = args.read_long() + + return queue, message_count, consumer_count + + + def queue_delete(self, queue='', if_unused=False, if_empty=False, + nowait=False, ticket=None): + """ + delete a queue + + This method deletes a queue. When a queue is deleted any + pending messages are sent to a dead-letter queue if this is + defined in the server configuration, and all consumers on the + queue are cancelled. + + RULE: + + The server SHOULD use a dead-letter queue to hold messages + that were pending on a deleted queue, and MAY provide + facilities for a system administrator to move these + messages back to an active queue. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to delete. If the + queue name is empty, refers to the current queue for + the channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + RULE: + + The queue must exist. Attempting to delete a non- + existing queue causes a channel exception. + + if_unused: boolean + + delete only if unused + + If set, the server will only delete the queue if it + has no consumers. If the queue has consumers the + server does does not delete it but raises a channel + exception instead. + + RULE: + + The server MUST respect the if-unused flag when + deleting a queue. + + if_empty: boolean + + delete only if empty + + If set, the server will only delete the queue if it + has no messages. If the queue is not empty the server + raises a channel exception. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + ticket: short + + The client provides a valid access ticket giving + "active" access rights to the queue's access realm. 
+ + """ + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + + args.write_shortstr(queue) + args.write_bit(if_unused) + args.write_bit(if_empty) + args.write_bit(nowait) + self._send_method((50, 40), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 41), # Channel.queue_delete_ok + ]) + + + def _queue_delete_ok(self, args): + """ + confirm deletion of a queue + + This method confirms the deletion of a queue. + + PARAMETERS: + message_count: long + + number of messages purged + + Reports the number of messages purged. + + """ + return args.read_long() + + + def queue_purge(self, queue='', nowait=False, ticket=None): + """ + purge a queue + + This method removes all messages from a queue. It does not + cancel consumers. Purged messages are deleted without any + formal "undo" mechanism. + + RULE: + + A call to purge MUST result in an empty queue. + + RULE: + + On transacted channels the server MUST not purge messages + that have already been sent to a client but not yet + acknowledged. + + RULE: + + The server MAY implement a purge queue or log that allows + system administrators to recover accidentally-purged + messages. The server SHOULD NOT keep purged messages in + the same storage spaces as the live messages since the + volumes of purged messages may get very large. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to purge. If the + queue name is empty, refers to the current queue for + the channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + RULE: + + The queue must exist. Attempting to purge a non- + existing queue causes a channel exception. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + ticket: short + + The access ticket must be for the access realm that + holds the queue. + + RULE: + + The client MUST provide a valid access ticket + giving "read" access rights to the queue's access + realm. Note that purging a queue is equivalent to + reading all messages and discarding them. + + if nowait is False, returns a message_count + + """ + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + args.write_shortstr(queue) + args.write_bit(nowait) + self._send_method((50, 30), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 31), # Channel.queue_purge_ok + ]) + + + def _queue_purge_ok(self, args): + """ + confirms a queue purge + + This method confirms the purge of a queue. + + PARAMETERS: + message_count: long + + number of messages purged + + Reports the number of messages purged. + + """ + return args.read_long() + + + ############# + # + # Basic + # + # + # work with basic content + # + # The Basic class provides methods that support an industry- + # standard messaging model. 
+ # + # GRAMMAR: + # + # basic = C:QOS S:QOS-OK + # / C:CONSUME S:CONSUME-OK + # / C:CANCEL S:CANCEL-OK + # / C:PUBLISH content + # / S:RETURN content + # / S:DELIVER content + # / C:GET ( S:GET-OK content / S:GET-EMPTY ) + # / C:ACK + # / C:REJECT + # + # RULE: + # + # The server SHOULD respect the persistent property of basic + # messages and SHOULD make a best-effort to hold persistent + # basic messages on a reliable storage mechanism. + # + # RULE: + # + # The server MUST NOT discard a persistent basic message in + # case of a queue overflow. The server MAY use the + # Channel.Flow method to slow or stop a basic message + # publisher when necessary. + # + # RULE: + # + # The server MAY overflow non-persistent basic messages to + # persistent storage and MAY discard or dead-letter non- + # persistent basic messages on a priority basis if the queue + # size exceeds some configured limit. + # + # RULE: + # + # The server MUST implement at least 2 priority levels for + # basic messages, where priorities 0-4 and 5-9 are treated as + # two distinct levels. The server MAY implement up to 10 + # priority levels. + # + # RULE: + # + # The server MUST deliver messages of the same priority in + # order irrespective of their individual persistence. + # + # RULE: + # + # The server MUST support both automatic and explicit + # acknowledgements on Basic content. + # + + def basic_ack(self, delivery_tag, multiple=False): + """ + acknowledge one or more messages + + This method acknowledges one or more messages delivered via + the Deliver or Get-Ok methods. The client can ask to confirm + a single message or a set of messages up to and including a + specific message. + + PARAMETERS: + delivery_tag: longlong + + server-assigned delivery tag + + The server-assigned and channel-specific delivery tag + + RULE: + + The delivery tag is valid only within the channel + from which the message was received. I.e. a client + MUST NOT receive a message on one channel and then + acknowledge it on another. + + RULE: + + The server MUST NOT use a zero value for delivery + tags. Zero is reserved for client use, meaning "all + messages so far received". + + multiple: boolean + + acknowledge multiple messages + + If set to True, the delivery tag is treated as "up to + and including", so that the client can acknowledge + multiple messages with a single method. If set to + False, the delivery tag refers to a single message. + If the multiple field is True, and the delivery tag + is zero, tells the server to acknowledge all + outstanding mesages. + + RULE: + + The server MUST validate that a non-zero delivery- + tag refers to an delivered message, and raise a + channel exception if this is not the case. + + """ + args = AMQPWriter() + args.write_longlong(delivery_tag) + args.write_bit(multiple) + self._send_method((60, 80), args) + + + def basic_cancel(self, consumer_tag, nowait=False): + """ + end a queue consumer + + This method cancels a consumer. This does not affect already + delivered messages, but it does mean the server will not send + any more messages for that consumer. The client may receive + an abitrary number of messages in between sending the cancel + method and receiving the cancel-ok reply. + + RULE: + + If the queue no longer exists when the client sends a + cancel command, or the consumer has been cancelled for + other reasons, this command has no effect. + + PARAMETERS: + consumer_tag: shortstr + + consumer tag + + Identifier for the consumer, valid within the current + connection. 
+ + RULE: + + The consumer tag is valid only within the channel + from which the consumer was created. I.e. a client + MUST NOT create a consumer in one channel and then + use it in another. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + """ + args = AMQPWriter() + args.write_shortstr(consumer_tag) + args.write_bit(nowait) + self._send_method((60, 30), args) + return self.wait(allowed_methods=[ + (60, 31), # Channel.basic_cancel_ok + ]) + + + def _basic_cancel_ok(self, args): + """ + confirm a cancelled consumer + + This method confirms that the cancellation was completed. + + PARAMETERS: + consumer_tag: shortstr + + consumer tag + + Identifier for the consumer, valid within the current + connection. + + RULE: + + The consumer tag is valid only within the channel + from which the consumer was created. I.e. a client + MUST NOT create a consumer in one channel and then + use it in another. + + """ + consumer_tag = args.read_shortstr() + del self.callbacks[consumer_tag] + + + def basic_consume(self, queue='', consumer_tag='', no_local=False, + no_ack=False, exclusive=False, nowait=False, + callback=None, ticket=None): + """ + start a queue consumer + + This method asks the server to start a "consumer", which is a + transient request for messages from a specific queue. + Consumers last as long as the channel they were created on, or + until the client cancels them. + + RULE: + + The server SHOULD support at least 16 consumers per queue, + unless the queue was declared as private, and ideally, + impose no limit except as defined by available resources. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to consume from. If + the queue name is null, refers to the current queue + for the channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + consumer_tag: shortstr + + Specifies the identifier for the consumer. The + consumer tag is local to a connection, so two clients + can use the same consumer tags. If this field is empty + the server will generate a unique tag. + + RULE: + + The tag MUST NOT refer to an existing consumer. If + the client attempts to create two consumers with + the same non-empty tag the server MUST raise a + connection exception with reply code 530 (not + allowed). + + no_local: boolean + + do not deliver own messages + + If the no-local field is set the server will not send + messages to the client that published them. + + no_ack: boolean + + no acknowledgement needed + + If this field is set the server does not expect + acknowledgments for messages. That is, when a message + is delivered to the client the server automatically and + silently acknowledges it on behalf of the client. This + functionality increases performance but at the cost of + reliability. Messages can get lost if a client dies + before it can deliver them to the application. + + exclusive: boolean + + request exclusive access + + Request exclusive consumer access, meaning only this + consumer can access the queue. 
+ + RULE: + + If the server cannot grant exclusive access to the + queue when asked, - because there are other + consumers active - it MUST raise a channel + exception with return code 403 (access refused). + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + callback: Python callable + + function/method called with each delivered message + + For each message delivered by the broker, the + callable will be called with a Message object + as the single argument. If no callable is specified, + messages are quietly discarded, no_ack should probably + be set to True in that case. + + ticket: short + + RULE: + + The client MUST provide a valid access ticket + giving "read" access rights to the realm for the + queue. + + """ + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + args.write_shortstr(queue) + args.write_shortstr(consumer_tag) + args.write_bit(no_local) + args.write_bit(no_ack) + args.write_bit(exclusive) + args.write_bit(nowait) + self._send_method((60, 20), args) + + if not nowait: + consumer_tag = self.wait(allowed_methods=[ + (60, 21), # Channel.basic_consume_ok + ]) + + self.callbacks[consumer_tag] = callback + + return consumer_tag + + + def _basic_consume_ok(self, args): + """ + confirm a new consumer + + The server provides the client with a consumer tag, which is + used by the client for methods called on the consumer at a + later stage. + + PARAMETERS: + consumer_tag: shortstr + + Holds the consumer tag specified by the client or + provided by the server. + + """ + return args.read_shortstr() + + + def _basic_deliver(self, args, msg): + """ + notify the client of a consumer message + + This method delivers a message to the client, via a consumer. + In the asynchronous message delivery model, the client starts + a consumer using the Consume method, then the server responds + with Deliver methods as and when messages arrive for that + consumer. + + RULE: + + The server SHOULD track the number of times a message has + been delivered to clients and when a message is + redelivered a certain number of times - e.g. 5 times - + without being acknowledged, the server SHOULD consider the + message to be unprocessable (possibly causing client + applications to abort), and move the message to a dead + letter queue. + + PARAMETERS: + consumer_tag: shortstr + + consumer tag + + Identifier for the consumer, valid within the current + connection. + + RULE: + + The consumer tag is valid only within the channel + from which the consumer was created. I.e. a client + MUST NOT create a consumer in one channel and then + use it in another. + + delivery_tag: longlong + + server-assigned delivery tag + + The server-assigned and channel-specific delivery tag + + RULE: + + The delivery tag is valid only within the channel + from which the message was received. I.e. a client + MUST NOT receive a message on one channel and then + acknowledge it on another. + + RULE: + + The server MUST NOT use a zero value for delivery + tags. Zero is reserved for client use, meaning "all + messages so far received". + + redelivered: boolean + + message is being redelivered + + This indicates that the message has been previously + delivered to this or another client. 
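The asynchronous consume path described here can be sketched as client code. Names are illustrative, 'chan' is assumed to be an open Channel, and deliveries only arrive while the client pumps the connection with wait(), which dispatches to the callback registered in self.callbacks.

    def on_message(msg):
        # delivery_info is filled in by _basic_deliver below
        print 'received %r' % msg.body
        msg.delivery_info['channel'].basic_ack(
            msg.delivery_info['delivery_tag'])

    tag = chan.basic_consume(queue='work.queue', callback=on_message,
                             no_ack=False)

    # Each wait() reads and dispatches one incoming method; the loop ends
    # once the consumer is cancelled (chan.basic_cancel(tag)) and the
    # callback table empties.
    while chan.callbacks:
        chan.wait()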
+ + exchange: shortstr + + Specifies the name of the exchange that the message + was originally published to. + + routing_key: shortstr + + Message routing key + + Specifies the routing key name specified when the + message was published. + + """ + consumer_tag = args.read_shortstr() + delivery_tag = args.read_longlong() + redelivered = args.read_bit() + exchange = args.read_shortstr() + routing_key = args.read_shortstr() + + msg.delivery_info = { + 'channel': self, + 'consumer_tag': consumer_tag, + 'delivery_tag': delivery_tag, + 'redelivered': redelivered, + 'exchange': exchange, + 'routing_key': routing_key, + } + + func = self.callbacks.get(consumer_tag, None) + if func is not None: + func(msg) + + + def basic_get(self, queue='', no_ack=False, ticket=None): + """ + direct access to a queue + + This method provides a direct access to the messages in a + queue using a synchronous dialogue that is designed for + specific types of application where synchronous functionality + is more important than performance. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to consume from. If + the queue name is null, refers to the current queue + for the channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + no_ack: boolean + + no acknowledgement needed + + If this field is set the server does not expect + acknowledgments for messages. That is, when a message + is delivered to the client the server automatically and + silently acknowledges it on behalf of the client. This + functionality increases performance but at the cost of + reliability. Messages can get lost if a client dies + before it can deliver them to the application. + + ticket: short + + RULE: + + The client MUST provide a valid access ticket + giving "read" access rights to the realm for the + queue. + + Non-blocking, returns a message object, or None. + + """ + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + args.write_shortstr(queue) + args.write_bit(no_ack) + self._send_method((60, 70), args) + return self.wait(allowed_methods=[ + (60, 71), # Channel.basic_get_ok + (60, 72), # Channel.basic_get_empty + ]) + + + def _basic_get_empty(self, args): + """ + indicate no messages available + + This method tells the client that the queue has no messages + available for the client. + + PARAMETERS: + cluster_id: shortstr + + Cluster id + + For use by cluster applications, should not be used by + client applications. + + """ + cluster_id = args.read_shortstr() + + + def _basic_get_ok(self, args, msg): + """ + provide client with a message + + This method delivers a message to the client following a get + method. A message delivered by 'get-ok' must be acknowledged + unless the no-ack option was set in the get method. + + PARAMETERS: + delivery_tag: longlong + + server-assigned delivery tag + + The server-assigned and channel-specific delivery tag + + RULE: + + The delivery tag is valid only within the channel + from which the message was received. I.e. a client + MUST NOT receive a message on one channel and then + acknowledge it on another. + + RULE: + + The server MUST NOT use a zero value for delivery + tags. Zero is reserved for client use, meaning "all + messages so far received". 
+ + redelivered: boolean + + message is being redelivered + + This indicates that the message has been previously + delivered to this or another client. + + exchange: shortstr + + Specifies the name of the exchange that the message + was originally published to. If empty, the message + was published to the default exchange. + + routing_key: shortstr + + Message routing key + + Specifies the routing key name specified when the + message was published. + + message_count: long + + number of messages pending + + This field reports the number of messages pending on + the queue, excluding the message being delivered. + Note that this figure is indicative, not reliable, and + can change arbitrarily as messages are added to the + queue and removed by other clients. + + """ + delivery_tag = args.read_longlong() + redelivered = args.read_bit() + exchange = args.read_shortstr() + routing_key = args.read_shortstr() + message_count = args.read_long() + + msg.delivery_info = { + 'delivery_tag': delivery_tag, + 'redelivered': redelivered, + 'exchange': exchange, + 'routing_key': routing_key, + 'message_count': message_count + } + + return msg + + + def basic_publish(self, msg, exchange='', routing_key='', + mandatory=False, immediate=False, ticket=None): + """ + publish a message + + This method publishes a message to a specific exchange. The + message will be routed to queues as defined by the exchange + configuration and distributed to any active consumers when the + transaction, if any, is committed. + + PARAMETERS: + exchange: shortstr + + Specifies the name of the exchange to publish to. The + exchange name can be empty, meaning the default + exchange. If the exchange name is specified, and that + exchange does not exist, the server will raise a + channel exception. + + RULE: + + The server MUST accept a blank exchange name to + mean the default exchange. + + RULE: + + If the exchange was declared as an internal + exchange, the server MUST raise a channel + exception with a reply code 403 (access refused). + + RULE: + + The exchange MAY refuse basic content in which + case it MUST raise a channel exception with reply + code 540 (not implemented). + + routing_key: shortstr + + Message routing key + + Specifies the routing key for the message. The + routing key is used for routing messages depending on + the exchange configuration. + + mandatory: boolean + + indicate mandatory routing + + This flag tells the server how to react if the message + cannot be routed to a queue. If this flag is True, the + server will return an unroutable message with a Return + method. If this flag is False, the server silently + drops the message. + + RULE: + + The server SHOULD implement the mandatory flag. + + immediate: boolean + + request immediate delivery + + This flag tells the server how to react if the message + cannot be routed to a queue consumer immediately. If + this flag is set, the server will return an + undeliverable message with a Return method. If this + flag is zero, the server will queue the message, but + with no guarantee that it will ever be consumed. + + RULE: + + The server SHOULD implement the immediate flag. + + ticket: short + + RULE: + + The client MUST provide a valid access ticket + giving "write" access rights to the access realm + for the exchange. 
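For the synchronous path, basic_get above lends itself to a simple polling loop. A sketch, with an invented queue name and 'chan' again assumed to be an open Channel:

    msg = chan.basic_get(queue='work.queue', no_ack=False)
    if msg is None:
        # Basic.get-empty: nothing queued right now
        pass
    else:
        # message_count is the (indicative) number still pending on the queue
        remaining = msg.delivery_info['message_count']
        # ... process msg.body ...
        chan.basic_ack(msg.delivery_info['delivery_tag'])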
+ + """ + args = AMQPWriter() + if ticket is not None: + args.write_short(ticket) + else: + args.write_short(self.default_ticket) + args.write_shortstr(exchange) + args.write_shortstr(routing_key) + args.write_bit(mandatory) + args.write_bit(immediate) + + self._send_method((60, 40), args, msg) + + + def basic_qos(self, prefetch_size, prefetch_count, a_global): + """ + specify quality of service + + This method requests a specific quality of service. The QoS + can be specified for the current channel or for all channels + on the connection. The particular properties and semantics of + a qos method always depend on the content class semantics. + Though the qos method could in principle apply to both peers, + it is currently meaningful only for the server. + + PARAMETERS: + prefetch_size: long + + prefetch window in octets + + The client can request that messages be sent in + advance so that when the client finishes processing a + message, the following message is already held + locally, rather than needing to be sent down the + channel. Prefetching gives a performance improvement. + This field specifies the prefetch window size in + octets. The server will send a message in advance if + it is equal to or smaller in size than the available + prefetch size (and also falls into other prefetch + limits). May be set to zero, meaning "no specific + limit", although other prefetch limits may still + apply. The prefetch-size is ignored if the no-ack + option is set. + + RULE: + + The server MUST ignore this setting when the + client is not processing any messages - i.e. the + prefetch size does not limit the transfer of + single messages to a client, only the sending in + advance of more messages while the client still + has one or more unacknowledged messages. + + prefetch_count: short + + prefetch window in messages + + Specifies a prefetch window in terms of whole + messages. This field may be used in combination with + the prefetch-size field; a message will only be sent + in advance if both prefetch windows (and those at the + channel and connection level) allow it. The prefetch- + count is ignored if the no-ack option is set. + + RULE: + + The server MAY send less data in advance than + allowed by the client's specified prefetch windows + but it MUST NOT send more. + + a_global: boolean + + apply to entire connection + + By default the QoS settings apply to the current + channel only. If this field is set, they are applied + to the entire connection. + + """ + args = AMQPWriter() + args.write_long(prefetch_size) + args.write_short(prefetch_count) + args.write_bit(a_global) + self._send_method((60, 10), args) + return self.wait(allowed_methods=[ + (60, 11), # Channel.basic_qos_ok + ]) + + + def _basic_qos_ok(self, args): + """ + confirm the requested qos + + This method tells the client that the requested QoS levels + could be handled by the server. The requested QoS applies to + all active consumers until a new QoS is defined. + + """ + pass + + + def basic_recover(self, requeue=False): + """ + redeliver unacknowledged messages + + This method asks the broker to redeliver all unacknowledged + messages on a specified channel. Zero or more messages may be + redelivered. This method is only allowed on non-transacted + channels. + + RULE: + + The server MUST set the redelivered flag on all messages + that are resent. + + RULE: + + The server MUST raise a channel exception if this is + called on a transacted channel. 
+ + PARAMETERS: + requeue: boolean + + requeue the message + + If this field is False, the message will be redelivered + to the original recipient. If this field is True, the + server will attempt to requeue the message, + potentially then delivering it to an alternative + subscriber. + + """ + args = AMQPWriter() + args.write_bit(requeue) + self._send_method((60, 100), args) + + + def basic_reject(self, delivery_tag, requeue): + """ + reject an incoming message + + This method allows a client to reject a message. It can be + used to interrupt and cancel large incoming messages, or + return untreatable messages to their original queue. + + RULE: + + The server SHOULD be capable of accepting and process the + Reject method while sending message content with a Deliver + or Get-Ok method. I.e. the server should read and process + incoming methods while sending output frames. To cancel a + partially-send content, the server sends a content body + frame of size 1 (i.e. with no data except the frame-end + octet). + + RULE: + + The server SHOULD interpret this method as meaning that + the client is unable to process the message at this time. + + RULE: + + A client MUST NOT use this method as a means of selecting + messages to process. A rejected message MAY be discarded + or dead-lettered, not necessarily passed to another + client. + + PARAMETERS: + delivery_tag: longlong + + server-assigned delivery tag + + The server-assigned and channel-specific delivery tag + + RULE: + + The delivery tag is valid only within the channel + from which the message was received. I.e. a client + MUST NOT receive a message on one channel and then + acknowledge it on another. + + RULE: + + The server MUST NOT use a zero value for delivery + tags. Zero is reserved for client use, meaning "all + messages so far received". + + requeue: boolean + + requeue the message + + If this field is False, the message will be discarded. + If this field is True, the server will attempt to + requeue the message. + + RULE: + + The server MUST NOT deliver the message to the + same client within the context of the current + channel. The recommended strategy is to attempt + to deliver the message to an alternative consumer, + and if that is not possible, to move the message + to a dead-letter queue. The server MAY use more + sophisticated tracking to hold the message on the + queue and redeliver it to the same client at a + later stage. + + """ + args = AMQPWriter() + args.write_longlong(delivery_tag) + args.write_bit(requeue) + self._send_method((60, 90), args) + + + def _basic_return(self, args, msg): + """ + return a failed message + + This method returns an undeliverable message that was + published with the "immediate" flag set, or an unroutable + message published with the "mandatory" flag set. The reply + code and text provide information about the reason that the + message was undeliverable. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + exchange: shortstr + + Specifies the name of the exchange that the message + was originally published to. + + routing_key: shortstr + + Message routing key + + Specifies the routing key name specified when the + message was published. 
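Two small recovery sketches for the methods above, assuming 'msg' came from a consumer callback or basic_get on an open channel:

    # Hand one message back so the broker can try another consumer.
    chan.basic_reject(msg.delivery_info['delivery_tag'], requeue=True)

    # Or ask for every unacknowledged message on the channel to be resent
    # (only legal on non-transacted channels).
    chan.basic_recover(requeue=True)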
+ + """ + reply_code = args.read_short() + reply_text = args.read_shortstr() + exchange = args.read_shortstr() + routing_key = args.read_shortstr() + + self.returned_messages.put( + (reply_code, reply_text, exchange, routing_key, msg) + ) + + + ############# + # + # Tx + # + # + # work with standard transactions + # + # Standard transactions provide so-called "1.5 phase commit". We + # can ensure that work is never lost, but there is a chance of + # confirmations being lost, so that messages may be resent. + # Applications that use standard transactions must be able to + # detect and ignore duplicate messages. + # + # GRAMMAR: + # + # tx = C:SELECT S:SELECT-OK + # / C:COMMIT S:COMMIT-OK + # / C:ROLLBACK S:ROLLBACK-OK + # + # RULE: + # + # An client using standard transactions SHOULD be able to + # track all messages received within a reasonable period, and + # thus detect and reject duplicates of the same message. It + # SHOULD NOT pass these to the application layer. + # + # + + def tx_commit(self): + """ + commit the current transaction + + This method commits all messages published and acknowledged in + the current transaction. A new transaction starts immediately + after a commit. + + """ + self._send_method((90, 20)) + return self.wait(allowed_methods=[ + (90, 21), # Channel.tx_commit_ok + ]) + + + def _tx_commit_ok(self, args): + """ + confirm a successful commit + + This method confirms to the client that the commit succeeded. + Note that if a commit fails, the server raises a channel + exception. + + """ + pass + + + def tx_rollback(self): + """ + abandon the current transaction + + This method abandons all messages published and acknowledged + in the current transaction. A new transaction starts + immediately after a rollback. + + """ + self._send_method((90, 30)) + return self.wait(allowed_methods=[ + (90, 31), # Channel.tx_rollback_ok + ]) + + + def _tx_rollback_ok(self, args): + """ + confirm a successful rollback + + This method confirms to the client that the rollback + succeeded. Note that if an rollback fails, the server raises a + channel exception. + + """ + pass + + + def tx_select(self): + """ + select standard transaction mode + + This method sets the channel to use standard transactions. + The client must use this method at least once on a channel + before using the Commit or Rollback methods. + + """ + self._send_method((90, 10)) + return self.wait(allowed_methods=[ + (90, 11), # Channel.tx_select_ok + ]) + + + def _tx_select_ok(self, args): + """ + confirm transaction mode + + This method confirms to the client that the channel was + successfully set to use standard transactions. 
+ + """ + pass + + + _METHOD_MAP = { + (20, 11): _open_ok, + (20, 20): _flow, + (20, 21): _flow_ok, + (20, 30): _alert, + (20, 40): _close, + (20, 41): _close_ok, + (30, 11): _access_request_ok, + (40, 11): _exchange_declare_ok, + (40, 21): _exchange_delete_ok, + (50, 11): _queue_declare_ok, + (50, 21): _queue_bind_ok, + (50, 31): _queue_purge_ok, + (50, 41): _queue_delete_ok, + (60, 11): _basic_qos_ok, + (60, 21): _basic_consume_ok, + (60, 31): _basic_cancel_ok, + (60, 50): _basic_return, + (60, 60): _basic_deliver, + (60, 71): _basic_get_ok, + (60, 72): _basic_get_empty, + (90, 11): _tx_select_ok, + (90, 21): _tx_commit_ok, + (90, 31): _tx_rollback_ok, + } diff --git a/vendor/amqplib/client_0_8/connection.py b/vendor/amqplib/client_0_8/connection.py new file mode 100644 index 000000000000..f41587566c5d --- /dev/null +++ b/vendor/amqplib/client_0_8/connection.py @@ -0,0 +1,826 @@ +""" +AMQP 0-8 Connections + +""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + +import logging + +from abstract_channel import AbstractChannel +from channel import Channel +from exceptions import * +from method_framing import MethodReader, MethodWriter +from serialization import AMQPReader, AMQPWriter +from transport import create_transport + +__all__ = [ + 'Connection', + ] + +# +# Client property info that gets sent to the server on connection startup +# +LIBRARY_PROPERTIES = { + 'library': 'Python amqplib', + 'library_version': '0.6.1', + } + +AMQP_LOGGER = logging.getLogger('amqplib') + + +class Connection(AbstractChannel): + """ + The connection class provides methods for a client to establish a + network connection to a server, and for both peers to operate the + connection thereafter. + + GRAMMAR: + + connection = open-connection *use-connection close-connection + open-connection = C:protocol-header + S:START C:START-OK + *challenge + S:TUNE C:TUNE-OK + C:OPEN S:OPEN-OK | S:REDIRECT + challenge = S:SECURE C:SECURE-OK + use-connection = *channel + close-connection = C:CLOSE S:CLOSE-OK + / S:CLOSE C:CLOSE-OK + + """ + def __init__(self, + host='localhost', + userid='guest', + password='guest', + login_method='AMQPLAIN', + login_response=None, + virtual_host='/', + locale='en_US', + client_properties=None, + ssl=False, + insist=False, + connect_timeout=None, + **kwargs): + """ + Create a connection to the specified host, which should be + a 'host[:port]', such as 'localhost', or '1.2.3.4:5672' + (defaults to 'localhost', if a port is not specified then + 5672 is used) + + If login_response is not specified, one is built up for you from + userid and password if they are present. 
+ + """ + if (login_response is None) \ + and (userid is not None) \ + and (password is not None): + login_response = AMQPWriter() + login_response.write_table({'LOGIN': userid, 'PASSWORD': password}) + login_response = login_response.getvalue()[4:] #Skip the length + #at the beginning + + d = {} + d.update(LIBRARY_PROPERTIES) + if client_properties: + d.update(client_properties) + + self.known_hosts = '' + + while True: + self.channels = {} + # The connection object itself is treated as channel 0 + super(Connection, self).__init__(self, 0) + + self.transport = None + + # Properties set in the Tune method + self.channel_max = 65535 + self.frame_max = 131072 + self.heartbeat = 0 + + # Properties set in the Start method + self.version_major = 0 + self.version_minor = 0 + self.server_properties = {} + self.mechanisms = [] + self.locales = [] + + # Let the transport.py module setup the actual + # socket connection to the broker. + # + self.transport = create_transport(host, connect_timeout, ssl) + + self.method_reader = MethodReader(self.transport) + self.method_writer = MethodWriter(self.transport, self.frame_max) + + self.wait(allowed_methods=[ + (10, 10), # start + ]) + + self._x_start_ok(d, login_method, login_response, locale) + + self._wait_tune_ok = True + while self._wait_tune_ok: + self.wait(allowed_methods=[ + (10, 20), # secure + (10, 30), # tune + ]) + + host = self._x_open(virtual_host, insist=insist) + if host is None: + # we weren't redirected + return + + # we were redirected, close the socket, loop and try again + try: + self.close() + except Exception: + pass + + + def _do_close(self): + self.transport.close() + self.transport = None + + temp_list = [x for x in self.channels.values() if x is not self] + for ch in temp_list: + ch._do_close() + + self.connection = self.channels = None + + + def _get_free_channel_id(self): + for i in xrange(1, self.channel_max+1): + if i not in self.channels: + return i + raise AMQPException('No free channel ids, current=%d, channel_max=%d' + % (len(self.channels), self.channel_max)) + + + def _wait_method(self, channel_id, allowed_methods): + """ + Wait for a method from the server destined for + a particular channel. + + """ + # + # Check the channel's deferred methods + # + method_queue = self.channels[channel_id].method_queue + + for queued_method in method_queue: + method_sig = queued_method[0] + if (allowed_methods is None) \ + or (method_sig in allowed_methods) \ + or (method_sig == (20, 40)): + method_queue.remove(queued_method) + return queued_method + + # + # Nothing queued, need to wait for a method from the peer + # + while True: + channel, method_sig, args, content = \ + self.method_reader.read_method() + + if (channel == channel_id) \ + and ((allowed_methods is None) \ + or (method_sig in allowed_methods) \ + or (method_sig == (20, 40))): + return method_sig, args, content + + # + # Not the channel and/or method we were looking for. Queue + # this method for later + # + self.channels[channel].method_queue.append((method_sig, args, content)) + + # + # If we just queued up a method for channel 0 (the Connection + # itself) it's probably a close method in reaction to some + # error, so deal with it right away. + # + if channel == 0: + self.wait() + + + def channel(self, channel_id=None): + """ + Fetch a Channel object identified by the numeric channel_id, or + create that object if it doesn't already exist. 
+ + """ + if channel_id in self.channels: + return self.channels[channel_id] + + return Channel(self, channel_id) + + + ################# + + def close(self, reply_code=0, reply_text='', method_sig=(0, 0)): + """ + request a connection close + + This method indicates that the sender wants to close the + connection. This may be due to internal conditions (e.g. a + forced shut-down) or due to an error handling a specific + method, i.e. an exception. When a close is due to an + exception, the sender provides the class and method id of the + method which caused the exception. + + RULE: + + After sending this method any received method except the + Close-OK method MUST be discarded. + + RULE: + + The peer sending this method MAY use a counter or timeout + to detect failure of the other peer to respond correctly + with the Close-OK method. + + RULE: + + When a server receives the Close method from a client it + MUST delete all server-side resources associated with the + client's context. A client CANNOT reconnect to a context + after sending or receiving a Close method. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + class_id: short + + failing method class + + When the close is provoked by a method exception, this + is the class of the method. + + method_id: short + + failing method ID + + When the close is provoked by a method exception, this + is the ID of the method. + + """ + if self.transport is None: + # already closed + return + + args = AMQPWriter() + args.write_short(reply_code) + args.write_shortstr(reply_text) + args.write_short(method_sig[0]) # class_id + args.write_short(method_sig[1]) # method_id + self._send_method((10, 60), args) + return self.wait(allowed_methods=[ + (10, 61), # Connection.close_ok + ]) + + + def _close(self, args): + """ + request a connection close + + This method indicates that the sender wants to close the + connection. This may be due to internal conditions (e.g. a + forced shut-down) or due to an error handling a specific + method, i.e. an exception. When a close is due to an + exception, the sender provides the class and method id of the + method which caused the exception. + + RULE: + + After sending this method any received method except the + Close-OK method MUST be discarded. + + RULE: + + The peer sending this method MAY use a counter or timeout + to detect failure of the other peer to respond correctly + with the Close-OK method. + + RULE: + + When a server receives the Close method from a client it + MUST delete all server-side resources associated with the + client's context. A client CANNOT reconnect to a context + after sending or receiving a Close method. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + class_id: short + + failing method class + + When the close is provoked by a method exception, this + is the class of the method. + + method_id: short + + failing method ID + + When the close is provoked by a method exception, this + is the ID of the method. 
+ + """ + reply_code = args.read_short() + reply_text = args.read_shortstr() + class_id = args.read_short() + method_id = args.read_short() + + self._x_close_ok() + + raise AMQPConnectionException(reply_code, reply_text, (class_id, method_id)) + + + def _x_close_ok(self): + """ + confirm a connection close + + This method confirms a Connection.Close method and tells the + recipient that it is safe to release resources for the + connection and close the socket. + + RULE: + + A peer that detects a socket closure without having + received a Close-Ok handshake method SHOULD log the error. + + """ + self._send_method((10, 61)) + self._do_close() + + + def _close_ok(self, args): + """ + confirm a connection close + + This method confirms a Connection.Close method and tells the + recipient that it is safe to release resources for the + connection and close the socket. + + RULE: + + A peer that detects a socket closure without having + received a Close-Ok handshake method SHOULD log the error. + + """ + self._do_close() + + + def _x_open(self, virtual_host, capabilities='', insist=False): + """ + open connection to virtual host + + This method opens a connection to a virtual host, which is a + collection of resources, and acts to separate multiple + application domains within a server. + + RULE: + + The client MUST open the context before doing any work on + the connection. + + PARAMETERS: + virtual_host: shortstr + + virtual host name + + The name of the virtual host to work with. + + RULE: + + If the server supports multiple virtual hosts, it + MUST enforce a full separation of exchanges, + queues, and all associated entities per virtual + host. An application, connected to a specific + virtual host, MUST NOT be able to access resources + of another virtual host. + + RULE: + + The server SHOULD verify that the client has + permission to access the specified virtual host. + + RULE: + + The server MAY configure arbitrary limits per + virtual host, such as the number of each type of + entity that may be used, per connection and/or in + total. + + capabilities: shortstr + + required capabilities + + The client may specify a number of capability names, + delimited by spaces. The server can use this string + to how to process the client's connection request. + + insist: boolean + + insist on connecting to server + + In a configuration with multiple load-sharing servers, + the server may respond to a Connection.Open method + with a Connection.Redirect. The insist option tells + the server that the client is insisting on a + connection to the specified server. + + RULE: + + When the client uses the insist option, the server + SHOULD accept the client connection unless it is + technically unable to do so. + + """ + args = AMQPWriter() + args.write_shortstr(virtual_host) + args.write_shortstr(capabilities) + args.write_bit(insist) + self._send_method((10, 40), args) + return self.wait(allowed_methods=[ + (10, 41), # Connection.open_ok + (10, 50), # Connection.redirect + ]) + + + def _open_ok(self, args): + """ + signal that the connection is ready + + This method signals to the client that the connection is ready + for use. + + PARAMETERS: + known_hosts: shortstr + + """ + self.known_hosts = args.read_shortstr() + AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts) + return None + + + def _redirect(self, args): + """ + asks the client to use a different server + + This method redirects the client to another server, based on + the requested virtual host and/or capabilities. 
+ + RULE: + + When getting the Connection.Redirect method, the client + SHOULD reconnect to the host specified, and if that host + is not present, to any of the hosts specified in the + known-hosts list. + + PARAMETERS: + host: shortstr + + server to connect to + + Specifies the server to connect to. This is an IP + address or a DNS name, optionally followed by a colon + and a port number. If no port number is specified, the + client should use the default port number for the + protocol. + + known_hosts: shortstr + + """ + host = args.read_shortstr() + self.known_hosts = args.read_shortstr() + AMQP_LOGGER.debug('Redirected to [%s], known_hosts [%s]' % (host, self.known_hosts)) + return host + + + def _secure(self, args): + """ + security mechanism challenge + + The SASL protocol works by exchanging challenges and responses + until both peers have received sufficient information to + authenticate each other. This method challenges the client to + provide more information. + + PARAMETERS: + challenge: longstr + + security challenge data + + Challenge information, a block of opaque binary data + passed to the security mechanism. + + """ + challenge = args.read_longstr() + + + def _x_secure_ok(self, response): + """ + security mechanism response + + This method attempts to authenticate, passing a block of SASL + data for the security mechanism at the server side. + + PARAMETERS: + response: longstr + + security response data + + A block of opaque data passed to the security + mechanism. The contents of this data are defined by + the SASL security mechanism. + + """ + args = AMQPWriter() + args.write_longstr(response) + self._send_method((10, 21), args) + + + def _start(self, args): + """ + start connection negotiation + + This method starts the connection negotiation process by + telling the client the protocol version that the server + proposes, along with a list of security mechanisms which the + client can use for authentication. + + RULE: + + If the client cannot handle the protocol version suggested + by the server it MUST close the socket connection. + + RULE: + + The server MUST provide a protocol version that is lower + than or equal to that requested by the client in the + protocol header. If the server cannot support the + specified protocol it MUST NOT send this method, but MUST + close the socket connection. + + PARAMETERS: + version_major: octet + + protocol major version + + The protocol major version that the server agrees to + use, which cannot be higher than the client's major + version. + + version_minor: octet + + protocol major version + + The protocol minor version that the server agrees to + use, which cannot be higher than the client's minor + version. + + server_properties: table + + server properties + + mechanisms: longstr + + available security mechanisms + + A list of the security mechanisms that the server + supports, delimited by spaces. Currently ASL supports + these mechanisms: PLAIN. + + locales: longstr + + available message locales + + A list of the message locales that the server + supports, delimited by spaces. The locale defines the + language in which the server will send reply texts. + + RULE: + + All servers MUST support at least the en_US + locale. 
+ + """ + self.version_major = args.read_octet() + self.version_minor = args.read_octet() + self.server_properties = args.read_table() + self.mechanisms = args.read_longstr().split(' ') + self.locales = args.read_longstr().split(' ') + + AMQP_LOGGER.debug('Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s' + % (self.version_major, self.version_minor, + str(self.server_properties), self.mechanisms, self.locales)) + + + def _x_start_ok(self, client_properties, mechanism, response, locale): + """ + select security mechanism and locale + + This method selects a SASL security mechanism. ASL uses SASL + (RFC2222) to negotiate authentication and encryption. + + PARAMETERS: + client_properties: table + + client properties + + mechanism: shortstr + + selected security mechanism + + A single security mechanisms selected by the client, + which must be one of those specified by the server. + + RULE: + + The client SHOULD authenticate using the highest- + level security profile it can handle from the list + provided by the server. + + RULE: + + The mechanism field MUST contain one of the + security mechanisms proposed by the server in the + Start method. If it doesn't, the server MUST close + the socket. + + response: longstr + + security response data + + A block of opaque data passed to the security + mechanism. The contents of this data are defined by + the SASL security mechanism. For the PLAIN security + mechanism this is defined as a field table holding two + fields, LOGIN and PASSWORD. + + locale: shortstr + + selected message locale + + A single message local selected by the client, which + must be one of those specified by the server. + + """ + args = AMQPWriter() + args.write_table(client_properties) + args.write_shortstr(mechanism) + args.write_longstr(response) + args.write_shortstr(locale) + self._send_method((10, 11), args) + + + def _tune(self, args): + """ + propose connection tuning parameters + + This method proposes a set of connection configuration values + to the client. The client can accept and/or adjust these. + + PARAMETERS: + channel_max: short + + proposed maximum channels + + The maximum total number of channels that the server + allows per connection. Zero means that the server does + not impose a fixed limit, but the number of allowed + channels may be limited by available server resources. + + frame_max: long + + proposed maximum frame size + + The largest frame size that the server proposes for + the connection. The client can negotiate a lower + value. Zero means that the server does not impose any + specific limit but may reject very large frames if it + cannot allocate resources for them. + + RULE: + + Until the frame-max has been negotiated, both + peers MUST accept frames of up to 4096 octets + large. The minimum non-zero value for the frame- + max field is 4096. + + heartbeat: short + + desired heartbeat delay + + The delay, in seconds, of the connection heartbeat + that the server wants. Zero means the server does not + want a heartbeat. + + """ + self.channel_max = args.read_short() or self.channel_max + self.frame_max = args.read_long() or self.frame_max + self.method_writer.frame_max = self.frame_max + self.heartbeat = args.read_short() + + self._x_tune_ok(self.channel_max, self.frame_max, 0) + + + def _x_tune_ok(self, channel_max, frame_max, heartbeat): + """ + negotiate connection tuning parameters + + This method sends the client's connection tuning parameters to + the server. 
Certain fields are negotiated, others provide + capability information. + + PARAMETERS: + channel_max: short + + negotiated maximum channels + + The maximum total number of channels that the client + will use per connection. May not be higher than the + value specified by the server. + + RULE: + + The server MAY ignore the channel-max value or MAY + use it for tuning its resource allocation. + + frame_max: long + + negotiated maximum frame size + + The largest frame size that the client and server will + use for the connection. Zero means that the client + does not impose any specific limit but may reject very + large frames if it cannot allocate resources for them. + Note that the frame-max limit applies principally to + content frames, where large contents can be broken + into frames of arbitrary size. + + RULE: + + Until the frame-max has been negotiated, both + peers must accept frames of up to 4096 octets + large. The minimum non-zero value for the frame- + max field is 4096. + + heartbeat: short + + desired heartbeat delay + + The delay, in seconds, of the connection heartbeat + that the client wants. Zero means the client does not + want a heartbeat. + + """ + args = AMQPWriter() + args.write_short(channel_max) + args.write_long(frame_max) + args.write_short(heartbeat) + self._send_method((10, 31), args) + self._wait_tune_ok = False + + + _METHOD_MAP = { + (10, 10): _start, + (10, 20): _secure, + (10, 30): _tune, + (10, 41): _open_ok, + (10, 50): _redirect, + (10, 60): _close, + (10, 61): _close_ok, + } diff --git a/vendor/amqplib/client_0_8/exceptions.py b/vendor/amqplib/client_0_8/exceptions.py new file mode 100644 index 000000000000..58d0b5f7ef7b --- /dev/null +++ b/vendor/amqplib/client_0_8/exceptions.py @@ -0,0 +1,105 @@ +""" +Exceptions used by amqplib.client_0_8 + +""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + + +__all__ = [ + 'AMQPException', + 'AMQPConnectionException', + 'AMQPChannelException', + ] + + +class AMQPException(Exception): + def __init__(self, reply_code, reply_text, method_sig): + Exception.__init__(self) + self.amqp_reply_code = reply_code + self.amqp_reply_text = reply_text + self.amqp_method_sig = method_sig + self.args = ( + reply_code, + reply_text, + method_sig, + METHOD_NAME_MAP.get(method_sig, '') + ) + + +class AMQPConnectionException(AMQPException): + pass + + +class AMQPChannelException(AMQPException): + pass + + +METHOD_NAME_MAP = { + (10, 10): 'Connection.start', + (10, 11): 'Connection.start_ok', + (10, 20): 'Connection.secure', + (10, 21): 'Connection.secure_ok', + (10, 30): 'Connection.tune', + (10, 31): 'Connection.tune_ok', + (10, 40): 'Connection.open', + (10, 41): 'Connection.open_ok', + (10, 50): 'Connection.redirect', + (10, 60): 'Connection.close', + (10, 61): 'Connection.close_ok', + (20, 10): 'Channel.open', + (20, 11): 'Channel.open_ok', + (20, 20): 'Channel.flow', + (20, 21): 'Channel.flow_ok', + (20, 30): 'Channel.alert', + (20, 40): 'Channel.close', + (20, 41): 'Channel.close_ok', + (30, 10): 'Channel.access_request', + (30, 11): 'Channel.access_request_ok', + (40, 10): 'Channel.exchange_declare', + (40, 11): 'Channel.exchange_declare_ok', + (40, 20): 'Channel.exchange_delete', + (40, 21): 'Channel.exchange_delete_ok', + (50, 10): 'Channel.queue_declare', + (50, 11): 'Channel.queue_declare_ok', + (50, 20): 'Channel.queue_bind', + (50, 21): 'Channel.queue_bind_ok', + (50, 30): 'Channel.queue_purge', + (50, 31): 'Channel.queue_purge_ok', + (50, 40): 'Channel.queue_delete', + (50, 41): 'Channel.queue_delete_ok', + (60, 10): 'Channel.basic_qos', + (60, 11): 'Channel.basic_qos_ok', + (60, 20): 'Channel.basic_consume', + (60, 21): 'Channel.basic_consume_ok', + (60, 30): 'Channel.basic_cancel', + (60, 31): 'Channel.basic_cancel_ok', + (60, 40): 'Channel.basic_publish', + (60, 50): 'Channel.basic_return', + (60, 60): 'Channel.basic_deliver', + (60, 70): 'Channel.basic_get', + (60, 71): 'Channel.basic_get_ok', + (60, 72): 'Channel.basic_get_empty', + (60, 80): 'Channel.basic_ack', + (60, 90): 'Channel.basic_reject', + (60, 100): 'Channel.basic_recover', + (90, 10): 'Channel.tx_select', + (90, 11): 'Channel.tx_select_ok', + (90, 20): 'Channel.tx_commit', + (90, 21): 'Channel.tx_commit_ok', + (90, 30): 'Channel.tx_rollback', + (90, 31): 'Channel.tx_rollback_ok', +} diff --git a/vendor/amqplib/client_0_8/method_framing.py b/vendor/amqplib/client_0_8/method_framing.py new file mode 100644 index 000000000000..9f69ee50c77e --- /dev/null +++ b/vendor/amqplib/client_0_8/method_framing.py @@ -0,0 +1,244 @@ +""" +Convert between frames and higher-level AMQP methods + +""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
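A sketch of catching the channel-level errors defined in this exceptions module. Passively declaring a queue that does not exist is the classic way to trigger one; the queue name is invented and 'chan' is assumed to be an open Channel.

    try:
        chan.queue_declare(queue='no.such.queue', passive=True)
    except AMQPChannelException, exc:
        # args carries (reply_code, reply_text, method_sig, method_name)
        print 'broker replied %d %s during %s' % (
            exc.amqp_reply_code, exc.amqp_reply_text,
            METHOD_NAME_MAP.get(exc.amqp_method_sig, exc.amqp_method_sig))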
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + +from Queue import Empty, Queue +from struct import pack, unpack + +try: + from collections import defaultdict +except: + class defaultdict(dict): + """ + Mini-implementation of collections.defaultdict that + appears in Python 2.5 and up. + + """ + def __init__(self, default_factory): + dict.__init__(self) + self.default_factory = default_factory + + def __getitem__(self, key): + try: + return dict.__getitem__(self, key) + except KeyError: + result = self.default_factory() + dict.__setitem__(self, key, result) + return result + + +from basic_message import Message +from exceptions import * +from serialization import AMQPReader + +__all__ = [ + 'MethodReader', + ] + +# +# MethodReader needs to know which methods are supposed +# to be followed by content headers and bodies. +# +_CONTENT_METHODS = [ + (60, 50), # Basic.return + (60, 60), # Basic.deliver + (60, 71), # Basic.get_ok + ] + + +class _PartialMessage(object): + """ + Helper class to build up a multi-frame method. + + """ + def __init__(self, method_sig, args): + self.method_sig = method_sig + self.args = args + self.msg = Message() + self.body_parts = [] + self.body_received = 0 + self.body_size = None + self.complete = False + + + def add_header(self, payload): + class_id, weight, self.body_size = unpack('>HHQ', payload[:12]) + self.msg._load_properties(payload[12:]) + self.complete = (self.body_size == 0) + + + def add_payload(self, payload): + self.body_parts.append(payload) + self.body_received += len(payload) + + if self.body_received == self.body_size: + self.msg.body = ''.join(self.body_parts) + self.complete = True + + +class MethodReader(object): + """ + Helper class to receive frames from the broker, combine them if + necessary with content-headers and content-bodies into complete methods. + + Normally a method is represented as a tuple containing + (channel, method_sig, args, content). + + In the case of a framing error, an AMQPConnectionException is placed + in the queue. + + In the case of unexpected frames, a tuple made up of + (channel, AMQPChannelException) is placed in the queue. + + """ + def __init__(self, source): + self.source = source + self.queue = Queue() + self.running = False + self.partial_messages = {} + # For each channel, which type is expected next + self.expected_types = defaultdict(lambda:1) + + + def _next_method(self): + """ + Read the next method from the source, once one complete method has + been assembled it is placed in the internal queue. + + """ + while self.queue.empty(): + try: + frame_type, channel, payload = self.source.read_frame() + except Exception, e: + # + # Connection was closed? Framing Error? 
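+ # (Whichever it was, the exception is queued rather than raised here,
+ # so that read_method() below can re-raise it to the caller asking for
+ # the next method.)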
+ # + self.queue.put(e) + break + + if self.expected_types[channel] != frame_type: + self.queue.put(( + channel, + Exception('Received frame type %s while expecting type: %s' % + (frame_type, self.expected_types[channel]) + ) + )) + elif frame_type == 1: + self._process_method_frame(channel, payload) + elif frame_type == 2: + self._process_content_header(channel, payload) + elif frame_type == 3: + self._process_content_body(channel, payload) + + + def _process_method_frame(self, channel, payload): + """ + Process Method frames + + """ + method_sig = unpack('>HH', payload[:4]) + args = AMQPReader(payload[4:]) + + if method_sig in _CONTENT_METHODS: + # + # Save what we've got so far and wait for the content-header + # + self.partial_messages[channel] = _PartialMessage(method_sig, args) + self.expected_types[channel] = 2 + else: + self.queue.put((channel, method_sig, args, None)) + + + def _process_content_header(self, channel, payload): + """ + Process Content Header frames + + """ + partial = self.partial_messages[channel] + partial.add_header(payload) + + if partial.complete: + # + # a bodyless message, we're done + # + self.queue.put((channel, partial.method_sig, partial.args, partial.msg)) + del self.partial_messages[channel] + self.expected_types[channel] = 1 + else: + # + # wait for the content-body + # + self.expected_types[channel] = 3 + + + def _process_content_body(self, channel, payload): + """ + Process Content Body frames + + """ + partial = self.partial_messages[channel] + partial.add_payload(payload) + if partial.complete: + # + # Stick the message in the queue and go back to + # waiting for method frames + # + self.queue.put((channel, partial.method_sig, partial.args, partial.msg)) + del self.partial_messages[channel] + self.expected_types[channel] = 1 + + + def read_method(self): + """ + Read a method from the peer. + + """ + self._next_method() + m = self.queue.get() + if isinstance(m, Exception): + raise m + return m + + +class MethodWriter(object): + """ + Convert AMQP methods into AMQP frames and send them out + to the peer. + + """ + def __init__(self, dest, frame_max): + self.dest = dest + self.frame_max = frame_max + + + def write_method(self, channel, method_sig, args, content=None): + payload = pack('>HH', method_sig[0], method_sig[1]) + args + + self.dest.write_frame(1, channel, payload) + + if content: + body = content.body + payload = pack('>HHQ', method_sig[0], 0, len(body)) + \ + content._serialize_properties() + + self.dest.write_frame(2, channel, payload) + + while body: + payload, body = body[:self.frame_max - 8], body[self.frame_max -8:] + self.dest.write_frame(3, channel, payload) diff --git a/vendor/amqplib/client_0_8/serialization.py b/vendor/amqplib/client_0_8/serialization.py new file mode 100644 index 000000000000..3936dcd7a8a4 --- /dev/null +++ b/vendor/amqplib/client_0_8/serialization.py @@ -0,0 +1,530 @@ +""" +Convert between bytestreams and higher-level AMQP types. + +2007-11-05 Barry Pederson + +""" +# Copyright (C) 2007 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
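MethodWriter above emits one type-1 method frame, then (for content-carrying methods) a type-2 content header holding the class id, a zero weight, the 64-bit body size and the property flags, then type-3 body frames of at most frame_max - 8 octets each. A rough standalone sketch of that split, assuming empty method arguments and no content properties (a single property-flags short of 0):

    from struct import pack

    def frames_for(method_sig, args, body, frame_max):
        # method frame (type 1): class id, method id, packed arguments
        yield 1, pack('>HH', *method_sig) + args
        if body:
            # content header (type 2): class id, weight 0, body size, property flags
            yield 2, pack('>HHQ', method_sig[0], 0, len(body)) + pack('>H', 0)
            # content body frames (type 3), chunked to fit inside frame_max
            step = frame_max - 8
            for i in range(0, len(body), step):
                yield 3, body[i:i + step]

    for ftype, payload in frames_for((60, 40), b'', b'x' * 300000, 131072):
        print("%d %d" % (ftype, len(payload)))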
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + +import string +from datetime import datetime +from decimal import Decimal +from struct import pack, unpack +from time import mktime + +try: + from cStringIO import StringIO +except: + from StringIO import StringIO + + +DUMP_CHARS = string.letters + string.digits + string.punctuation + +def _hexdump(s): + """ + Present just for debugging help. + + """ + while s: + x, s = s[:16], s[16:] + + hex = ['%02x' % ord(ch) for ch in x] + hex = ' '.join(hex).ljust(50) + + char_dump = [] + for ch in x: + if ch in DUMP_CHARS: + char_dump.append(ch) + else: + char_dump.append('.') + + print hex + ''.join(char_dump) + + +class AMQPReader(object): + """ + Read higher-level AMQP types from a bytestream. + + """ + def __init__(self, source): + """ + Source should be either a file-like object with a read() method, or + a plain (non-unicode) string. + + """ + if isinstance(source, str): + self.input = StringIO(source) + elif hasattr(source, 'read'): + self.input = source + else: + raise ValueError('AMQPReader needs a file-like object or plain string') + + self.bitcount = self.bits = 0 + + + def close(self): + self.input.close() + + + def read(self, n): + """ + Read n bytes. + + """ + self.bitcount = self.bits = 0 + return self.input.read(n) + + + def read_bit(self): + """ + Read a single boolean value. + + """ + if not self.bitcount: + self.bits = ord(self.input.read(1)) + self.bitcount = 8 + result = (self.bits & 1) == 1 + self.bits >>= 1 + self.bitcount -= 1 + return result + + + def read_octet(self): + """ + Read one byte, return as an integer + + """ + self.bitcount = self.bits = 0 + return unpack('B', self.input.read(1))[0] + + + def read_short(self): + """ + Read an unsigned 16-bit integer + + """ + self.bitcount = self.bits = 0 + return unpack('>H', self.input.read(2))[0] + + + def read_long(self): + """ + Read an unsigned 32-bit integer + + """ + self.bitcount = self.bits = 0 + return unpack('>I', self.input.read(4))[0] + + + def read_longlong(self): + """ + Read an unsigned 64-bit integer + + """ + self.bitcount = self.bits = 0 + return unpack('>Q', self.input.read(8))[0] + + + def read_shortstr(self): + """ + Read a utf-8 encoded string that's stored in up to + 255 bytes. Return it decoded as a Python unicode object. + + """ + self.bitcount = self.bits = 0 + slen = unpack('B', self.input.read(1))[0] + return self.input.read(slen).decode('utf-8') + + + def read_longstr(self): + """ + Read a string that's up to 2**32 bytes, the encoding + isn't specified in the AMQP spec, so just return it as + a plain Python string. + + """ + self.bitcount = self.bits = 0 + slen = unpack('>I', self.input.read(4))[0] + return self.input.read(slen) + + + def read_table(self): + """ + Read an AMQP table, and return as a Python dictionary. 
+ + """ + self.bitcount = self.bits = 0 + tlen = unpack('>I', self.input.read(4))[0] + table_data = AMQPReader(self.input.read(tlen)) + result = {} + while table_data.input.tell() < tlen: + name = table_data.read_shortstr() + ftype = table_data.input.read(1) + if ftype == 'S': + val = table_data.read_longstr() + elif ftype == 'I': + val = unpack('>i', table_data.input.read(4))[0] + elif ftype == 'D': + d = table_data.read_octet() + n = unpack('>i', table_data.input.read(4))[0] + val = Decimal(n) / Decimal(10 ** d) + elif ftype == 'T': + val = table_data.read_timestamp() + elif ftype == 'F': + val = table_data.read_table() # recurse + result[name] = val + return result + + + def read_timestamp(self): + """ + Read and AMQP timestamp, which is a 64-bit integer representing + seconds since the Unix epoch in 1-second resolution. Return as + a Python datetime.datetime object, expressed as localtime. + + """ + return datetime.fromtimestamp(self.read_longlong()) + + +class AMQPWriter(object): + """ + Convert higher-level AMQP types to bytestreams. + + """ + def __init__(self, dest=None): + """ + dest may be a file-type object (with a write() method). If None + then a StringIO is created, and the contents can be accessed with + this class's getvalue() method. + + """ + if dest is None: + self.out = StringIO() + else: + self.out = dest + + self.bits = [] + self.bitcount = 0 + + + def _flushbits(self): + if self.bits: + for b in self.bits: + self.out.write(pack('B', b)) + self.bits = [] + self.bitcount = 0 + + + def close(self): + """ + Pass through if possible to any file-like destinations. + + """ + if hasattr(self.out, 'close'): + self.out.close() + + + def flush(self): + """ + Pass through if possible to any file-like destinations. + + """ + if hasattr(self.out, 'flush'): + self.out.flush() + + + def getvalue(self): + """ + Get what's been encoded so far if we're working with a StringIO. + + """ + self._flushbits() + return self.out.getvalue() + + + def write(self, s): + """ + Write a plain Python string, with no special encoding. + + """ + self._flushbits() + self.out.write(s) + + + def write_bit(self, b): + """ + Write a boolean value. + + """ + if b: + b = 1 + else: + b = 0 + shift = self.bitcount % 8 + if shift == 0: + self.bits.append(0) + self.bits[-1] |= (b << shift) + self.bitcount += 1 + + + def write_octet(self, n): + """ + Write an integer as an unsigned 8-bit value. + + """ + if (n < 0) or (n > 255): + raise ValueError('Octet out of range 0..255') + self._flushbits() + self.out.write(pack('B', n)) + + + def write_short(self, n): + """ + Write an integer as an unsigned 16-bit value. + + """ + if (n < 0) or (n > 65535): + raise ValueError('Octet out of range 0..65535') + self._flushbits() + self.out.write(pack('>H', n)) + + + def write_long(self, n): + """ + Write an integer as an unsigned2 32-bit value. + + """ + if (n < 0) or (n >= (2**32)): + raise ValueError('Octet out of range 0..2**31-1') + self._flushbits() + self.out.write(pack('>I', n)) + + + def write_longlong(self, n): + """ + Write an integer as an unsigned 64-bit value. + + """ + if (n < 0) or (n >= (2**64)): + raise ValueError('Octet out of range 0..2**64-1') + self._flushbits() + self.out.write(pack('>Q', n)) + + + def write_shortstr(self, s): + """ + Write a string up to 255 bytes long after encoding. If passed + a unicode string, encode as UTF-8. 
+ + """ + self._flushbits() + if isinstance(s, unicode): + s = s.encode('utf-8') + if len(s) > 255: + raise ValueError('String too long') + self.write_octet(len(s)) + self.out.write(s) + + + def write_longstr(self, s): + """ + Write a string up to 2**32 bytes long after encoding. If passed + a unicode string, encode as UTF-8. + + """ + self._flushbits() + if isinstance(s, unicode): + s = s.encode('utf-8') + self.write_long(len(s)) + self.out.write(s) + + + def write_table(self, d): + """ + Write out a Python dictionary made of up string keys, and values + that are strings, signed integers, Decimal, datetime.datetime, or + sub-dictionaries following the same constraints. + + """ + self._flushbits() + table_data = AMQPWriter() + for k, v in d.items(): + table_data.write_shortstr(k) + if isinstance(v, basestring): + if isinstance(v, unicode): + v = v.encode('utf-8') + table_data.write('S') + table_data.write_longstr(v) + elif isinstance(v, (int, long)): + table_data.write('I') + table_data.write(pack('>i', v)) + elif isinstance(v, Decimal): + table_data.write('D') + sign, digits, exponent = v.as_tuple() + v = 0 + for d in digits: + v = (v * 10) + d + if sign: + v = -v + table_data.write_octet(-exponent) + table_data.write(pack('>i', v)) + elif isinstance(v, datetime): + table_data.write('T') + table_data.write_timestamp(v) + ## FIXME: timezone ? + elif isinstance(v, dict): + table_data.write('F') + table_data.write_table(v) + table_data = table_data.getvalue() + self.write_long(len(table_data)) + self.out.write(table_data) + + + def write_timestamp(self, v): + """ + Write out a Python datetime.datetime object as a 64-bit integer + representing seconds since the Unix epoch. + + """ + self.out.write(pack('>q', long(mktime(v.timetuple())))) + + +class GenericContent(object): + """ + Abstract base class for AMQP content. Subclasses should + override the PROPERTIES attribute. + + """ + PROPERTIES = [ + ('dummy', 'shortstr'), + ] + + def __init__(self, **props): + """ + Save the properties appropriate to this AMQP content type + in a 'properties' dictionary. + + """ + d = {} + for propname, _ in self.PROPERTIES: + if propname in props: + d[propname] = props[propname] + # FIXME: should we ignore unknown properties? + + self.properties = d + + + def __eq__(self, other): + """ + Check if this object has the same properties as another + content object. + + """ + return (self.properties == other.properties) + + + def __getattr__(self, name): + """ + Look for additional properties in the 'properties' + dictionary, and if present - the 'delivery_info' + dictionary. + + """ + if name in self.properties: + return self.properties[name] + + if ('delivery_info' in self.__dict__) \ + and (name in self.delivery_info): + return self.delivery_info[name] + + raise AttributeError(name) + + + def __ne__(self, other): + """ + Just return the opposite of __eq__ + + """ + return not self.__eq__(other) + + + def _load_properties(self, raw_bytes): + """ + Given the raw bytes containing the property-flags and property-list + from a content-frame-header, parse and insert into a dictionary + stored in this object as an attribute named 'properties'. 
+ + """ + r = AMQPReader(raw_bytes) + + # + # Read 16-bit shorts until we get one with a low bit set to zero + # + flags = [] + while True: + flag_bits = r.read_short() + flags.append(flag_bits) + if flag_bits & 1 == 0: + break + + shift = 0 + d = {} + for key, proptype in self.PROPERTIES: + if shift == 0: + if not flags: + break + flag_bits, flags = flags[0], flags[1:] + shift = 15 + if flag_bits & (1 << shift): + d[key] = getattr(r, 'read_' + proptype)() + shift -= 1 + + self.properties = d + + + def _serialize_properties(self): + """ + serialize the 'properties' attribute (a dictionary) into + the raw bytes making up a set of property flags and a + property list, suitable for putting into a content frame header. + + """ + shift = 15 + flag_bits = 0 + flags = [] + raw_bytes = AMQPWriter() + for key, proptype in self.PROPERTIES: + val = self.properties.get(key, None) + if val is not None: + if shift == 0: + flags.append(flag_bits) + flag_bits = 0 + shift = 15 + + flag_bits |= (1 << shift) + if proptype != 'bit': + getattr(raw_bytes, 'write_' + proptype)(val) + + shift -= 1 + + flags.append(flag_bits) + result = AMQPWriter() + for flag_bits in flags: + result.write_short(flag_bits) + result.write(raw_bytes.getvalue()) + + return result.getvalue() diff --git a/vendor/amqplib/client_0_8/transport.py b/vendor/amqplib/client_0_8/transport.py new file mode 100644 index 000000000000..3c82f456b1e6 --- /dev/null +++ b/vendor/amqplib/client_0_8/transport.py @@ -0,0 +1,220 @@ +""" +Read/Write AMQP frames over network transports. + +2009-01-14 Barry Pederson + +""" +# Copyright (C) 2009 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 + +import socket + +# +# See if Python 2.6+ SSL support is available +# +try: + import ssl + HAVE_PY26_SSL = True +except: + HAVE_PY26_SSL = False + +from struct import pack, unpack + +AMQP_PORT = 5672 + +# Yes, Advanced Message Queuing Protocol Protocol is redundant +AMQP_PROTOCOL_HEADER = 'AMQP\x01\x01\x09\x01' + + +class _AbstractTransport(object): + """ + Common superclass for TCP and SSL transports + + """ + def __init__(self, host, connect_timeout): + if ':' in host: + host, port = host.split(':', 1) + port = int(port) + else: + port = AMQP_PORT + + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.settimeout(connect_timeout) + + try: + self.sock.connect((host, port)) + except socket.error: + self.sock.close() + raise + self.sock.settimeout(None) + + self._setup_transport() + + self._write(AMQP_PROTOCOL_HEADER) + + + def __del__(self): + self.close() + + + def _read(self, n): + """ + Read exactly n bytes from the peer + + """ + raise NotImplementedError('Must be overriden in subclass') + + + def _setup_transport(self): + """ + Do any additional initialization of the class (used + by the subclasses). 
+ + """ + pass + + + def _write(self, s): + """ + Completely write a string to the peer. + + """ + raise NotImplementedError('Must be overriden in subclass') + + + def close(self): + if self.sock is not None: + self.sock.close() + self.sock = None + + + def read_frame(self): + """ + Read an AMQP frame. + + """ + frame_type, channel, size = unpack('>BHI', self._read(7)) + payload = self._read(size) + ch = self._read(1) + if ch == '\xce': + return frame_type, channel, payload + else: + raise Exception('Framing Error, received 0x%02x while expecting 0xce' % ord(ch)) + + + def write_frame(self, frame_type, channel, payload): + """ + Write out an AMQP frame. + + """ + size = len(payload) + self._write(pack('>BHI%dsB' % size, + frame_type, channel, size, payload, 0xce)) + + +class SSLTransport(_AbstractTransport): + """ + Transport that works over SSL + + """ + def _setup_transport(self): + """ + Wrap the socket in an SSL object, either the + new Python 2.6 version, or the older Python 2.5 and + lower version. + + """ + if HAVE_PY26_SSL: + self.sslobj = ssl.wrap_socket(self.sock) + self.sslobj.do_handshake() + else: + self.sslobj = socket.ssl(self.sock) + + + def _read(self, n): + """ + It seems that SSL Objects read() method may not supply as much + as you're asking for, at least with extremely large messages. + somewhere > 16K - found this in the test_channel.py test_large + unittest. + + """ + result = self.sslobj.read(n) + + while len(result) < n: + s = self.sslobj.read(n - len(result)) + if not s: + raise IOError('Socket closed') + result += s + + return result + + + def _write(self, s): + """ + Write a string out to the SSL socket fully. + + """ + while s: + n = self.sslobj.write(s) + if not n: + raise IOError('Socket closed') + s = s[n:] + + + +class TCPTransport(_AbstractTransport): + """ + Transport that deals directly with TCP socket. + + """ + def _setup_transport(self): + """ + Setup to _write() directly to the socket, and + do our own buffered reads. + + """ + self._write = self.sock.sendall + self._read_buffer = '' + + + def _read(self, n): + """ + Read exactly n bytes from the socket + + """ + while len(self._read_buffer) < n: + s = self.sock.recv(65536) + if not s: + raise IOError('Socket closed') + self._read_buffer += s + + result = self._read_buffer[:n] + self._read_buffer = self._read_buffer[n:] + + return result + + +def create_transport(host, connect_timeout, ssl=False): + """ + Given a few parameters from the Connection constructor, + select and create a subclass of _AbstractTransport. + + """ + if ssl: + return SSLTransport(host, connect_timeout) + else: + return TCPTransport(host, connect_timeout) diff --git a/vendor/anyjson/__init__.py b/vendor/anyjson/__init__.py new file mode 100644 index 000000000000..fdfeece63c99 --- /dev/null +++ b/vendor/anyjson/__init__.py @@ -0,0 +1,124 @@ +"""Wraps the best available JSON implementation available in a common +interface""" + +import sys + +__version__ = "0.2.2" +__author__ = "Rune Halvorsen " +__homepage__ = "http://bitbucket.org/runeh/anyjson/" +__docformat__ = "restructuredtext" + +implementation = None + +""" +.. function:: serialize(obj) + + Serialize the object to JSON. + +.. function:: deserialize(str) + + Deserialize JSON-encoded object to a Python object. + +.. function:: force_implementation(name) + + Load a specific json module. This is useful for testing and not much else + +.. attribute:: implementation + + The json implementation object. 
This is probably not useful to you, + except to get the name of the implementation in use. The name is + available through `implementation.name`. + +.. data:: _modules + + List of known json modules, and the names of their serialize/unserialize + methods, as well as the exception they throw. Exception can be either + an exception class or a string. +""" +_modules = [("cjson", "encode", "EncodeError", "decode", "DecodeError"), + ("jsonlib2", "write", "WriteError", "read", "ReadError"), + ("jsonlib", "write", "WriteError", "read", "ReadError"), + ("simplejson", "dumps", TypeError, "loads", ValueError), + ("json", "dumps", TypeError, "loads", ValueError), + ("django.utils.simplejson", "dumps", TypeError, "loads", + ValueError)] +_fields = ("modname", "encoder", "encerror", "decoder", "decerror") + + +class _JsonImplementation(object): + """Incapsulates a JSON implementation""" + + def __init__(self, modspec): + modinfo = dict(zip(_fields, modspec)) + + # No try block. We want importerror to end up at caller + module = self._attempt_load(modinfo["modname"]) + + self.implementation = modinfo["modname"] + self._encode = getattr(module, modinfo["encoder"]) + self._decode = getattr(module, modinfo["decoder"]) + self._encode_error = modinfo["encerror"] + self._decode_error = modinfo["decerror"] + + if isinstance(modinfo["encerror"], basestring): + self._encode_error = getattr(module, modinfo["encerror"]) + if isinstance(modinfo["decerror"], basestring): + self._decode_error = getattr(module, modinfo["decerror"]) + + self.name = modinfo["modname"] + + def __str__(self): + return "<_JsonImplementation instance using %s>" % self.name + + def _attempt_load(self, modname): + """Attempt to load module name modname, returning it on success, + throwing ImportError if module couldn't be imported""" + __import__(modname) + return sys.modules[modname] + + def serialize(self, data): + """Serialize the datastructure to json. Returns a string. Raises + TypeError if the object could not be serialized.""" + try: + return self._encode(data) + except self._encode_error, exc: + raise TypeError(*exc.args) + + def deserialize(self, s): + """deserialize the string to python data types. Raises + ValueError if the string vould not be parsed.""" + try: + return self._decode(s) + except self._decode_error, exc: + raise ValueError(*exc.args) + + +def force_implementation(modname): + """Forces anyjson to use a specific json module if it's available""" + global implementation + for name, spec in [(e[0], e) for e in _modules]: + if name == modname: + implementation = _JsonImplementation(spec) + return + raise ImportError("No module named: %s" % modname) + + +if __name__ == "__main__": + # If run as a script, we do nothing but print an error message. 
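In ordinary use the module is imported for its serialize/deserialize helpers, which delegate to whichever backend was found first; force_implementation() can pin a specific backend when several are installed. A short usage sketch (not part of the module itself):

    import anyjson

    s = anyjson.serialize({'name': 'nova', 'port': 5672})
    print(anyjson.implementation.name)        # e.g. 'simplejson' or 'cjson'
    print(anyjson.deserialize(s)['port'])     # 5672

    # anyjson.force_implementation('json')    # raises ImportError if unavailable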
+ # We do NOT try to load a compatible module because that may throw an + # exception, which renders the package uninstallable with easy_install + # (It trys to execfile the script when installing, to make sure it works) + print "Running anyjson as a stand alone script is not supported" + sys.exit(1) +else: + for modspec in _modules: + try: + implementation = _JsonImplementation(modspec) + break + except ImportError: + pass + else: + raise ImportError("No supported JSON module found") + + serialize = lambda value: implementation.serialize(value) + deserialize = lambda value: implementation.deserialize(value) diff --git a/vendor/boto/README b/vendor/boto/README new file mode 100644 index 000000000000..48f68490acb9 --- /dev/null +++ b/vendor/boto/README @@ -0,0 +1,53 @@ +boto 1.9a +22-Dec-2009 + +Copyright (c) 2006-2009 Mitch Garnaat + +http://code.google.com/p/boto + +Boto is a Python package that provides interfaces to Amazon Web Services. +At the moment, boto supports: + + * S3 (Simple Storage Service) via the REST API + * SQS (SimpleQueue Service) via the Query API + * EC2 (Elastic Compute Cloud) via the Query API + * Mechanical Turk via the Query API + * SimpleDB via the Query API. + * CloudFront via the REST API + * CloudWatch via the Query API + * AutoScale via the Query API + * Elastic Load Balancer via the Query API + +The intent is to support additional services in the future. + +The goal of boto is to provide a very simple, easy to use, lightweight +wrapper around the Amazon services. Not all features supported by the +Amazon Web Services will be supported in boto. Basically, those +features I need to do what I want to do are supported first. Other +features and requests are welcome and will be accomodated to the best +of my ability. Patches and contributions are welcome! + +Boto was written using Python 2.5.1 on Mac OSX. It has also been tested +on Linux Ubuntu using Python 2.5.1. Boto requires no additional +libraries or packages other than those that are distributed with Python 2.5.1. +Efforts are made to keep boto compatible with Python 2.4.x but no +guarantees are made. Boto should also run on Python 2.6, albeit with +a few deprecation warnings. + +There is some documentation for boto, mainly in the form of tutorials. +Check in the doc directory of the distribution. You can also check out +the unit tests in the tests directory of the distribution for examples of use. + +You AWS credentials can be passed into the methods that create S3 and SQS +connections. Alternatively, boto will check for the existance of the +following environment variables to ascertain your credentials: + +AWS_ACCESS_KEY_ID - Your AWS Access Key ID +AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key + +Changes + +Rather than list changes in the README file, I have decided to refer people to the +excellent subversion browsing available on googlecode. 
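As the README notes, the connection helpers fall back to AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when no keys are passed explicitly. A minimal sketch with obviously fake placeholder credentials:

    import os
    import boto

    os.environ['AWS_ACCESS_KEY_ID'] = 'AKIAEXAMPLE'          # placeholder
    os.environ['AWS_SECRET_ACCESS_KEY'] = 'secret-example'   # placeholder

    s3 = boto.connect_s3()            # picks the credentials up from the environment
    for bucket in s3.get_all_buckets():
        print(bucket.name)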
+ +http://code.google.com/p/boto/source/browse diff --git a/vendor/boto/bin/bundle_image b/vendor/boto/bin/bundle_image new file mode 100755 index 000000000000..709697908268 --- /dev/null +++ b/vendor/boto/bin/bundle_image @@ -0,0 +1,27 @@ +#!/usr/bin/env python +from boto.manage.server import Server +if __name__ == "__main__": + from optparse import OptionParser + parser = OptionParser(version="%prog 1.0", usage="Usage: %prog [options] instance-id [instance-id-2]") + + # Commands + parser.add_option("-b", "--bucket", help="Destination Bucket", dest="bucket", default=None) + parser.add_option("-p", "--prefix", help="AMI Prefix", dest="prefix", default=None) + parser.add_option("-k", "--key", help="Private Key File", dest="key_file", default=None) + parser.add_option("-c", "--cert", help="Public Certificate File", dest="cert_file", default=None) + parser.add_option("-s", "--size", help="AMI Size", dest="size", default=None) + parser.add_option("-i", "--ssh-key", help="SSH Keyfile", dest="ssh_key", default=None) + parser.add_option("-u", "--user-name", help="SSH Username", dest="uname", default="root") + parser.add_option("-n", "--name", help="Name of Image", dest="name") + (options, args) = parser.parse_args() + + for instance_id in args: + try: + s = Server.find(instance_id=instance_id).next() + print "Found old server object" + except StopIteration: + print "New Server Object Created" + s = Server.create_from_instance_id(instance_id, options.name) + assert(s.hostname is not None) + b = s.get_bundler(uname=options.uname) + b.bundle(bucket=options.bucket,prefix=options.prefix,key_file=options.key_file,cert_file=options.cert_file,size=int(options.size),ssh_key=options.ssh_key) diff --git a/vendor/boto/bin/cfadmin b/vendor/boto/bin/cfadmin new file mode 100644 index 000000000000..d44e7405e8d2 --- /dev/null +++ b/vendor/boto/bin/cfadmin @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# Author: Chris Moyer +# +# cfadmin is similar to sdbadmin for CloudFront, it's a simple +# console utility to perform the most frequent tasks with CloudFront +# +def _print_distributions(dists): + """Internal function to print out all the distributions provided""" + print "%-12s %-50s %s" % ("Status", "Domain Name", "Origin") + print "-"*80 + for d in dists: + print "%-12s %-50s %-30s" % (d.status, d.domain_name, d.origin) + for cname in d.cnames: + print " "*12, "CNAME => %s" % cname + print "" + +def help(cf, fnc=None): + """Print help message, optionally about a specific function""" + import inspect + self = sys.modules['__main__'] + if fnc: + try: + cmd = getattr(self, fnc) + except: + cmd = None + if not inspect.isfunction(cmd): + print "No function named: %s found" % fnc + sys.exit(2) + (args, varargs, varkw, defaults) = inspect.getargspec(cmd) + print cmd.__doc__ + print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args[1:]])) + else: + print "Usage: cfadmin [command]" + for cname in dir(self): + if not cname.startswith("_"): + cmd = getattr(self, cname) + if inspect.isfunction(cmd): + doc = cmd.__doc__ + print "\t%s - %s" % (cname, doc) + sys.exit(1) + +def ls(cf): + """List all distributions and streaming distributions""" + print "Standard Distributions" + _print_distributions(cf.get_all_distributions()) + print "Streaming Distributions" + _print_distributions(cf.get_all_streaming_distributions()) + + +if __name__ == "__main__": + import boto + import sys + cf = boto.connect_cloudfront() + self = sys.modules['__main__'] + if len(sys.argv) >= 2: + try: + cmd = getattr(self, sys.argv[1]) + except: + cmd 
= None + args = sys.argv[2:] + else: + cmd = help + args = [] + if not cmd: + cmd = help + try: + cmd(cf, *args) + except TypeError, e: + print e + help(cf, cmd.__name__) diff --git a/vendor/boto/bin/elbadmin b/vendor/boto/bin/elbadmin new file mode 100755 index 000000000000..5c139eece366 --- /dev/null +++ b/vendor/boto/bin/elbadmin @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Elastic Load Balancer Tool +# +VERSION="0.1" +usage = """%prog [options] [command] +Commands: + list|ls List all Elastic Load Balancers + delete Delete ELB + get Get all instances associated with + create Create an ELB + add Add in ELB + remove|rm Remove from ELB + enable|en Enable Zone for ELB + disable Disable Zone for ELB +""" + +def list(elb): + """List all ELBs""" + print "%-20s %s" % ("Name", "DNS Name") + print "-"*80 + for b in elb.get_all_load_balancers(): + print "%-20s %s" % (b.name, b.dns_name) + +def get(elb, name): + """Get details about ELB """ + b = elb.get_all_load_balancers(name) + if len(b) < 1: + print "No load balancer by the name of %s found" % name + return + b = b[0] + + print "Name: %s" % b.name + print "DNS Name: %s" % b.dns_name + + print + + print "Listeners" + print "---------" + print "%-8s %-8s %s" % ("IN", "OUT", "PROTO") + for l in b.listeners: + print "%-8s %-8s %s" % (l[0], l[1], l[2]) + + print + + print " Zones " + print "---------" + for z in b.availability_zones: + print z + + print + + print "Instances" + print "---------" + for i in b.instances: + print i.id + + print + +def create(elb, name, zones, listeners): + """Create an ELB named """ + l_list = [] + for l in listeners: + l = l.split(",") + l_list.append((int(l[0]), int(l[1]), l[2])) + + b = elb.create_load_balancer(name, zones, l_list) + return get(elb, name) + +def delete(elb, name): + """Delete this ELB""" + b = elb.get_all_load_balancers(name) + if len(b) < 1: + print "No load balancer by the name of %s found" % name + return + b = b[0] + b.delete() + print "Load Balancer %s deleted" % name + +def add_instance(elb, name, instance): + """Add to ELB """ + b = elb.get_all_load_balancers(name) + if len(b) < 1: + print "No load balancer by the name of %s found" % name + return + b = b[0] + b.register_instances([instance]) + return get(elb, name) + + +def remove_instance(elb, name, instance): + """Remove instance from elb """ + b = elb.get_all_load_balancers(name) + if len(b) < 1: + print "No load balancer by the name of %s found" % name + 
return + b = b[0] + b.deregister_instances([instance]) + return get(elb, name) + +def enable_zone(elb, name, zone): + """Enable for elb""" + b = elb.get_all_load_balancers(name) + if len(b) < 1: + print "No load balancer by the name of %s found" % name + return + b = b[0] + b.enable_zones([zone]) + return get(elb, name) + +def disable_zone(elb, name, zone): + """Disable for elb""" + b = elb.get_all_load_balancers(name) + if len(b) < 1: + print "No load balancer by the name of %s found" % name + return + b = b[0] + b.disable_zones([zone]) + return get(elb, name) + + + +if __name__ == "__main__": + try: + import readline + except ImportError: + pass + import boto + import sys + from optparse import OptionParser + from boto.mashups.iobject import IObject + parser = OptionParser(version=VERSION, usage=usage) + parser.add_option("-z", "--zone", help="Operate on zone", action="append", default=[], dest="zones") + parser.add_option("-l", "--listener", help="Specify Listener in,out,proto", action="append", default=[], dest="listeners") + + (options, args) = parser.parse_args() + + if len(args) < 1: + parser.print_help() + sys.exit(1) + + elb = boto.connect_elb() + command = args[0].lower() + if command in ("ls", "list"): + list(elb) + elif command == "get": + get(elb, args[1]) + elif command == "create": + create(elb, args[1], options.zones, options.listeners) + elif command == "delete": + delete(elb, args[1]) + elif command in ("add", "put"): + add_instance(elb, args[1], args[2]) + elif command in ("rm", "remove"): + remove_instance(elb, args[1], args[2]) + elif command in ("en", "enable"): + enable_zone(elb, args[1], args[2]) + elif command == "disable": + disable_zone(elb, args[1], args[2]) diff --git a/vendor/boto/bin/fetch_file b/vendor/boto/bin/fetch_file new file mode 100755 index 000000000000..6b8c4da90ff7 --- /dev/null +++ b/vendor/boto/bin/fetch_file @@ -0,0 +1,37 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
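The elbadmin subcommands above are thin wrappers over the boto ELB connection; the create and add paths, for instance, reduce to roughly the following sketch (the load balancer name, zone and instance id are made up, and working AWS credentials are assumed):

    import boto

    elb = boto.connect_elb()
    lb = elb.create_load_balancer('web-lb', ['us-east-1a'], [(80, 80, 'HTTP')])
    lb.register_instances(['i-12345678'])
    for b in elb.get_all_load_balancers():
        print("%s %s" % (b.name, b.dns_name))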
+# +if __name__ == "__main__": + from optparse import OptionParser + parser = OptionParser(version="0.1", usage="Usage: %prog [options] url") + parser.add_option("-o", "--out-file", help="Output file", dest="outfile") + + (options, args) = parser.parse_args() + if len(args) < 1: + parser.print_help() + exit(1) + from boto.utils import fetch_file + f = fetch_file(args[0]) + if options.outfile: + open(options.outfile, "w").write(f.read()) + else: + print f.read() diff --git a/vendor/boto/bin/kill_instance b/vendor/boto/bin/kill_instance new file mode 100644 index 000000000000..7418d461891a --- /dev/null +++ b/vendor/boto/bin/kill_instance @@ -0,0 +1,12 @@ +#!/usr/bin/env python +def kill_instance(instance_id): + """Kill an instance given it's instance ID""" + import boto + ec2 = boto.connect_ec2() + print "Stopping instance: %s" % instance_id + ec2.terminate_instances([instance_id]) + + +if __name__ == "__main__": + import sys + kill_instance(sys.argv[1]) diff --git a/vendor/boto/bin/launch_instance b/vendor/boto/bin/launch_instance new file mode 100755 index 000000000000..69a9f81b1539 --- /dev/null +++ b/vendor/boto/bin/launch_instance @@ -0,0 +1,138 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Utility to launch an EC2 Instance +# +VERSION="0.1" + +import boto.pyami.config +from boto.utils import fetch_file +import re, os +import ConfigParser + +class Config(boto.pyami.config.Config): + """A special config class that also adds import abilities + Directly in the config file. 
To have a config file import + another config file, simply use "#import " where + is either a relative path or a full URL to another config + """ + + def __init__(self): + ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami', 'debug' : '0'}) + + def add_config(self, file_url): + """Add a config file to this configuration + :param file_url: URL for the file to add, or a local path + :type file_url: str + """ + if not re.match("^([a-zA-Z0-9]*:\/\/)(.*)", file_url): + if not file_url.startswith("/"): + file_url = os.path.join(os.getcwd(), file_url) + file_url = "file://%s" % file_url + (base_url, file_name) = file_url.rsplit("/", 1) + base_config = fetch_file(file_url) + base_config.seek(0) + for line in base_config.readlines(): + match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line) + if match: + self.add_config("%s/%s" % (base_url, match.group(1))) + base_config.seek(0) + self.readfp(base_config) + + def add_creds(self, ec2): + """Add the credentials to this config if they don't already exist""" + if not self.has_section('Credentials'): + self.add_section('Credentials') + self.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id) + self.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key) + + + def __str__(self): + """Get config as string""" + from StringIO import StringIO + s = StringIO() + self.write(s) + return s.getvalue() + + +if __name__ == "__main__": + try: + import readline + except ImportError: + pass + import boto + from optparse import OptionParser + from boto.mashups.iobject import IObject + parser = OptionParser(version=VERSION, usage="%prog [options] config_url") + parser.add_option("-c", "--max-count", help="Maximum number of this type of instance to launch", dest="max_count", default="1") + parser.add_option("--min-count", help="Minimum number of this type of instance to launch", dest="min_count", default="1") + parser.add_option("-g", "--groups", help="Security Groups to add this instance to", action="append", dest="groups") + parser.add_option("-a", "--ami", help="AMI to launch", dest="ami_id") + parser.add_option("-t", "--type", help="Type of Instance (default m1.small)", dest="type", default="m1.small") + parser.add_option("-k", "--key", help="Keypair", dest="key_name") + parser.add_option("-z", "--zone", help="Zone (default us-east-1a)", dest="zone", default="us-east-1a") + parser.add_option("-i", "--ip", help="Elastic IP", dest="elastic_ip") + parser.add_option("-n", "--no-add-cred", help="Don't add a credentials section", default=False, action="store_true", dest="nocred") + + (options, args) = parser.parse_args() + + if len(args) < 1: + import sys + parser.print_help() + sys.exit(1) + file_url = os.path.expanduser(args[0]) + ec2 = boto.connect_ec2() + + cfg = Config() + cfg.add_config(file_url) + if not options.nocred: + cfg.add_creds(ec2) + + iobj = IObject() + if options.ami_id: + ami = ec2.get_image(options.ami_id) + else: + ami_id = options.ami_id + l = [(a, a.id, a.location) for a in ec2.get_all_images()] + ami = iobj.choose_from_list(l, prompt='Choose AMI') + + if options.key_name: + key_name = options.key_name + else: + l = [(k, k.name, '') for k in ec2.get_all_key_pairs()] + key_name = iobj.choose_from_list(l, prompt='Choose Keypair').name + + if options.groups: + groups = options.groups + else: + groups = [] + l = [(g, g.name, g.description) for g in ec2.get_all_security_groups()] + g = iobj.choose_from_list(l, prompt='Choose Primary Security Group') + while g != None: + groups.append(g) + l.remove((g, 
g.name, g.description)) + g = iobj.choose_from_list(l, prompt='Choose Additional Security Group (0 to quit)') + + r = ami.run(min_count=int(options.min_count), max_count=int(options.max_count), + key_name=key_name, user_data=str(cfg), + security_groups=groups, instance_type=options.type, + placement=options.zone) diff --git a/vendor/boto/bin/list_instances b/vendor/boto/bin/list_instances new file mode 100755 index 000000000000..19e1c9b1d2b6 --- /dev/null +++ b/vendor/boto/bin/list_instances @@ -0,0 +1,10 @@ +#!/usr/bin/env python +import boto +ec2 = boto.connect_ec2() + +print "%-15s %-15s %-30s %s" % ("ID", 'Zone', "Groups", "Hostname") +print "-"*105 +for r in ec2.get_all_instances(): + groups = [g.id for g in r.groups] + for i in r.instances: + print "%-15s %-15s %-30s %s" % (i.id, i.placement, ','.join(groups), i.public_dns_name) diff --git a/vendor/boto/bin/pyami_sendmail b/vendor/boto/bin/pyami_sendmail new file mode 100755 index 000000000000..78e30039b771 --- /dev/null +++ b/vendor/boto/bin/pyami_sendmail @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Send Mail from a PYAMI instance, or anything that has a boto.cfg +# properly set up +# +VERSION="0.1" +usage = """%prog [options] +Sends whatever is on stdin to the recipient specified by your boto.cfg +or whoevery you specify in the options here. 
+""" + +if __name__ == "__main__": + from boto.utils import notify + import sys + from optparse import OptionParser + parser = OptionParser(version=VERSION, usage=usage) + parser.add_option("-t", "--to", help="Optional to address to send to (default from your boto.cfg)", action="store", default=None, dest="to") + parser.add_option("-s", "--subject", help="Optional Subject to send this report as", action="store", default="Report", dest="subject") + parser.add_option("-f", "--file", help="Optionally, read from a file instead of STDIN", action="store", default=None, dest="file") + + (options, args) = parser.parse_args() + if options.file: + body = open(options.file, 'r').read() + else: + body = sys.stdin.read() + + notify(options.subject, body=body, to_string=options.to) diff --git a/vendor/boto/bin/s3put b/vendor/boto/bin/s3put new file mode 100755 index 000000000000..b5467d96b25f --- /dev/null +++ b/vendor/boto/bin/s3put @@ -0,0 +1,196 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import getopt, sys, os +import boto +from boto.exception import S3ResponseError + +usage_string = """ +SYNOPSIS + s3put [-a/--access_key ] [-s/--secret_key ] + -b/--bucket [-c/--callback ] + [-d/--debug ] [-i/--ignore ] + [-n/--no_op] [-p/--prefix ] [-q/--quiet] + [-g/--grant grant] [-w/--no_overwrite] path + + Where + access_key - Your AWS Access Key ID. If not supplied, boto will + use the value of the environment variable + AWS_ACCESS_KEY_ID + secret_key - Your AWS Secret Access Key. If not supplied, boto + will use the value of the environment variable + AWS_SECRET_ACCESS_KEY + bucket_name - The name of the S3 bucket the file(s) should be + copied to. + path - A path to a directory or file that represents the items + to be uploaded. If the path points to an individual file, + that file will be uploaded to the specified bucket. If the + path points to a directory, s3_it will recursively traverse + the directory and upload all files to the specified bucket. + debug_level - 0 means no debug output (default), 1 means normal + debug output from boto, and 2 means boto debug output + plus request/response output from httplib + ignore_dirs - a comma-separated list of directory names that will + be ignored and not uploaded to S3. + num_cb - The number of progress callbacks to display. The default + is zero which means no callbacks. 
If you supplied a value + of "-c 10" for example, the progress callback would be + called 10 times for each file transferred. + prefix - A file path prefix that will be stripped from the full + path of the file when determining the key name in S3. + For example, if the full path of a file is: + /home/foo/bar/fie.baz + and the prefix is specified as "-p /home/foo/" the + resulting key name in S3 will be: + /bar/fie.baz + The prefix must end in a trailing separator and if it + does not then one will be added. + grant - A canned ACL policy that will be granted on each file + transferred to S3. The value of provided must be one + of the "canned" ACL policies supported by S3: + private|public-read|public-read-write|authenticated-read + no_overwrite - No files will be overwritten on S3, if the file/key + exists on s3 it will be kept. This is useful for + resuming interrupted transfers. Note this is not a + sync, even if the file has been updated locally if + the key exists on s3 the file on s3 will not be + updated. + + If the -n option is provided, no files will be transferred to S3 but + informational messages will be printed about what would happen. +""" +def usage(): + print usage_string + sys.exit() + +def submit_cb(bytes_so_far, total_bytes): + print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes) + +def get_key_name(fullpath, prefix): + key_name = fullpath[len(prefix):] + l = key_name.split(os.sep) + return '/'.join(l) + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], 'a:b:c::d:g:hi:np:qs:vw', + ['access_key', 'bucket', 'callback', 'debug', 'help', 'grant', + 'ignore', 'no_op', 'prefix', 'quiet', 'secret_key', 'no_overwrite']) + except: + usage() + ignore_dirs = [] + aws_access_key_id = None + aws_secret_access_key = None + bucket_name = '' + total = 0 + debug = 0 + cb = None + num_cb = 0 + quiet = False + no_op = False + prefix = '/' + grant = None + no_overwrite = False + for o, a in opts: + if o in ('-h', '--help'): + usage() + sys.exit() + if o in ('-a', '--access_key'): + aws_access_key_id = a + if o in ('-b', '--bucket'): + bucket_name = a + if o in ('-c', '--callback'): + num_cb = int(a) + cb = submit_cb + if o in ('-d', '--debug'): + debug = int(a) + if o in ('-g', '--grant'): + grant = a + if o in ('-i', '--ignore'): + ignore_dirs = a.split(',') + if o in ('-n', '--no_op'): + no_op = True + if o in ('w', '--no_overwrite'): + no_overwrite = True + if o in ('-p', '--prefix'): + prefix = a + if prefix[-1] != os.sep: + prefix = prefix + os.sep + if o in ('-q', '--quiet'): + quiet = True + if o in ('-s', '--secret_key'): + aws_secret_access_key = a + if len(args) != 1: + print usage() + path = os.path.expanduser(args[0]) + path = os.path.expandvars(path) + path = os.path.abspath(path) + if bucket_name: + c = boto.connect_s3(aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) + c.debug = debug + b = c.get_bucket(bucket_name) + if os.path.isdir(path): + if no_overwrite: + if not quiet: + print 'Getting list of existing keys to check against' + keys = [] + for key in b.list(): + keys.append(key.name) + for root, dirs, files in os.walk(path): + for ignore in ignore_dirs: + if ignore in dirs: + dirs.remove(ignore) + for file in files: + fullpath = os.path.join(root, file) + key_name = get_key_name(fullpath, prefix) + copy_file = True + if no_overwrite: + if key_name in keys: + copy_file = False + if not quiet: + print 'Skipping %s as it exists in s3' % file + if copy_file: + if not quiet: + print 'Copying %s to %s/%s' % 
(file, bucket_name, key_name) + if not no_op: + k = b.new_key(key_name) + k.set_contents_from_filename(fullpath, cb=cb, + num_cb=num_cb, policy=grant) + total += 1 + elif os.path.isfile(path): + key_name = os.path.split(path)[1] + copy_file = True + if no_overwrite: + if b.get_key(key_name): + copy_file = False + if not quiet: + print 'Skipping %s as it exists in s3' % path + if copy_file: + k = b.new_key(key_name) + k.set_contents_from_filename(path, cb=cb, num_cb=num_cb, policy=grant) + else: + print usage() + +if __name__ == "__main__": + main() + diff --git a/vendor/boto/bin/sdbadmin b/vendor/boto/bin/sdbadmin new file mode 100755 index 000000000000..e8ff9b52bd31 --- /dev/null +++ b/vendor/boto/bin/sdbadmin @@ -0,0 +1,168 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Chris Moyer http://kopertop.blogspot.com/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Tools to dump and recover an SDB domain +# +VERSION = "%prog version 1.0" +import boto +import time +from boto import sdb + +def choice_input(options, default=None, title=None): + """ + Choice input + """ + if title == None: + title = "Please choose" + print title + objects = [] + for n, obj in enumerate(options): + print "%s: %s" % (n, obj) + objects.append(obj) + choice = int(raw_input(">>> ")) + try: + choice = objects[choice] + except: + choice = default + return choice + +def confirm(message="Are you sure?"): + choice = raw_input("%s [yN] " % message) + return choice and len(choice) > 0 and choice[0].lower() == "y" + + +def dump_db(domain, file_name): + """ + Dump SDB domain to file + """ + doc = domain.to_xml(open(file_name, "w")) + +def empty_db(domain): + """ + Remove all entries from domain + """ + for item in domain: + item.delete() + +def load_db(domain, file): + """ + Load a domain from a file, this doesn't overwrite any existing + data in the file so if you want to do a full recovery and restore + you need to call empty_db before calling this + + :param domain: The SDB Domain object to load to + :param file: The File to load the DB from + """ + domain.from_xml(file) + +def create_db(domain_name, region_name): + """Create a new DB + + :param domain: Name of the domain to create + :type domain: str + """ + sdb = boto.sdb.connect_to_region(region_name) + return sdb.create_domain(domain_name) + +if __name__ == "__main__": + from optparse import OptionParser + parser = OptionParser(version=VERSION, usage="Usage: %prog [--dump|--load|--empty|--list|-l] [options]") + + # Commands + parser.add_option("--dump", help="Dump domain to file", 
dest="dump", default=False, action="store_true") + parser.add_option("--load", help="Load domain contents from file", dest="load", default=False, action="store_true") + parser.add_option("--empty", help="Empty all contents of domain", dest="empty", default=False, action="store_true") + parser.add_option("-l", "--list", help="List All domains", dest="list", default=False, action="store_true") + parser.add_option("-c", "--create", help="Create domain", dest="create", default=False, action="store_true") + + parser.add_option("-a", "--all-domains", help="Operate on all domains", action="store_true", default=False, dest="all_domains") + parser.add_option("-d", "--domain", help="Do functions on domain (may be more then one)", action="append", dest="domains") + parser.add_option("-f", "--file", help="Input/Output file we're operating on", dest="file_name") + parser.add_option("-r", "--region", help="Region (e.g. us-east-1[default] or eu-west-1)", default="us-east-1", dest="region_name") + (options, args) = parser.parse_args() + + if options.create: + for domain_name in options.domains: + create_db(domain_name, options.region_name) + exit() + + sdb = boto.sdb.connect_to_region(options.region_name) + if options.list: + for db in sdb.get_all_domains(): + print db + exit() + + if not options.dump and not options.load and not options.empty: + parser.print_help() + exit() + + + + + # + # Setup + # + if options.domains: + domains = [] + for domain_name in options.domains: + domains.append(sdb.get_domain(domain_name)) + elif options.all_domains: + domains = sdb.get_all_domains() + else: + domains = [choice_input(options=sdb.get_all_domains(), title="No domain specified, please choose one")] + + + # + # Execute the commands + # + stime = time.time() + if options.empty: + if confirm("WARNING!!! Are you sure you want to empty the following domains?: %s" % domains): + stime = time.time() + for domain in domains: + print "--------> Emptying %s <--------" % domain.name + empty_db(domain) + else: + print "Canceling operations" + exit() + + if options.dump: + for domain in domains: + print "--------> Dumping %s <---------" % domain.name + if options.file_name: + file_name = options.file_name + else: + file_name = "%s.db" % domain.name + dump_db(domain, file_name) + + if options.load: + for domain in domains: + print "---------> Loading %s <----------" % domain.name + if options.file_name: + file_name = options.file_name + else: + file_name = "%s.db" % domain.name + load_db(domain, open(file_name, "rb")) + + + total_time = round(time.time() - stime, 2) + print "--------> Finished in %s <--------" % total_time diff --git a/vendor/boto/bin/taskadmin b/vendor/boto/bin/taskadmin new file mode 100755 index 000000000000..5d5302adc910 --- /dev/null +++ b/vendor/boto/bin/taskadmin @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + +# +# Task/Job Administration utility +# +VERSION="0.1" +__version__ = VERSION +usage = """%prog [options] [command] +Commands: + list|ls List all Tasks in SDB + delete Delete Task with id + get Get Task + create|mk Create a new Task with command running every +""" + +def list(): + """List all Tasks in SDB""" + from boto.manage.task import Task + print "%-8s %-40s %s" % ("Hour", "Name", "Command") + print "-"*100 + for t in Task.all(): + print "%-8s %-40s %s" % (t.hour, t.name, t.command) + +def get(name): + """Get a task + :param name: The name of the task to fetch + :type name: str + """ + from boto.manage.task import Task + q = Task.find() + q.filter("name like", "%s%%" % name) + for t in q: + print "="*80 + print "| ", t.id + print "|%s" % ("-"*79) + print "| Name: ", t.name + print "| Hour: ", t.hour + print "| Command: ", t.command + if t.last_executed: + print "| Last Run: ", t.last_executed.ctime() + print "| Last Status: ", t.last_status + print "| Last Run Log: ", t.last_output + print "="*80 + +def delete(id): + from boto.manage.task import Task + t = Task.get_by_id(id) + print "Deleting task: %s" % t.name + if raw_input("Are you sure? ").lower() in ["y", "yes"]: + t.delete() + print "Deleted" + else: + print "Canceled" + +def create(name, hour, command): + """Create a new task + :param name: Name of the task to create + :type name: str + :param hour: What hour to run it at, "*" for every hour + :type hour: str + :param command: The command to execute + :type command: str + """ + from boto.manage.task import Task + t = Task() + t.name = name + t.hour = hour + t.command = command + t.put() + print "Created task: %s" % t.id + +if __name__ == "__main__": + try: + import readline + except ImportError: + pass + import boto + import sys + from optparse import OptionParser + from boto.mashups.iobject import IObject + parser = OptionParser(version=__version__, usage=usage) + + (options, args) = parser.parse_args() + + if len(args) < 1: + parser.print_help() + sys.exit(1) + + command = args[0].lower() + if command in ("ls", "list"): + list() + elif command == "get": + get(args[1]) + elif command == "create": + create(args[1], args[2], args[3]) + elif command == "delete": + delete(args[1]) diff --git a/vendor/boto/boto/__init__.py b/vendor/boto/boto/__init__.py new file mode 100644 index 000000000000..051c90365321 --- /dev/null +++ b/vendor/boto/boto/__init__.py @@ -0,0 +1,292 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
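# --- Illustrative aside (not part of the original patch) ---
# A minimal sketch of the Task workflow the taskadmin commands above wrap.
# The task name, hour and command below are hypothetical examples.
from boto.manage.task import Task

def create_hourly_task():
    t = Task()
    t.name = "log-roller"                       # hypothetical task name
    t.hour = "*"                                # "*" runs every hour, as in create()
    t.command = "logrotate /etc/logrotate.conf" # hypothetical command
    t.put()                                     # persist the task to SDB
    return t

def show_tasks():
    # mirrors the list()/ls command: iterate every Task stored in SDB
    for t in Task.all():
        print "%-8s %-40s %s" % (t.hour, t.name, t.command)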
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.pyami.config import Config, BotoConfigLocations +import os, sys +import logging +import logging.config + +Version = '1.9b' +UserAgent = 'Boto/%s (%s)' % (Version, sys.platform) +config = Config() + +def init_logging(): + for file in BotoConfigLocations: + try: + logging.config.fileConfig(os.path.expanduser(file)) + except: + pass + +class NullHandler(logging.Handler): + def emit(self, record): + pass + +log = logging.getLogger('boto') +log.addHandler(NullHandler()) +init_logging() + +# convenience function to set logging to a particular file +def set_file_logger(name, filepath, level=logging.INFO, format_string=None): + global log + if not format_string: + format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s" + logger = logging.getLogger(name) + logger.setLevel(level) + fh = logging.FileHandler(filepath) + fh.setLevel(level) + formatter = logging.Formatter(format_string) + fh.setFormatter(formatter) + logger.addHandler(fh) + log = logger + +def set_stream_logger(name, level=logging.DEBUG, format_string=None): + global log + if not format_string: + format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s" + logger = logging.getLogger(name) + logger.setLevel(level) + fh = logging.StreamHandler() + fh.setLevel(level) + formatter = logging.Formatter(format_string) + fh.setFormatter(formatter) + logger.addHandler(fh) + log = logger + +def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.sqs.connection.SQSConnection` + :return: A connection to Amazon's SQS + """ + from boto.sqs.connection import SQSConnection + return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.s3.connection.S3Connection` + :return: A connection to Amazon's S3 + """ + from boto.s3.connection import S3Connection + return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.connection.EC2Connection` + :return: A connection to Amazon's EC2 + """ + from boto.ec2.connection import EC2Connection + return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param 
aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.elb.ELBConnection` + :return: A connection to Amazon's Load Balancing Service + """ + from boto.ec2.elb import ELBConnection + return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection` + :return: A connection to Amazon's Auto Scaling Service + """ + from boto.ec2.autoscale import AutoScaleConnection + return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection` + :return: A connection to Amazon's EC2 Monitoring service + """ + from boto.ec2.cloudwatch import CloudWatchConnection + return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.sdb.connection.SDBConnection` + :return: A connection to Amazon's SDB + """ + from boto.sdb.connection import SDBConnection + return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.fps.connection.FPSConnection` + :return: A connection to FPS + """ + from boto.fps.connection import FPSConnection + return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.fps.connection.FPSConnection` + :return: A connection to FPS + """ + from boto.cloudfront import CloudFrontConnection + return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.vpc.VPCConnection` + :return: A connection to VPC + """ + from boto.vpc import VPCConnection + return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: 
Your AWS Secret Access Key + + :rtype: :class:`boto.rds.RDSConnection` + :return: A connection to RDS + """ + from boto.rds import RDSConnection + return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.emr.EmrConnection` + :return: A connection to Elastic mapreduce + """ + from boto.emr import EmrConnection + return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + +def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.sns.SNSConnection` + :return: A connection to Amazon's SNS + """ + from boto.sns import SNSConnection + return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs) + + +def check_extensions(module_name, module_path): + """ + This function checks for extensions to boto modules. It should be called in the + __init__.py file of all boto modules. See: + http://code.google.com/p/boto/wiki/ExtendModules + + for details. + """ + option_name = '%s_extend' % module_name + version = config.get('Boto', option_name, None) + if version: + dirname = module_path[0] + path = os.path.join(dirname, version) + if os.path.isdir(path): + log.info('extending module %s with: %s' % (module_name, path)) + module_path.insert(0, path) + +_aws_cache = {} + +def _get_aws_conn(service): + global _aws_cache + conn = _aws_cache.get(service) + if not conn: + meth = getattr(sys.modules[__name__], 'connect_'+service) + conn = meth() + _aws_cache[service] = conn + return conn + +def lookup(service, name): + global _aws_cache + conn = _get_aws_conn(service) + obj = _aws_cache.get('.'.join((service,name)), None) + if not obj: + obj = conn.lookup(name) + _aws_cache['.'.join((service,name))] = obj + return obj + diff --git a/vendor/boto/boto/cloudfront/__init__.py b/vendor/boto/boto/cloudfront/__init__.py new file mode 100644 index 000000000000..28309ff93534 --- /dev/null +++ b/vendor/boto/boto/cloudfront/__init__.py @@ -0,0 +1,223 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
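# --- Illustrative aside (not part of the original patch) ---
# A minimal sketch of the connect_* convenience helpers defined above.
# Credentials fall back to the environment or the boto config file when
# omitted; the bucket name used here is hypothetical.
import boto

s3 = boto.connect_s3()                  # S3Connection using configured keys
sqs = boto.connect_sqs()                # SQSConnection, same credential lookup
bucket = s3.get_bucket('my-bucket')     # hypothetical bucket name

# lookup() caches both the per-service connection and the looked-up object
# in the module-level _aws_cache, so repeated calls avoid extra requests.
same_bucket = boto.lookup('s3', 'my-bucket')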
+# + +import xml.sax +import base64 +import time +from boto.connection import AWSAuthConnection +from boto import handler +from boto.cloudfront.distribution import Distribution, DistributionSummary, DistributionConfig +from boto.cloudfront.distribution import StreamingDistribution, StreamingDistributionSummary, StreamingDistributionConfig +from boto.cloudfront.identity import OriginAccessIdentity +from boto.cloudfront.identity import OriginAccessIdentitySummary +from boto.cloudfront.identity import OriginAccessIdentityConfig +from boto.resultset import ResultSet +from boto.cloudfront.exception import CloudFrontServerError + +class CloudFrontConnection(AWSAuthConnection): + + DefaultHost = 'cloudfront.amazonaws.com' + Version = '2009-12-01' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + port=None, proxy=None, proxy_port=None, + host=DefaultHost, debug=0): + AWSAuthConnection.__init__(self, host, + aws_access_key_id, aws_secret_access_key, + True, port, proxy, proxy_port, debug=debug) + + def get_etag(self, response): + response_headers = response.msg + for key in response_headers.keys(): + if key.lower() == 'etag': + return response_headers[key] + return None + + def add_aws_auth_header(self, headers, method, path): + if not headers.has_key('Date'): + headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", + time.gmtime()) + + hmac = self.hmac.copy() + hmac.update(headers['Date']) + b64_hmac = base64.encodestring(hmac.digest()).strip() + headers['Authorization'] = "AWS %s:%s" % (self.aws_access_key_id, b64_hmac) + + # Generics + + def _get_all_objects(self, resource, tags): + if not tags: + tags=[('DistributionSummary', DistributionSummary)] + response = self.make_request('GET', '/%s/%s' % (self.Version, resource)) + body = response.read() + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + rs = ResultSet(tags) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + + def _get_info(self, id, resource, dist_class): + uri = '/%s/%s/%s' % (self.Version, resource, id) + response = self.make_request('GET', uri) + body = response.read() + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + d = dist_class(connection=self) + response_headers = response.msg + for key in response_headers.keys(): + if key.lower() == 'etag': + d.etag = response_headers[key] + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + return d + + def _get_config(self, id, resource, config_class): + uri = '/%s/%s/%s/config' % (self.Version, resource, id) + response = self.make_request('GET', uri) + body = response.read() + if response.status >= 300: + raise CloudFrontServerError(response.status, response.reason, body) + d = config_class(connection=self) + d.etag = self.get_etag(response) + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + return d + + def _set_config(self, distribution_id, etag, config): + if isinstance(config, StreamingDistributionConfig): + resource = 'streaming-distribution' + else: + resource = 'distribution' + uri = '/%s/%s/%s/config' % (self.Version, resource, distribution_id) + headers = {'If-Match' : etag, 'Content-Type' : 'text/xml'} + response = self.make_request('PUT', uri, headers, config.to_xml()) + body = response.read() + return self.get_etag(response) + if response.status != 200: + raise CloudFrontServerError(response.status, response.reason, body) + + def _create_object(self, config, resource, dist_class): + 
response = self.make_request('POST', '/%s/%s' % (self.Version, resource), + {'Content-Type' : 'text/xml'}, data=config.to_xml()) + body = response.read() + if response.status == 201: + d = dist_class(connection=self) + h = handler.XmlHandler(d, self) + xml.sax.parseString(body, h) + return d + else: + raise CloudFrontServerError(response.status, response.reason, body) + + def _delete_object(self, id, etag, resource): + uri = '/%s/%s/%s' % (self.Version, resource, id) + response = self.make_request('DELETE', uri, {'If-Match' : etag}) + body = response.read() + if response.status != 204: + raise CloudFrontServerError(response.status, response.reason, body) + + # Distributions + + def get_all_distributions(self): + tags=[('DistributionSummary', DistributionSummary)] + return self._get_all_objects('distribution', tags) + + def get_distribution_info(self, distribution_id): + return self._get_info(distribution_id, 'distribution', Distribution) + + def get_distribution_config(self, distribution_id): + return self._get_config(distribution_id, 'distribution', + DistributionConfig) + + def set_distribution_config(self, distribution_id, etag, config): + return self._set_config(distribution_id, etag, config) + + def create_distribution(self, origin, enabled, caller_reference='', + cnames=None, comment=''): + config = DistributionConfig(origin=origin, enabled=enabled, + caller_reference=caller_reference, + cnames=cnames, comment=comment) + return self._create_object(config, 'distribution', Distribution) + + def delete_distribution(self, distribution_id, etag): + return self._delete_object(distribution_id, etag, 'distribution') + + # Streaming Distributions + + def get_all_streaming_distributions(self): + tags=[('StreamingDistributionSummary', StreamingDistributionSummary)] + return self._get_all_objects('streaming-distribution', tags) + + def get_streaming_distribution_info(self, distribution_id): + return self._get_info(distribution_id, 'streaming-distribution', + StreamingDistribution) + + def get_streaming_distribution_config(self, distribution_id): + return self._get_config(distribution_id, 'streaming-distribution', + StreamingDistributionConfig) + + def set_streaming_distribution_config(self, distribution_id, etag, config): + return self._set_config(distribution_id, etag, config) + + def create_streaming_distribution(self, origin, enabled, + caller_reference='', + cnames=None, comment=''): + config = StreamingDistributionConfig(origin=origin, enabled=enabled, + caller_reference=caller_reference, + cnames=cnames, comment=comment) + return self._create_object(config, 'streaming-distribution', + StreamingDistribution) + + def delete_streaming_distribution(self, distribution_id, etag): + return self._delete_object(distribution_id, etag, 'streaming-distribution') + + # Origin Access Identity + + def get_all_origin_access_identity(self): + tags=[('CloudFrontOriginAccessIdentitySummary', + OriginAccessIdentitySummary)] + return self._get_all_objects('origin-access-identity/cloudfront', tags) + + def get_origin_access_identity_info(self, access_id): + return self._get_info(access_id, 'origin-access-identity/cloudfront', + OriginAccessIdentity) + + def get_origin_access_identity_config(self, access_id): + return self._get_config(access_id, + 'origin-access-identity/cloudfront', + OriginAccessIdentityConfig) + + def set_origin_access_identity_config(self, access_id, + etag, config): + return self._set_config(access_id, etag, config) + + def create_origin_access_identity(self, caller_reference='', 
comment=''): + config = OriginAccessIdentityConfig(caller_reference=caller_reference, + comment=comment) + return self._create_object(config, 'origin-access-identity/cloudfront', + OriginAccessIdentity) + + def delete_origin_access_identity(self, access_id, etag): + return self._delete_object(access_id, etag, + 'origin-access-identity/cloudfront') + + diff --git a/vendor/boto/boto/cloudfront/distribution.py b/vendor/boto/boto/cloudfront/distribution.py new file mode 100644 index 000000000000..ead6e3623531 --- /dev/null +++ b/vendor/boto/boto/cloudfront/distribution.py @@ -0,0 +1,470 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import uuid +from boto.cloudfront.identity import OriginAccessIdentity +from boto.cloudfront.object import Object, StreamingObject +from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners +from boto.cloudfront.logging import LoggingInfo +from boto.s3.acl import ACL + +class DistributionConfig: + + def __init__(self, connection=None, origin='', enabled=False, + caller_reference='', cnames=None, comment='', + origin_access_identity=None, trusted_signers=None): + self.connection = connection + self.origin = origin + self.enabled = enabled + if caller_reference: + self.caller_reference = caller_reference + else: + self.caller_reference = str(uuid.uuid4()) + self.cnames = [] + if cnames: + self.cnames = cnames + self.comment = comment + self.origin_access_identity = origin_access_identity + self.trusted_signers = trusted_signers + self.logging = None + + def get_oai_value(self): + if isinstance(self.origin_access_identity, OriginAccessIdentity): + return self.origin_access_identity.uri() + else: + return self.origin_access_identity + + def to_xml(self): + s = '\n' + s += '\n' + s += ' %s\n' % self.origin + s += ' %s\n' % self.caller_reference + for cname in self.cnames: + s += ' %s\n' % cname + if self.comment: + s += ' %s\n' % self.comment + s += ' ' + if self.enabled: + s += 'true' + else: + s += 'false' + s += '\n' + if self.origin_access_identity: + val = self.get_oai_value() + s += '%s\n' % val + if self.trusted_signers: + s += '\n' + for signer in self.trusted_signers: + if signer == 'Self': + s += ' \n' + else: + s += ' %s\n' % signer + s += '\n' + if self.logging: + s += '\n' + s += ' %s\n' % self.logging.bucket + s += ' %s\n' % self.logging.prefix + s += '\n' + s += '\n' + return s + + def startElement(self, name, attrs, connection): + if name == 'TrustedSigners': + 
self.trusted_signers = TrustedSigners() + return self.trusted_signers + elif name == 'Logging': + self.logging = LoggingInfo() + return self.logging + else: + return None + + def endElement(self, name, value, connection): + if name == 'CNAME': + self.cnames.append(value) + elif name == 'Origin': + self.origin = value + elif name == 'Comment': + self.comment = value + elif name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'CallerReference': + self.caller_reference = value + elif name == 'OriginAccessIdentity': + self.origin_access_identity = value + else: + setattr(self, name, value) + +class StreamingDistributionConfig(DistributionConfig): + + def __init__(self, connection=None, origin='', enabled=False, + caller_reference='', cnames=None, comment=''): + DistributionConfig.__init__(self, connection, origin, + enabled, caller_reference, + cnames, comment) + + def to_xml(self): + s = '\n' + s += '\n' + s += ' %s\n' % self.origin + s += ' %s\n' % self.caller_reference + for cname in self.cnames: + s += ' %s\n' % cname + if self.comment: + s += ' %s\n' % self.comment + s += ' ' + if self.enabled: + s += 'true' + else: + s += 'false' + s += '\n' + s += '\n' + return s + + def startElement(self, name, attrs, connection): + pass + +class DistributionSummary: + + def __init__(self, connection=None, domain_name='', id='', + last_modified_time=None, status='', origin='', + cname='', comment='', enabled=False): + self.connection = connection + self.domain_name = domain_name + self.id = id + self.last_modified_time = last_modified_time + self.status = status + self.origin = origin + self.enabled = enabled + self.cnames = [] + if cname: + self.cnames.append(cname) + self.comment = comment + self.trusted_signers = None + self.etag = None + self.streaming = False + + def startElement(self, name, attrs, connection): + if name == 'TrustedSigners': + self.trusted_signers = TrustedSigners() + return self.trusted_signers + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'Status': + self.status = value + elif name == 'LastModifiedTime': + self.last_modified_time = value + elif name == 'DomainName': + self.domain_name = value + elif name == 'Origin': + self.origin = value + elif name == 'CNAME': + self.cnames.append(value) + elif name == 'Comment': + self.comment = value + elif name == 'Enabled': + if value.lower() == 'true': + self.enabled = True + else: + self.enabled = False + elif name == 'StreamingDistributionSummary': + self.streaming = True + else: + setattr(self, name, value) + + def get_distribution(self): + return self.connection.get_distribution_info(self.id) + +class StreamingDistributionSummary(DistributionSummary): + + def get_distribution(self): + return self.connection.get_streaming_distribution_info(self.id) + +class Distribution: + + def __init__(self, connection=None, config=None, domain_name='', + id='', last_modified_time=None, status=''): + self.connection = connection + self.config = config + self.domain_name = domain_name + self.id = id + self.last_modified_time = last_modified_time + self.status = status + self.active_signers = None + self.etag = None + self._bucket = None + + def startElement(self, name, attrs, connection): + if name == 'DistributionConfig': + self.config = DistributionConfig() + return self.config + elif name == 'ActiveTrustedSigners': + self.active_signers = ActiveTrustedSigners() + return self.active_signers + else: + return None + + 
def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'LastModifiedTime': + self.last_modified_time = value + elif name == 'Status': + self.status = value + elif name == 'DomainName': + self.domain_name = value + else: + setattr(self, name, value) + + def update(self, enabled=None, cnames=None, comment=None, + origin_access_identity=None, + trusted_signers=None): + """ + Update the configuration of the Distribution. + + :type enabled: bool + :param enabled: Whether the Distribution is active or not. + + :type cnames: list of str + :param cnames: The DNS CNAME's associated with this + Distribution. Maximum of 10 values. + + :type comment: str or unicode + :param comment: The comment associated with the Distribution. + + :type origin_access_identity: :class:`boto.cloudfront.identity.OriginAccessIdentity` + :param origin_access_identity: The CloudFront origin access identity + associated with the distribution. This + must be provided if you want the + distribution to serve private content. + + :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigner` + :param trusted_signers: The AWS users who are authorized to sign + URL's for private content in this Distribution. + + """ + new_config = DistributionConfig(self.connection, self.config.origin, + self.config.enabled, self.config.caller_reference, + self.config.cnames, self.config.comment, + self.config.origin_access_identity, + self.config.trusted_signers) + if enabled != None: + new_config.enabled = enabled + if cnames != None: + new_config.cnames = cnames + if comment != None: + new_config.comment = comment + if origin_access_identity != None: + new_config.origin_access_identity = origin_access_identity + if trusted_signers: + new_config.trusted_signers = trusted_signers + self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config) + self.config = new_config + self._object_class = Object + + def enable(self): + """ + Deactivate the Distribution. A convenience wrapper around + the update method. + """ + self.update(enabled=True) + + def disable(self): + """ + Activate the Distribution. A convenience wrapper around + the update method. + """ + self.update(enabled=False) + + def delete(self): + """ + Delete this CloudFront Distribution. The content + associated with the Distribution is not deleted from + the underlying Origin bucket in S3. + """ + self.connection.delete_distribution(self.id, self.etag) + + def _get_bucket(self): + if not self._bucket: + bucket_name = self.config.origin.split('.')[0] + from boto.s3.connection import S3Connection + s3 = S3Connection(self.connection.aws_access_key_id, + self.connection.aws_secret_access_key, + proxy=self.connection.proxy, + proxy_port=self.connection.proxy_port, + proxy_user=self.connection.proxy_user, + proxy_pass=self.connection.proxy_pass) + self._bucket = s3.get_bucket(bucket_name) + self._bucket.distribution = self + self._bucket.set_key_class(self._object_class) + return self._bucket + + def get_objects(self): + """ + Return a list of all content objects in this distribution. + + :rtype: list of :class:`boto.cloudfront.object.Object` + :return: The content objects + """ + bucket = self._get_bucket() + objs = [] + for key in bucket: + objs.append(key) + return objs + + def set_permissions(self, object, replace=False): + """ + Sets the S3 ACL grants for the given object to the appropriate + value based on the type of Distribution. 
If the Distribution + is serving private content the ACL will be set to include the + Origin Access Identity associated with the Distribution. If + the Distribution is serving public content the content will + be set up with "public-read". + + :type object: :class:`boto.cloudfront.object.Object` + :param enabled: The Object whose ACL is being set + + :type replace: bool + :param replace: If False, the Origin Access Identity will be + appended to the existing ACL for the object. + If True, the ACL for the object will be + completely replaced with one that grants + READ permission to the Origin Access Identity. + + """ + if self.config.origin_access_identity: + id = self.config.origin_access_identity.split('/')[-1] + oai = self.connection.get_origin_access_identity_info(id) + policy = object.get_acl() + if replace: + policy.acl = ACL() + policy.acl.add_user_grant('READ', oai.s3_user_id) + object.set_acl(policy) + else: + object.set_canned_acl('public-read') + + def set_permissions_all(self, replace=False): + """ + Sets the S3 ACL grants for all objects in the Distribution + to the appropriate value based on the type of Distribution. + + :type replace: bool + :param replace: If False, the Origin Access Identity will be + appended to the existing ACL for the object. + If True, the ACL for the object will be + completely replaced with one that grants + READ permission to the Origin Access Identity. + + """ + bucket = self._get_bucket() + for key in bucket: + self.set_permissions(key) + + def add_object(self, name, content, headers=None, replace=True): + """ + Adds a new content object to the Distribution. The content + for the object will be copied to a new Key in the S3 Bucket + and the permissions will be set appropriately for the type + of Distribution. + + :type name: str or unicode + :param name: The name or key of the new object. + + :type content: file-like object + :param content: A file-like object that contains the content + for the new object. + + :type headers: dict + :param headers: A dictionary containing additional headers + you would like associated with the new + object in S3. + + :rtype: :class:`boto.cloudfront.object.Object` + :return: The newly created object. + """ + if self.config.origin_access_identity: + policy = 'private' + else: + policy = 'public-read' + bucket = self._get_bucket() + object = bucket.new_key(name) + object.set_contents_from_file(content, headers=headers, policy=policy) + if self.config.origin_access_identity: + self.set_permissions(object, replace) + return object + +class StreamingDistribution(Distribution): + + def __init__(self, connection=None, config=None, domain_name='', + id='', last_modified_time=None, status=''): + Distribution.__init__(self, connection, config, domain_name, + id, last_modified_time, status) + self._object_class = StreamingObject + + def startElement(self, name, attrs, connection): + if name == 'StreamingDistributionConfig': + self.config = StreamingDistributionConfig() + return self.config + else: + return None + + def update(self, enabled=None, cnames=None, comment=None): + """ + Update the configuration of the Distribution. + + :type enabled: bool + :param enabled: Whether the Distribution is active or not. + + :type cnames: list of str + :param cnames: The DNS CNAME's associated with this + Distribution. Maximum of 10 values. + + :type comment: str or unicode + :param comment: The comment associated with the Distribution. 
+ + """ + new_config = StreamingDistributionConfig(self.connection, + self.config.origin, + self.config.enabled, + self.config.caller_reference, + self.config.cnames, + self.config.comment) + if enabled != None: + new_config.enabled = enabled + if cnames != None: + new_config.cnames = cnames + if comment != None: + new_config.comment = comment + + self.etag = self.connection.set_streaming_distribution_config(self.id, + self.etag, + new_config) + self.config = new_config + + def delete(self): + self.connection.delete_streaming_distribution(self.id, self.etag) + + diff --git a/vendor/boto/boto/cloudfront/exception.py b/vendor/boto/boto/cloudfront/exception.py new file mode 100644 index 000000000000..768064210c27 --- /dev/null +++ b/vendor/boto/boto/cloudfront/exception.py @@ -0,0 +1,26 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import BotoServerError + +class CloudFrontServerError(BotoServerError): + + pass diff --git a/vendor/boto/boto/cloudfront/identity.py b/vendor/boto/boto/cloudfront/identity.py new file mode 100644 index 000000000000..1571e87a0a6b --- /dev/null +++ b/vendor/boto/boto/cloudfront/identity.py @@ -0,0 +1,122 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +import uuid + +class OriginAccessIdentity: + + def __init__(self, connection=None, config=None, id='', + s3_user_id='', comment=''): + self.connection = connection + self.config = config + self.id = id + self.s3_user_id = s3_user_id + self.comment = comment + self.etag = None + + def startElement(self, name, attrs, connection): + if name == 'CloudFrontOriginAccessIdentityConfig': + self.config = OriginAccessIdentityConfig() + return self.config + else: + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'S3CanonicalUserId': + self.s3_user_id = value + elif name == 'Comment': + self.comment = value + else: + setattr(self, name, value) + + def update(self, comment=None): + new_config = OriginAccessIdentityConfig(self.connection, + self.config.caller_reference, + self.config.comment) + if comment != None: + new_config.comment = comment + self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config) + self.config = new_config + + def delete(self): + return self.connection.delete_origin_access_identity(self.id, self.etag) + + def uri(self): + return 'origin-access-identity/cloudfront/%s' % self.id + +class OriginAccessIdentityConfig: + + def __init__(self, connection=None, caller_reference='', comment=''): + self.connection = connection + if caller_reference: + self.caller_reference = caller_reference + else: + self.caller_reference = str(uuid.uuid4()) + self.comment = comment + + def to_xml(self): + s = '\n' + s += '\n' + s += ' %s\n' % self.caller_reference + if self.comment: + s += ' %s\n' % self.comment + s += '\n' + return s + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Comment': + self.comment = value + elif name == 'CallerReference': + self.caller_reference = value + else: + setattr(self, name, value) + +class OriginAccessIdentitySummary: + + def __init__(self, connection=None, id='', + s3_user_id='', comment=''): + self.connection = connection + self.id = id + self.s3_user_id = s3_user_id + self.comment = comment + self.etag = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Id': + self.id = value + elif name == 'S3CanonicalUserId': + self.s3_user_id = value + elif name == 'Comment': + self.comment = value + else: + setattr(self, name, value) + + def get_origin_access_identity(self): + return self.connection.get_origin_access_identity_info(self.id) + diff --git a/vendor/boto/boto/cloudfront/logging.py b/vendor/boto/boto/cloudfront/logging.py new file mode 100644 index 000000000000..6c2f4fde2fec --- /dev/null +++ b/vendor/boto/boto/cloudfront/logging.py @@ -0,0 +1,38 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
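# --- Illustrative aside (not part of the original patch) ---
# The model classes in this package (DistributionConfig, OriginAccessIdentity,
# LoggingInfo, ...) are populated by SAX callbacks rather than by a DOM walk.
# A minimal sketch of the parse step the connection methods perform; the XML
# body here is a hypothetical, abbreviated example.
import xml.sax
from boto import handler
from boto.cloudfront.identity import OriginAccessIdentityConfig

body = """<CloudFrontOriginAccessIdentityConfig>
  <CallerReference>ref-123</CallerReference>
  <Comment>example</Comment>
</CloudFrontOriginAccessIdentityConfig>"""

config = OriginAccessIdentityConfig()
h = handler.XmlHandler(config, None)    # endElement() fills in the attributes
xml.sax.parseString(body, h)
print config.caller_reference, config.comment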
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class LoggingInfo(object): + + def __init__(self, bucket='', prefix=''): + self.bucket = bucket + self.prefix = prefix + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Bucket': + self.bucket = value + elif name == 'Prefix': + self.prefix = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/cloudfront/object.py b/vendor/boto/boto/cloudfront/object.py new file mode 100644 index 000000000000..3574d1363406 --- /dev/null +++ b/vendor/boto/boto/cloudfront/object.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +from boto.s3.key import Key + +class Object(Key): + + def __init__(self, bucket, name=None): + Key.__init__(self, bucket, name=name) + self.distribution = bucket.distribution + + def __repr__(self): + return '' % (self.distribution.config.origin, self.name) + + def url(self, scheme='http'): + url = '%s://' % scheme + url += self.distribution.domain_name + if scheme.lower().startswith('rtmp'): + url += '/cfx/st/' + else: + url += '/' + url += self.name + return url + +class StreamingObject(Object): + + def url(self, scheme='rtmp'): + return Object.url(self, scheme) + + diff --git a/vendor/boto/boto/cloudfront/signers.py b/vendor/boto/boto/cloudfront/signers.py new file mode 100644 index 000000000000..0b0cd50a7672 --- /dev/null +++ b/vendor/boto/boto/cloudfront/signers.py @@ -0,0 +1,60 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Signer: + + def __init__(self): + self.id = None + self.key_pair_ids = [] + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Self': + self.id = 'Self' + elif name == 'AwsAccountNumber': + self.id = value + elif name == 'KeyPairId': + self.key_pair_ids.append(value) + +class ActiveTrustedSigners(list): + + def startElement(self, name, attrs, connection): + if name == 'Signer': + s = Signer() + self.append(s) + return s + + def endElement(self, name, value, connection): + pass + +class TrustedSigners(list): + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Self': + self.append(name) + elif name == 'AwsAccountNumber': + self.append(value) + diff --git a/vendor/boto/boto/connection.py b/vendor/boto/boto/connection.py new file mode 100644 index 000000000000..41a3c771aea6 --- /dev/null +++ b/vendor/boto/boto/connection.py @@ -0,0 +1,644 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2008 rPath, Inc. 
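# --- Illustrative aside (not part of the original patch) ---
# A minimal sketch of how Object.url() composes CloudFront URLs; the
# distribution ids below are hypothetical. StreamingObject simply calls
# Object.url() with an 'rtmp' scheme, which adds the '/cfx/st/' prefix.
import boto

cf = boto.connect_cloudfront()
dist = cf.get_distribution_info('EDFDVBD6EXAMPLE')              # hypothetical id
for obj in dist.get_objects():
    print obj.url()                     # http://<domain_name>/<key name>

sdist = cf.get_streaming_distribution_info('ESDFDVBDEXAMPLE')   # hypothetical id
for obj in sdist.get_objects():
    print obj.url()                     # rtmp://<domain_name>/cfx/st/<key name>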
+# Copyright (c) 2009 The Echo Nest Corporation +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# +# Parts of this code were copied or derived from sample code supplied by AWS. +# The following notice applies to that code. +# +# This software code is made available "AS IS" without warranties of any +# kind. You may copy, display, modify and redistribute the software +# code either by itself or as incorporated into your code; provided that +# you do not remove any proprietary notices. Your use of this software +# code is at your own risk and you waive any claim against Amazon +# Digital Services, Inc. or its affiliates with respect to your use of +# this software code. (c) 2006 Amazon Digital Services, Inc. or its +# affiliates. + +""" +Handles basic connections to AWS +""" + +import base64 +import hmac +import httplib +import socket, errno +import re +import sys +import time +import urllib, urlparse +import os +import xml.sax +import Queue +import boto +from boto.exception import BotoClientError, BotoServerError +from boto.resultset import ResultSet +import boto.utils +from boto import config, UserAgent, handler + +# +# the following is necessary because of the incompatibilities +# between Python 2.4, 2.5, and 2.6 as well as the fact that some +# people running 2.4 have installed hashlib as a separate module +# this fix was provided by boto user mccormix. +# see: http://code.google.com/p/boto/issues/detail?id=172 +# for more details. +# +try: + from hashlib import sha1 as sha + from hashlib import sha256 as sha256 + + if sys.version[:3] == "2.4": + # we are using an hmac that expects a .new() method. 
+ class Faker: + def __init__(self, which): + self.which = which + self.digest_size = self.which().digest_size + + def new(self, *args, **kwargs): + return self.which(*args, **kwargs) + + sha = Faker(sha) + sha256 = Faker(sha256) + +except ImportError: + import sha + sha256 = None + +PORTS_BY_SECURITY = { True: 443, False: 80 } + +class ConnectionPool: + def __init__(self, hosts, connections_per_host): + self._hosts = boto.utils.LRUCache(hosts) + self.connections_per_host = connections_per_host + + def __getitem__(self, key): + if key not in self._hosts: + self._hosts[key] = Queue.Queue(self.connections_per_host) + return self._hosts[key] + + def __repr__(self): + return 'ConnectionPool:%s' % ','.join(self._hosts._dict.keys()) + +class AWSAuthConnection: + def __init__(self, host, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, path='/'): + """ + :type host: string + :param host: The host to make the connection to + + :type aws_access_key_id: string + :param aws_access_key_id: AWS Access Key ID (provided by Amazon) + + :type aws_secret_access_key: string + :param aws_secret_access_key: Secret Access Key (provided by Amazon) + + :type is_secure: boolean + :param is_secure: Whether the connection is over SSL + + :type https_connection_factory: list or tuple + :param https_connection_factory: A pair of an HTTP connection + factory and the exceptions to catch. + The factory should have a similar + interface to L{httplib.HTTPSConnection}. + + :type proxy: + :param proxy: + + :type proxy_port: int + :param proxy_port: The port to use when connecting over a proxy + + :type proxy_user: string + :param proxy_user: The username to connect with on the proxy + + :type proxy_pass: string + :param proxy_pass: The password to use when connection over a proxy. 
+ + :type port: integer + :param port: The port to use to connect + """ + + self.num_retries = 5 + self.is_secure = is_secure + self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass) + # define exceptions from httplib that we want to catch and retry + self.http_exceptions = (httplib.HTTPException, socket.error, socket.gaierror) + # define values in socket exceptions we don't want to catch + self.socket_exception_values = (errno.EINTR,) + if https_connection_factory is not None: + self.https_connection_factory = https_connection_factory[0] + self.http_exceptions += https_connection_factory[1] + else: + self.https_connection_factory = None + if (is_secure): + self.protocol = 'https' + else: + self.protocol = 'http' + self.host = host + self.path = path + if debug: + self.debug = debug + else: + self.debug = config.getint('Boto', 'debug', debug) + if port: + self.port = port + else: + self.port = PORTS_BY_SECURITY[is_secure] + + if aws_access_key_id: + self.aws_access_key_id = aws_access_key_id + elif os.environ.has_key('AWS_ACCESS_KEY_ID'): + self.aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID'] + elif config.has_option('Credentials', 'aws_access_key_id'): + self.aws_access_key_id = config.get('Credentials', 'aws_access_key_id') + + if aws_secret_access_key: + self.aws_secret_access_key = aws_secret_access_key + elif os.environ.has_key('AWS_SECRET_ACCESS_KEY'): + self.aws_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY'] + elif config.has_option('Credentials', 'aws_secret_access_key'): + self.aws_secret_access_key = config.get('Credentials', 'aws_secret_access_key') + + # initialize an HMAC for signatures, make copies with each request + self.hmac = hmac.new(self.aws_secret_access_key, digestmod=sha) + if sha256: + self.hmac_256 = hmac.new(self.aws_secret_access_key, digestmod=sha256) + else: + self.hmac_256 = None + + # cache up to 20 connections per host, up to 20 hosts + self._pool = ConnectionPool(20, 20) + self._connection = (self.server_name(), self.is_secure) + self._last_rs = None + + def __repr__(self): + return '%s:%s' % (self.__class__.__name__, self.host) + + def _cached_name(self, host, is_secure): + if host is None: + host = self.server_name() + cached_name = is_secure and 'https://' or 'http://' + cached_name += host + return cached_name + + def connection(self): + return self.get_http_connection(*self._connection) + + connection = property(connection) + + def get_path(self, path='/'): + pos = path.find('?') + if pos >= 0: + params = path[pos:] + path = path[:pos] + else: + params = None + if path[-1] == '/': + need_trailing = True + else: + need_trailing = False + path_elements = self.path.split('/') + path_elements.extend(path.split('/')) + path_elements = [p for p in path_elements if p] + path = '/' + '/'.join(path_elements) + if path[-1] != '/' and need_trailing: + path += '/' + if params: + path = path + params + return path + + def server_name(self, port=None): + if not port: + port = self.port + if port == 80: + signature_host = self.host + else: + # This unfortunate little hack can be attributed to + # a difference in the 2.6 version of httplib. In old + # versions, it would append ":443" to the hostname sent + # in the Host header and so we needed to make sure we + # did the same when calculating the V2 signature. In 2.6 + # it no longer does that. Hence, this kludge. 
+ if sys.version[:3] == "2.6" and port == 443: + signature_host = self.host + else: + signature_host = '%s:%d' % (self.host, port) + return signature_host + + def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass): + self.proxy = proxy + self.proxy_port = proxy_port + self.proxy_user = proxy_user + self.proxy_pass = proxy_pass + if os.environ.has_key('http_proxy') and not self.proxy: + pattern = re.compile( + '(?:http://)?' \ + '(?:(?P<user>\w+):(?P<pass>.*)@)?' \ + '(?P<host>[\w\-\.]+)' \ + '(?::(?P<port>\d+))?' + ) + match = pattern.match(os.environ['http_proxy']) + if match: + self.proxy = match.group('host') + self.proxy_port = match.group('port') + self.proxy_user = match.group('user') + self.proxy_pass = match.group('pass') + else: + if not self.proxy: + self.proxy = config.get_value('Boto', 'proxy', None) + if not self.proxy_port: + self.proxy_port = config.get_value('Boto', 'proxy_port', None) + if not self.proxy_user: + self.proxy_user = config.get_value('Boto', 'proxy_user', None) + if not self.proxy_pass: + self.proxy_pass = config.get_value('Boto', 'proxy_pass', None) + + if not self.proxy_port and self.proxy: + print "http_proxy environment variable does not specify " \ + "a port, using default" + self.proxy_port = self.port + self.use_proxy = (self.proxy != None) + + def get_http_connection(self, host, is_secure): + queue = self._pool[self._cached_name(host, is_secure)] + try: + return queue.get_nowait() + except Queue.Empty: + return self.new_http_connection(host, is_secure) + + def new_http_connection(self, host, is_secure): + if self.use_proxy: + host = '%s:%d' % (self.proxy, int(self.proxy_port)) + if host is None: + host = self.server_name() + boto.log.debug('establishing HTTP connection') + if is_secure: + if self.use_proxy: + connection = self.proxy_ssl() + elif self.https_connection_factory: + connection = self.https_connection_factory(host) + else: + connection = httplib.HTTPSConnection(host) + else: + connection = httplib.HTTPConnection(host) + if self.debug > 1: + connection.set_debuglevel(self.debug) + # self.connection must be maintained for backwards-compatibility + # however, it must be dynamically pulled from the connection pool + # set a private variable which will enable that + if host.split(':')[0] == self.host and is_secure == self.is_secure: + self._connection = (host, is_secure) + return connection + + def put_http_connection(self, host, is_secure, connection): + try: + self._pool[self._cached_name(host, is_secure)].put_nowait(connection) + except Queue.Full: + # gracefully fail in case of pool overflow + connection.close() + + def proxy_ssl(self): + host = '%s:%d' % (self.host, self.port) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + sock.connect((self.proxy, int(self.proxy_port))) + except: + raise + sock.sendall("CONNECT %s HTTP/1.0\r\n" % host) + sock.sendall("User-Agent: %s\r\n" % UserAgent) + if self.proxy_user and self.proxy_pass: + for k, v in self.get_proxy_auth_header().items(): + sock.sendall("%s: %s\r\n" % (k, v)) + sock.sendall("\r\n") + resp = httplib.HTTPResponse(sock, strict=True) + resp.begin() + + if resp.status != 200: + # Fake a socket error, use a code that makes it obvious it hasn't + # been generated by the socket library + raise socket.error(-71, + "Error talking to HTTP proxy %s:%s: %s (%s)" % + (self.proxy, self.proxy_port, resp.status, resp.reason)) + + # We can safely close the response, it duped the original socket + resp.close() + + h = httplib.HTTPConnection(host) + + # Wrap the socket in an SSL socket + if
hasattr(httplib, 'ssl'): + sslSock = httplib.ssl.SSLSocket(sock) + else: # Old Python, no ssl module + sslSock = socket.ssl(sock, None, None) + sslSock = httplib.FakeSocket(sock, sslSock) + # This is a bit unclean + h.sock = sslSock + return h + + def prefix_proxy_to_path(self, path, host=None): + path = self.protocol + '://' + (host or self.server_name()) + path + return path + + def get_proxy_auth_header(self): + auth = base64.encodestring(self.proxy_user+':'+self.proxy_pass) + return {'Proxy-Authorization': 'Basic %s' % auth} + + def _mexe(self, method, path, data, headers, host=None, sender=None): + """ + mexe - Multi-execute inside a loop, retrying multiple times to handle + transient Internet errors by simply trying again. + Also handles redirects. + + This code was inspired by the S3Utils classes posted to the boto-users + Google group by Larry Bates. Thanks! + """ + boto.log.debug('Method: %s' % method) + boto.log.debug('Path: %s' % path) + boto.log.debug('Data: %s' % data) + boto.log.debug('Headers: %s' % headers) + boto.log.debug('Host: %s' % host) + response = None + body = None + e = None + num_retries = config.getint('Boto', 'num_retries', self.num_retries) + i = 0 + connection = self.get_http_connection(host, self.is_secure) + while i <= num_retries: + try: + if callable(sender): + response = sender(connection, method, path, data, headers) + else: + connection.request(method, path, data, headers) + response = connection.getresponse() + location = response.getheader('location') + # -- gross hack -- + # httplib gets confused with chunked responses to HEAD requests + # so I have to fake it out + if method == 'HEAD' and getattr(response, 'chunked', False): + response.chunked = 0 + if response.status == 500 or response.status == 503: + boto.log.debug('received %d response, retrying in %d seconds' % (response.status, 2**i)) + body = response.read() + elif response.status == 408: + body = response.read() + print '-------------------------' + print ' 4 0 8 ' + print 'path=%s' % path + print body + print '-------------------------' + elif response.status < 300 or response.status >= 400 or \ + not location: + self.put_http_connection(host, self.is_secure, connection) + return response + else: + scheme, host, path, params, query, fragment = \ + urlparse.urlparse(location) + if query: + path += '?' + query + boto.log.debug('Redirecting: %s' % scheme + '://' + host + path) + connection = self.get_http_connection(host, + scheme == 'https') + continue + except KeyboardInterrupt: + sys.exit('Keyboard Interrupt') + except self.http_exceptions, e: + boto.log.debug('encountered %s exception, reconnecting' % \ + e.__class__.__name__) + connection = self.new_http_connection(host, self.is_secure) + time.sleep(2**i) + i += 1 + # If we made it here, it's because we have exhausted our retries and still haven't + # succeeded. So, if we have a response object, use it to raise an exception. + # Otherwise, raise the exception that must have already happened.
+ if response: + raise BotoServerError(response.status, response.reason, body) + elif e: + raise e + else: + raise BotoClientError('Please report this exception as a Boto Issue!') + + def make_request(self, method, path, headers=None, data='', host=None, + auth_path=None, sender=None): + path = self.get_path(path) + if headers == None: + headers = {} + else: + headers = headers.copy() + headers['User-Agent'] = UserAgent + if not headers.has_key('Content-Length'): + headers['Content-Length'] = str(len(data)) + if self.use_proxy: + path = self.prefix_proxy_to_path(path, host) + if self.proxy_user and self.proxy_pass and not self.is_secure: + # If is_secure, we don't have to set the proxy authentication + # header here, we did that in the CONNECT to the proxy. + headers.update(self.get_proxy_auth_header()) + request_string = auth_path or path + self.add_aws_auth_header(headers, method, request_string) + return self._mexe(method, path, data, headers, host, sender) + + def add_aws_auth_header(self, headers, method, path): + path = self.get_path(path) + if not headers.has_key('Date'): + headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", + time.gmtime()) + + c_string = boto.utils.canonical_string(method, path, headers) + boto.log.debug('Canonical: %s' % c_string) + hmac = self.hmac.copy() + hmac.update(c_string) + b64_hmac = base64.encodestring(hmac.digest()).strip() + headers['Authorization'] = "AWS %s:%s" % (self.aws_access_key_id, b64_hmac) + + def close(self): + """(Optional) Close any open HTTP connections. This is non-destructive, + and making a new request will open a connection again.""" + + boto.log.debug('closing all HTTP connections') + self.connection = None # compat field + + +class AWSQueryConnection(AWSAuthConnection): + + APIVersion = '' + SignatureVersion = '1' + ResponseError = BotoServerError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host=None, debug=0, + https_connection_factory=None, path='/'): + AWSAuthConnection.__init__(self, host, aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + debug, https_connection_factory, path) + + def get_utf8_value(self, value): + if not isinstance(value, str) and not isinstance(value, unicode): + value = str(value) + if isinstance(value, unicode): + return value.encode('utf-8') + else: + return value + + def calc_signature_0(self, params): + boto.log.debug('using calc_signature_0') + hmac = self.hmac.copy() + s = params['Action'] + params['Timestamp'] + hmac.update(s) + keys = params.keys() + keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower())) + pairs = [] + for key in keys: + val = self.get_utf8_value(params[key]) + pairs.append(key + '=' + urllib.quote(val)) + qs = '&'.join(pairs) + return (qs, base64.b64encode(hmac.digest())) + + def calc_signature_1(self, params): + boto.log.debug('using calc_signature_1') + hmac = self.hmac.copy() + keys = params.keys() + keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower())) + pairs = [] + for key in keys: + hmac.update(key) + val = self.get_utf8_value(params[key]) + hmac.update(val) + pairs.append(key + '=' + urllib.quote(val)) + qs = '&'.join(pairs) + return (qs, base64.b64encode(hmac.digest())) + + def calc_signature_2(self, params, verb, path): + boto.log.debug('using calc_signature_2') + string_to_sign = '%s\n%s\n%s\n' % (verb, self.server_name().lower(), path) + if self.hmac_256: + hmac = 
self.hmac_256.copy() + params['SignatureMethod'] = 'HmacSHA256' + else: + hmac = self.hmac.copy() + params['SignatureMethod'] = 'HmacSHA1' + keys = params.keys() + keys.sort() + pairs = [] + for key in keys: + val = self.get_utf8_value(params[key]) + pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~')) + qs = '&'.join(pairs) + boto.log.debug('query string: %s' % qs) + string_to_sign += qs + boto.log.debug('string_to_sign: %s' % string_to_sign) + hmac.update(string_to_sign) + b64 = base64.b64encode(hmac.digest()) + boto.log.debug('len(b64)=%d' % len(b64)) + boto.log.debug('base64 encoded digest: %s' % b64) + return (qs, b64) + + def get_signature(self, params, verb, path): + if self.SignatureVersion == '0': + t = self.calc_signature_0(params) + elif self.SignatureVersion == '1': + t = self.calc_signature_1(params) + elif self.SignatureVersion == '2': + t = self.calc_signature_2(params, verb, path) + else: + raise BotoClientError('Unknown Signature Version: %s' % self.SignatureVersion) + return t + + def make_request(self, action, params=None, path='/', verb='GET'): + headers = {} + if params == None: + params = {} + params['Action'] = action + params['Version'] = self.APIVersion + params['AWSAccessKeyId'] = self.aws_access_key_id + params['SignatureVersion'] = self.SignatureVersion + params['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime()) + qs, signature = self.get_signature(params, verb, self.get_path(path)) + if verb == 'POST': + headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8' + request_body = qs + '&Signature=' + urllib.quote(signature) + qs = path + else: + request_body = '' + qs = path + '?' + qs + '&Signature=' + urllib.quote(signature) + return AWSAuthConnection.make_request(self, verb, qs, + data=request_body, + headers=headers) + + def build_list_params(self, params, items, label): + if isinstance(items, str): + items = [items] + for i in range(1, len(items)+1): + params['%s.%d' % (label, i)] = items[i-1] + + # generics + + def get_list(self, action, params, markers, path='/', parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if response.status == 200: + rs = ResultSet(markers) + h = handler.XmlHandler(rs, parent) + xml.sax.parseString(body, h) + return rs + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_object(self, action, params, cls, path='/', parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if response.status == 200: + obj = cls(parent) + h = handler.XmlHandler(obj, parent) + xml.sax.parseString(body, h) + return obj + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_status(self, action, params, path='/', parent=None, verb='GET'): + if not parent: + parent = self + response = self.make_request(action, params, path, verb) + body = response.read() + boto.log.debug(body) + if response.status == 200: + rs = ResultSet() + h = handler.XmlHandler(rs, parent) + xml.sax.parseString(body, h) + return rs.status + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise 
self.ResponseError(response.status, response.reason, body) + diff --git a/vendor/boto/boto/contrib/__init__.py b/vendor/boto/boto/contrib/__init__.py new file mode 100644 index 000000000000..303dbb66c9ab --- /dev/null +++ b/vendor/boto/boto/contrib/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/vendor/boto/boto/contrib/m2helpers.py b/vendor/boto/boto/contrib/m2helpers.py new file mode 100644 index 000000000000..82d2730515ee --- /dev/null +++ b/vendor/boto/boto/contrib/m2helpers.py @@ -0,0 +1,52 @@ +# Copyright (c) 2006,2007 Jon Colverson +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module was contributed by Jon Colverson. It provides a couple of helper +functions that allow you to use M2Crypto's implementation of HTTPSConnection +rather than the default version in httplib.py. The main benefit is that +M2Crypto's version verifies the certificate of the server. + +To use this feature, do something like this: + +from boto.ec2.connection import EC2Connection + +ec2 = EC2Connection(ACCESS_KEY_ID, SECRET_ACCESS_KEY, + https_connection_factory=https_connection_factory(cafile=CA_FILE)) + +See http://code.google.com/p/boto/issues/detail?id=57 for more details. 
+""" +from M2Crypto import SSL +from M2Crypto.httpslib import HTTPSConnection + +def secure_context(cafile=None, capath=None): + ctx = SSL.Context() + ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, depth=9) + if ctx.load_verify_locations(cafile=cafile, capath=capath) != 1: + raise Exception("Couldn't load certificates") + return ctx + +def https_connection_factory(cafile=None, capath=None): + def factory(*args, **kwargs): + return HTTPSConnection( + ssl_context=secure_context(cafile=cafile, capath=capath), + *args, **kwargs) + return (factory, (SSL.SSLError,)) diff --git a/vendor/boto/boto/contrib/ymlmessage.py b/vendor/boto/boto/contrib/ymlmessage.py new file mode 100644 index 000000000000..b9a2c932622e --- /dev/null +++ b/vendor/boto/boto/contrib/ymlmessage.py @@ -0,0 +1,52 @@ +# Copyright (c) 2006,2007 Chris Moyer +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module was contributed by Chris Moyer. It provides a subclass of the +SQS Message class that supports YAML as the body of the message. + +This module requires the yaml module. +""" +from boto.sqs.message import Message +import yaml + +class YAMLMessage(Message): + """ + The YAMLMessage class provides a YAML compatible message. Encoding and + decoding are handled automaticaly. + + Access this message data like such: + + m.data = [ 1, 2, 3] + m.data[0] # Returns 1 + + This depends on the PyYAML package + """ + + def __init__(self, queue=None, body='', xml_attrs=None): + self.data = None + Message.__init__(self, queue, body) + + def set_body(self, body): + self.data = yaml.load(body) + + def get_body(self): + return yaml.dump(self.data) diff --git a/vendor/boto/boto/ec2/__init__.py b/vendor/boto/boto/ec2/__init__.py new file mode 100644 index 000000000000..8bb3f537909e --- /dev/null +++ b/vendor/boto/boto/ec2/__init__.py @@ -0,0 +1,52 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +service from AWS. +""" +from boto.ec2.connection import EC2Connection + +def regions(**kw_params): + """ + Get all available regions for the EC2 service. + You may pass any of the arguments accepted by the EC2Connection + object's constructor as keyword arguments and they will be + passed along to the EC2Connection object. + + :rtype: list + :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` + """ + c = EC2Connection(**kw_params) + return c.get_all_regions() + +def connect_to_region(region_name, **kw_params): + for region in regions(**kw_params): + if region.name == region_name: + return region.connect(**kw_params) + return None + +def get_region(region_name, **kw_params): + for region in regions(**kw_params): + if region.name == region_name: + return region + return None + diff --git a/vendor/boto/boto/ec2/address.py b/vendor/boto/boto/ec2/address.py new file mode 100644 index 000000000000..60ed40675f11 --- /dev/null +++ b/vendor/boto/boto/ec2/address.py @@ -0,0 +1,58 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
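The region helpers defined in boto/ec2/__init__.py above are thin wrappers over EC2Connection. A rough usage sketch follows (editor's illustration only; the credentials and region name are placeholders, not part of the patch):

import boto.ec2

# List every region visible to the account; each entry is a RegionInfo object.
for region in boto.ec2.regions(aws_access_key_id='ACCESS_KEY',
                               aws_secret_access_key='SECRET_KEY'):
    print region.name

# Open a connection bound to a single region; returns None if the name is unknown.
conn = boto.ec2.connect_to_region('us-east-1',
                                  aws_access_key_id='ACCESS_KEY',
                                  aws_secret_access_key='SECRET_KEY')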
+ +""" +Represents an EC2 Elastic IP Address +""" + +from boto.ec2.ec2object import EC2Object + +class Address(EC2Object): + + def __init__(self, connection=None, public_ip=None, instance_id=None): + EC2Object.__init__(self, connection) + self.connection = connection + self.public_ip = public_ip + self.instance_id = instance_id + + def __repr__(self): + return 'Address:%s' % self.public_ip + + def endElement(self, name, value, connection): + if name == 'publicIp': + self.public_ip = value + elif name == 'instanceId': + self.instance_id = value + else: + setattr(self, name, value) + + def release(self): + return self.connection.release_address(self.public_ip) + + delete = release + + def associate(self, instance_id): + return self.connection.associate_address(instance_id, self.public_ip) + + def disassociate(self): + return self.connection.disassociate_address(self.public_ip) + + diff --git a/vendor/boto/boto/ec2/autoscale/__init__.py b/vendor/boto/boto/ec2/autoscale/__init__.py new file mode 100644 index 000000000000..a06781f1d835 --- /dev/null +++ b/vendor/boto/boto/ec2/autoscale/__init__.py @@ -0,0 +1,203 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +Auto Scaling service. +""" + +import boto +from boto.connection import AWSQueryConnection +from boto.ec2.autoscale.request import Request +from boto.ec2.autoscale.trigger import Trigger +from boto.ec2.autoscale.launchconfig import LaunchConfiguration +from boto.ec2.autoscale.group import AutoScalingGroup +from boto.ec2.autoscale.activity import Activity + + +class AutoScaleConnection(AWSQueryConnection): + APIVersion = boto.config.get('Boto', 'autoscale_version', '2009-05-15') + Endpoint = boto.config.get('Boto', 'autoscale_endpoint', + 'autoscaling.amazonaws.com') + SignatureVersion = '2' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host=Endpoint, debug=1, + https_connection_factory=None, region=None, path='/'): + """ + Init method to create a new connection to the AutoScaling service. + + B{Note:} The host argument is overridden by the host specified in the + boto configuration file. 
+ """ + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, host, debug, + https_connection_factory, path=path) + + def build_list_params(self, params, items, label): + """ items is a list of dictionaries or strings: + [{'Protocol' : 'HTTP', + 'LoadBalancerPort' : '80', + 'InstancePort' : '80'},..] etc. + or + ['us-east-1b',...] + """ + # different from EC2 list params + for i in xrange(1, len(items)+1): + if isinstance(items[i-1], dict): + for k, v in items[i-1].iteritems(): + params['%s.member.%d.%s' % (label, i, k)] = v + elif isinstance(items[i-1], basestring): + params['%s.member.%d' % (label, i)] = items[i-1] + + def _update_group(self, op, as_group): + params = { + 'AutoScalingGroupName' : as_group.name, + 'Cooldown' : as_group.cooldown, + 'LaunchConfigurationName' : as_group.launch_config_name, + 'MinSize' : as_group.min_size, + 'MaxSize' : as_group.max_size, + } + if op.startswith('Create'): + if as_group.availability_zones: + zones = self.availability_zones + else: + zones = [as_group.availability_zone] + self.build_list_params(params, as_group.load_balancers, + 'LoadBalancerNames') + self.build_list_params(params, zones, + 'AvailabilityZones') + return self.get_object(op, params, Request) + + def create_auto_scaling_group(self, as_group): + """ + Create auto scaling group. + """ + return self._update_group('CreateAutoScalingGroup', as_group) + + def create_launch_configuration(self, launch_config): + """ + Creates a new Launch Configuration. + + :type launch_config: boto.ec2.autoscale.launchconfig.LaunchConfiguration + :param launch_config: LaunchConfiguraiton object. + + """ + params = { + 'ImageId' : launch_config.image_id, + 'KeyName' : launch_config.key_name, + 'LaunchConfigurationName' : launch_config.name, + 'InstanceType' : launch_config.instance_type, + } + if launch_config.user_data: + params['UserData'] = launch_config.user_data + if launch_config.kernel_id: + params['KernelId'] = launch_config.kernel_id + if launch_config.ramdisk_id: + params['RamdiskId'] = launch_config.ramdisk_id + if launch_config.block_device_mappings: + self.build_list_params(params, launch_config.block_device_mappings, + 'BlockDeviceMappings') + self.build_list_params(params, launch_config.security_groups, + 'SecurityGroups') + return self.get_object('CreateLaunchConfiguration', params, + Request) + + def create_trigger(self, trigger): + """ + + """ + params = {'TriggerName' : trigger.name, + 'AutoScalingGroupName' : trigger.autoscale_group.name, + 'MeasureName' : trigger.measure_name, + 'Statistic' : trigger.statistic, + 'Period' : trigger.period, + 'Unit' : trigger.unit, + 'LowerThreshold' : trigger.lower_threshold, + 'LowerBreachScaleIncrement' : trigger.lower_breach_scale_increment, + 'UpperThreshold' : trigger.upper_threshold, + 'UpperBreachScaleIncrement' : trigger.upper_breach_scale_increment, + 'BreachDuration' : trigger.breach_duration} + # dimensions should be a list of tuples + dimensions = [] + for dim in trigger.dimensions: + name, value = dim + dimensions.append(dict(Name=name, Value=value)) + self.build_list_params(params, dimensions, 'Dimensions') + + req = self.get_object('CreateOrUpdateScalingTrigger', params, + Request) + return req + + def get_all_groups(self, names=None): + """ + """ + params = {} + if names: + self.build_list_params(params, names, 'AutoScalingGroupNames') + return self.get_list('DescribeAutoScalingGroups', params, + [('member', AutoScalingGroup)]) + + def 
get_all_launch_configurations(self, names=None): + """ + Return all launch configurations, optionally filtered by name. + """ + params = {} + if names: + self.build_list_params(params, names, 'LaunchConfigurationNames') + return self.get_list('DescribeLaunchConfigurations', params, + [('member', LaunchConfiguration)]) + + def get_all_activities(self, autoscale_group, + activity_ids=None, + max_records=100): + """ + Get all activities for the given autoscaling group. + + :type autoscale_group: str or AutoScalingGroup object + :param autoscale_group: The auto scaling group to get activities on. + + :type max_records: int + :param max_records: Maximum number of activities to return. + """ + name = autoscale_group + if isinstance(autoscale_group, AutoScalingGroup): + name = autoscale_group.name + params = {'AutoScalingGroupName' : name} + if activity_ids: + self.build_list_params(params, activity_ids, 'ActivityIds') + return self.get_list('DescribeScalingActivities', params, + [('member', Activity)]) + + def get_all_triggers(self, autoscale_group): + params = {'AutoScalingGroupName' : autoscale_group} + return self.get_list('DescribeTriggers', params, + [('member', Trigger)]) + + def terminate_instance(self, instance_id, decrement_capacity=True): + params = { + 'InstanceId' : instance_id, + 'ShouldDecrementDesiredCapacity' : decrement_capacity + } + return self.get_object('TerminateInstanceInAutoScalingGroup', params, + Activity) + diff --git a/vendor/boto/boto/ec2/autoscale/activity.py b/vendor/boto/boto/ec2/autoscale/activity.py new file mode 100644 index 000000000000..f895d65e9abe --- /dev/null +++ b/vendor/boto/boto/ec2/autoscale/activity.py @@ -0,0 +1,55 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
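Taken together, the AutoScaleConnection methods above cover the usual create-and-inspect cycle. A minimal sketch of that flow, assuming placeholder credentials, AMI ID, key pair and names (none of these values come from the patch):

from boto.ec2.autoscale import AutoScaleConnection
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.group import AutoScalingGroup

conn = AutoScaleConnection(aws_access_key_id='ACCESS_KEY',
                           aws_secret_access_key='SECRET_KEY')

# Register a launch configuration describing the instances to start.
lc = LaunchConfiguration(name='my-launch-config', image_id='ami-12345678',
                         key_name='my-keypair', security_groups=['default'])
conn.create_launch_configuration(lc)

# Create a group that keeps between 1 and 4 instances running on that config.
ag = AutoScalingGroup(group_name='my-group', availability_zones=['us-east-1a'],
                      launch_config=lc, load_balancers=[],
                      min_size=1, max_size=4)
conn.create_auto_scaling_group(ag)

# Scaling activities for the group can then be inspected.
activities = conn.get_all_activities(ag)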
+ + +class Activity(object): + def __init__(self, connection=None): + self.connection = connection + self.start_time = None + self.activity_id = None + self.progress = None + self.status_code = None + self.cause = None + self.description = None + + def __repr__(self): + return 'Activity:%s status:%s progress:%s' % (self.description, + self.status_code, + self.progress) + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ActivityId': + self.activity_id = value + elif name == 'StartTime': + self.start_time = value + elif name == 'Progress': + self.progress = value + elif name == 'Cause': + self.cause = value + elif name == 'Description': + self.description = value + elif name == 'StatusCode': + self.status_code = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/ec2/autoscale/group.py b/vendor/boto/boto/ec2/autoscale/group.py new file mode 100644 index 000000000000..3fa6d68f53ce --- /dev/null +++ b/vendor/boto/boto/ec2/autoscale/group.py @@ -0,0 +1,189 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import weakref + +from boto.ec2.elb.listelement import ListElement +from boto.resultset import ResultSet +from boto.ec2.autoscale.trigger import Trigger +from boto.ec2.autoscale.request import Request + +class Instance(object): + def __init__(self, connection=None): + self.connection = connection + self.instance_id = '' + + def __repr__(self): + return 'Instance:%s' % self.instance_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'InstanceId': + self.instance_id = value + else: + setattr(self, name, value) + + +class AutoScalingGroup(object): + def __init__(self, connection=None, group_name=None, + availability_zone=None, launch_config=None, + availability_zones=None, + load_balancers=None, cooldown=0, + min_size=None, max_size=None): + """ + Creates a new AutoScalingGroup with the specified name. + + You must not have already used up your entire quota of + AutoScalingGroups in order for this call to be successful. Once the + creation request is completed, the AutoScalingGroup is ready to be + used in other calls. + + :type name: str + :param name: Name of autoscaling group. + + :type availability_zone: str + :param availability_zone: An availability zone. 
DEPRECATED - use the + availability_zones parameter, which expects + a list of availability zone + strings + + :type availability_zones: list + :param availability_zones: List of availability zones. + + :type launch_config: LaunchConfiguration + :param launch_config: The launch configuration to use for instances + in this group. + + :type load_balancers: list + :param load_balancers: List of load balancers. + + :type min_size: int + :param min_size: Minimum size of the group + + :type max_size: int + :param max_size: Maximum size of the group + + :type cooldown: int + :param cooldown: Amount of time after a Scaling Activity completes + before any further scaling activities can start. + """ + self.name = group_name + self.connection = connection + self.min_size = min_size + self.max_size = max_size + self.created_time = None + self.cooldown = cooldown + self.launch_config = launch_config + if self.launch_config: + self.launch_config_name = self.launch_config.name + else: + self.launch_config_name = None + self.desired_capacity = None + lbs = load_balancers or [] + self.load_balancers = ListElement(lbs) + zones = availability_zones or [] + self.availability_zone = availability_zone + self.availability_zones = ListElement(zones) + self.instances = None + + def __repr__(self): + return 'AutoScalingGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'Instances': + self.instances = ResultSet([('member', Instance)]) + return self.instances + elif name == 'LoadBalancerNames': + return self.load_balancers + elif name == 'AvailabilityZones': + return self.availability_zones + else: + return + + def endElement(self, name, value, connection): + if name == 'MinSize': + self.min_size = value + elif name == 'CreatedTime': + self.created_time = value + elif name == 'Cooldown': + self.cooldown = value + elif name == 'LaunchConfigurationName': + self.launch_config_name = value + elif name == 'DesiredCapacity': + self.desired_capacity = value + elif name == 'MaxSize': + self.max_size = value + elif name == 'AutoScalingGroupName': + self.name = value + else: + setattr(self, name, value) + + def set_capacity(self, capacity): + """ Set the desired capacity for the group. """ + params = { + 'AutoScalingGroupName' : self.name, + 'DesiredCapacity' : capacity, + } + req = self.connection.get_object('SetDesiredCapacity', params, + Request) + self.connection.last_request = req + return req + + def update(self): + """ Sync local changes with AutoScaling group. """ + return self.connection._update_group('UpdateAutoScalingGroup', self) + + def shutdown_instances(self): + """ Convenience method which shuts down all instances associated with + this group. + """ + self.min_size = 0 + self.max_size = 0 + self.update() + + def get_all_triggers(self): + """ Get all triggers for this auto scaling group. """ + params = {'AutoScalingGroupName' : self.name} + triggers = self.connection.get_list('DescribeTriggers', params, + [('member', Trigger)]) + + # allow triggers to be able to access the autoscale group + for tr in triggers: + tr.autoscale_group = weakref.proxy(self) + + return triggers + + def delete(self): + """ Delete this auto-scaling group. """ + params = {'AutoScalingGroupName' : self.name} + return self.connection.get_object('DeleteAutoScalingGroup', params, + Request) + + def get_activities(self, activity_ids=None, max_records=100): + """ + Get all activities for this group.
+ """ + return self.connection.get_all_activities(self, activity_ids, max_records) + diff --git a/vendor/boto/boto/ec2/autoscale/instance.py b/vendor/boto/boto/ec2/autoscale/instance.py new file mode 100644 index 000000000000..2e9ae465e4c0 --- /dev/null +++ b/vendor/boto/boto/ec2/autoscale/instance.py @@ -0,0 +1,53 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class Instance(object): + def __init__(self, connection=None): + self.connection = connection + self.instance_id = '' + self.lifecycle_state = None + self.availability_zone = '' + + def __repr__(self): + return 'Instance:%s' % self.instance_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'InstanceId': + self.instance_id = value + elif name == 'LifecycleState': + self.lifecycle_state = value + elif name == 'AvailabilityZone': + self.availability_zone = value + else: + setattr(self, name, value) + + # BUG: self.get_object is not defined + # BUG: Request is not defined + # def terminate(self): + # """ Terminate this instance. """ + # params = {'LaunchConfigurationName' : self.instance_id} + # return self.get_object('DeleteLaunchConfiguration', params, + # Request) + diff --git a/vendor/boto/boto/ec2/autoscale/launchconfig.py b/vendor/boto/boto/ec2/autoscale/launchconfig.py new file mode 100644 index 000000000000..7587cb64f566 --- /dev/null +++ b/vendor/boto/boto/ec2/autoscale/launchconfig.py @@ -0,0 +1,98 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +from boto.ec2.autoscale.request import Request +from boto.ec2.elb.listelement import ListElement + + +class LaunchConfiguration(object): + def __init__(self, connection=None, name=None, image_id=None, + key_name=None, security_groups=None, user_data=None, + instance_type='m1.small', kernel_id=None, + ramdisk_id=None, block_device_mappings=None): + """ + A launch configuration. + + :type name: str + :param name: Name of the launch configuration to create. + + :type image_id: str + :param image_id: Unique ID of the Amazon Machine Image (AMI) which was + assigned during registration. + + :type key_name: str + :param key_name: The name of the EC2 key pair. + + :type security_groups: list + :param security_groups: Names of the security groups with which to + associate the EC2 instances. + + """ + self.connection = connection + self.name = name + self.instance_type = instance_type + self.block_device_mappings = block_device_mappings + self.key_name = key_name + sec_groups = security_groups or [] + self.security_groups = ListElement(sec_groups) + self.image_id = image_id + self.ramdisk_id = ramdisk_id + self.created_time = None + self.kernel_id = kernel_id + self.user_data = user_data + self.created_time = None + + def __repr__(self): + return 'LaunchConfiguration:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'SecurityGroups': + return self.security_groups + else: + return + + def endElement(self, name, value, connection): + if name == 'InstanceType': + self.instance_type = value + elif name == 'LaunchConfigurationName': + self.name = value + elif name == 'KeyName': + self.key_name = value + elif name == 'ImageId': + self.image_id = value + elif name == 'CreatedTime': + self.created_time = value + elif name == 'KernelId': + self.kernel_id = value + elif name == 'RamdiskId': + self.ramdisk_id = value + elif name == 'UserData': + self.user_data = value + else: + setattr(self, name, value) + + def delete(self): + """ Delete this launch configuration. """ + params = {'LaunchConfigurationName' : self.name} + return self.connection.get_object('DeleteLaunchConfiguration', params, + Request) + diff --git a/vendor/boto/boto/ec2/autoscale/request.py b/vendor/boto/boto/ec2/autoscale/request.py new file mode 100644 index 000000000000..c066dff5bec6 --- /dev/null +++ b/vendor/boto/boto/ec2/autoscale/request.py @@ -0,0 +1,38 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Request(object): + def __init__(self, connection=None): + self.connection = connection + self.request_id = '' + + def __repr__(self): + return 'Request:%s' % self.request_id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'RequestId': + self.request_id = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/ec2/autoscale/trigger.py b/vendor/boto/boto/ec2/autoscale/trigger.py new file mode 100644 index 000000000000..197803d1e3a0 --- /dev/null +++ b/vendor/boto/boto/ec2/autoscale/trigger.py @@ -0,0 +1,137 @@ +# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import weakref + +from boto.ec2.autoscale.request import Request + + +class Trigger(object): + """ + An auto scaling trigger. + """ + + def __init__(self, connection=None, name=None, autoscale_group=None, + dimensions=None, measure_name=None, + statistic=None, unit=None, period=60, + lower_threshold=None, + lower_breach_scale_increment=None, + upper_threshold=None, + upper_breach_scale_increment=None, + breach_duration=None): + """ + Initialize an auto-scaling trigger object. + + :type name: str + :param name: The name for this trigger + + :type autoscale_group: str + :param autoscale_group: The name of the AutoScalingGroup that will be + associated with the trigger. The AutoScalingGroup + that will be affected by the trigger when it is + activated. + + :type dimensions: list + :param dimensions: List of tuples, i.e. + ('ImageId', 'i-13lasde') etc. + + :type measure_name: str + :param measure_name: The measure name associated with the metric used by + the trigger to determine when to activate, for + example, CPU, network I/O, or disk I/O. + + :type statistic: str + :param statistic: The particular statistic used by the trigger when + fetching metric statistics to examine. + + :type period: int + :param period: The period associated with the metric statistics in + seconds. Valid Values: 60 or a multiple of 60. 
+ + :type unit: str + :param unit: The standard unit of measurement for the metric + (for example Seconds, Percent or Bytes). + + :type lower_threshold: float + :param lower_threshold: The lower limit for the metric; breaching it + applies the lower_breach_scale_increment. + """ + self.name = name + self.connection = connection + self.dimensions = dimensions + self.breach_duration = breach_duration + self.upper_breach_scale_increment = upper_breach_scale_increment + self.created_time = None + self.upper_threshold = upper_threshold + self.status = None + self.lower_threshold = lower_threshold + self.period = period + self.lower_breach_scale_increment = lower_breach_scale_increment + self.statistic = statistic + self.unit = unit + self.namespace = None + if autoscale_group: + self.autoscale_group = weakref.proxy(autoscale_group) + else: + self.autoscale_group = None + self.measure_name = measure_name + + def __repr__(self): + return 'Trigger:%s' % (self.name) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'BreachDuration': + self.breach_duration = value + elif name == 'TriggerName': + self.name = value + elif name == 'Period': + self.period = value + elif name == 'CreatedTime': + self.created_time = value + elif name == 'Statistic': + self.statistic = value + elif name == 'Unit': + self.unit = value + elif name == 'Namespace': + self.namespace = value + elif name == 'AutoScalingGroupName': + self.autoscale_group_name = value + elif name == 'MeasureName': + self.measure_name = value + else: + setattr(self, name, value) + + def update(self): + """ Write out differences to trigger. """ + self.connection.create_trigger(self) + + def delete(self): + """ Delete this trigger. """ + params = { + 'TriggerName' : self.name, + 'AutoScalingGroupName' : self.autoscale_group_name, + } + req = self.connection.get_object('DeleteTrigger', params, + Request) + self.connection.last_request = req + return req + + diff --git a/vendor/boto/boto/ec2/blockdevicemapping.py b/vendor/boto/boto/ec2/blockdevicemapping.py new file mode 100644 index 000000000000..f315fe98378d --- /dev/null +++ b/vendor/boto/boto/ec2/blockdevicemapping.py @@ -0,0 +1,98 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
+# + +class BlockDeviceType(object): + + def __init__(self, connection=None): + self.connection = connection + self.ephemeral_name = None + self.volume_id = None + self.snapshot_id = None + self.status = None + self.attach_time = None + self.delete_on_termination = False + self.size = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name =='volumeId': + self.volume_id = value + elif name == 'virtualName': + self.ephemeral_name = value + elif name =='snapshotId': + self.snapshot_id = value + elif name == 'volumeSize': + self.size = int(value) + elif name == 'status': + self.status = value + elif name == 'attachTime': + self.attach_time = value + elif name == 'deleteOnTermination': + if value == 'true': + self.delete_on_termination = True + else: + self.delete_on_termination = False + else: + setattr(self, name, value) + +# for backwards compatibility +EBSBlockDeviceType = BlockDeviceType + +class BlockDeviceMapping(dict): + + def __init__(self, connection=None): + dict.__init__(self) + self.connection = connection + self.current_name = None + self.current_value = None + + def startElement(self, name, attrs, connection): + if name == 'ebs': + self.current_value = BlockDeviceType(self) + return self.current_value + + def endElement(self, name, value, connection): + if name == 'device' or name == 'deviceName': + self.current_name = value + elif name == 'item': + self[self.current_name] = self.current_value + + def build_list_params(self, params, prefix=''): + i = 1 + for dev_name in self: + pre = '%sBlockDeviceMapping.%d' % (prefix, i) + params['%s.DeviceName' % pre] = dev_name + block_dev = self[dev_name] + if block_dev.ephemeral_name: + params['%s.VirtualName' % pre] = block_dev.ephemeral_name + else: + if block_dev.snapshot_id: + params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id + if block_dev.size: + params['%s.Ebs.VolumeSize' % pre] = block_dev.size + if block_dev.delete_on_termination: + params['%s.Ebs.DeleteOnTermination' % pre] = 'true' + else: + params['%s.Ebs.DeleteOnTermination' % pre] = 'false' + i += 1 diff --git a/vendor/boto/boto/ec2/bundleinstance.py b/vendor/boto/boto/ec2/bundleinstance.py new file mode 100644 index 000000000000..96519921e44d --- /dev/null +++ b/vendor/boto/boto/ec2/bundleinstance.py @@ -0,0 +1,78 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
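The BlockDeviceMapping class above is an ordinary dict keyed by device name, and build_list_params flattens it into EC2 request parameters. A small illustrative sketch (the device name and volume size are made up, not taken from the patch):

from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

# Describe an 8 GB EBS volume that is deleted when the instance terminates.
ebs = BlockDeviceType()
ebs.size = 8
ebs.delete_on_termination = True

bdm = BlockDeviceMapping()
bdm['/dev/sdb'] = ebs

# Produces keys such as BlockDeviceMapping.1.DeviceName,
# BlockDeviceMapping.1.Ebs.VolumeSize and
# BlockDeviceMapping.1.Ebs.DeleteOnTermination.
params = {}
bdm.build_list_params(params)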
+ +""" +Represents an EC2 Bundle Task +""" + +from boto.ec2.ec2object import EC2Object + +class BundleInstanceTask(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.instance_id = None + self.progress = None + self.start_time = None + self.state = None + self.bucket = None + self.prefix = None + self.upload_policy = None + self.upload_policy_signature = None + self.update_time = None + self.code = None + self.message = None + + def __repr__(self): + return 'BundleInstanceTask:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'bundleId': + self.id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'progress': + self.progress = value + elif name == 'startTime': + self.start_time = value + elif name == 'state': + self.state = value + elif name == 'bucket': + self.bucket = value + elif name == 'prefix': + self.prefix = value + elif name == 'uploadPolicy': + self.upload_policy = value + elif name == 'uploadPolicySignature': + self.upload_policy_signature = value + elif name == 'updateTime': + self.update_time = value + elif name == 'code': + self.code = value + elif name == 'message': + self.message = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/ec2/buyreservation.py b/vendor/boto/boto/ec2/buyreservation.py new file mode 100644 index 000000000000..45b21a142421 --- /dev/null +++ b/vendor/boto/boto/ec2/buyreservation.py @@ -0,0 +1,81 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +import boto.ec2 +from boto.sdb.db.property import StringProperty, IntegerProperty +from boto.manage import propget + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge'] + +class BuyReservation(object): + + def get_region(self, params): + if not params.get('region', None): + prop = StringProperty(name='region', verbose_name='EC2 Region', + choices=boto.ec2.regions) + params['region'] = propget.get(prop, choices=boto.ec2.regions) + + def get_instance_type(self, params): + if not params.get('instance_type', None): + prop = StringProperty(name='instance_type', verbose_name='Instance Type', + choices=InstanceTypes) + params['instance_type'] = propget.get(prop) + + def get_quantity(self, params): + if not params.get('quantity', None): + prop = IntegerProperty(name='quantity', verbose_name='Number of Instances') + params['quantity'] = propget.get(prop) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get(self, params): + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_instance_type(params) + self.get_zone(params) + self.get_quantity(params) + +if __name__ == "__main__": + obj = BuyReservation() + params = {} + obj.get(params) + offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'], + availability_zone=params['zone'].name) + print '\nThe following Reserved Instances Offerings are available:\n' + for offering in offerings: + offering.describe() + prop = StringProperty(name='offering', verbose_name='Offering', + choices=offerings) + offering = propget.get(prop) + print '\nYou have chosen this offering:' + offering.describe() + unit_price = float(offering.fixed_price) + total_price = unit_price * params['quantity'] + print '!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price) + answer = raw_input('Are you sure you want to do this? If so, enter YES: ') + if answer.strip().lower() == 'yes': + offering.purchase(params['quantity']) + else: + print 'Purchase cancelled' diff --git a/vendor/boto/boto/ec2/cloudwatch/__init__.py b/vendor/boto/boto/ec2/cloudwatch/__init__.py new file mode 100644 index 000000000000..1cb8719e72c0 --- /dev/null +++ b/vendor/boto/boto/ec2/cloudwatch/__init__.py @@ -0,0 +1,213 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +CloudWatch service from AWS. + +The 5 Minute How-To Guide +------------------------- +First, make sure you have something to monitor. You can either create a +LoadBalancer or enable monitoring on an existing EC2 instance. To enable +monitoring, you can either call the monitor_instance method on the +EC2Connection object or call the monitor method on the Instance object. + +It takes a while for the monitoring data to start accumulating but once +it does, you can do this: + +>>> import boto +>>> c = boto.connect_cloudwatch() +>>> metrics = c.list_metrics() +>>> metrics +[Metric:NetworkIn, + Metric:NetworkOut, + Metric:NetworkOut(InstanceType,m1.small), + Metric:NetworkIn(InstanceId,i-e573e68c), + Metric:CPUUtilization(InstanceId,i-e573e68c), + Metric:DiskWriteBytes(InstanceType,m1.small), + Metric:DiskWriteBytes(ImageId,ami-a1ffb63), + Metric:NetworkOut(ImageId,ami-a1ffb63), + Metric:DiskWriteOps(InstanceType,m1.small), + Metric:DiskReadBytes(InstanceType,m1.small), + Metric:DiskReadOps(ImageId,ami-a1ffb63), + Metric:CPUUtilization(InstanceType,m1.small), + Metric:NetworkIn(ImageId,ami-a1ffb63), + Metric:DiskReadOps(InstanceType,m1.small), + Metric:DiskReadBytes, + Metric:CPUUtilization, + Metric:DiskWriteBytes(InstanceId,i-e573e68c), + Metric:DiskWriteOps(InstanceId,i-e573e68c), + Metric:DiskWriteOps, + Metric:DiskReadOps, + Metric:CPUUtilization(ImageId,ami-a1ffb63), + Metric:DiskReadOps(InstanceId,i-e573e68c), + Metric:NetworkOut(InstanceId,i-e573e68c), + Metric:DiskReadBytes(ImageId,ami-a1ffb63), + Metric:DiskReadBytes(InstanceId,i-e573e68c), + Metric:DiskWriteBytes, + Metric:NetworkIn(InstanceType,m1.small), + Metric:DiskWriteOps(ImageId,ami-a1ffb63)] + +The list_metrics call will return a list of all of the available metrics +that you can query against. Each entry in the list is a Metric object. +As you can see from the list above, some of the metrics are generic metrics +and some have Dimensions associated with them (e.g. InstanceType=m1.small). +The Dimension can be used to refine your query. So, for example, I could +query the metric Metric:CPUUtilization which would create the desired statistic +by aggregating cpu utilization data across all sources of information available +or I could refine that by querying the metric +Metric:CPUUtilization(InstanceId,i-e573e68c) which would use only the data +associated with the instance identified by the instance ID i-e573e68c. + +Because for this example, I'm only monitoring a single instance, the set +of metrics available to me are fairly limited. If I was monitoring many +instances, using many different instance types and AMI's and also several +load balancers, the list of available metrics would grow considerably. + +Once you have the list of available metrics, you can actually +query the CloudWatch system for that metric. Let's choose the CPU utilization +metric for our instance. + +>>> m = metrics[5] +>>> m +Metric:CPUUtilization(InstanceId,i-e573e68c) + +The Metric object has a query method that lets us actually perform +the query against the collected data in CloudWatch. To call that, +we need a start time and end time to control the time span of data +that we are interested in. 
For this example, let's say we want the +data for the previous hour: + +>>> import datetime +>>> end = datetime.datetime.now() +>>> start = end - datetime.timedelta(hours=1) + +We also need to supply the Statistic that we want reported and +the Units to use for the results. The Statistic can be one of these +values: + +['Minimum', 'Maximum', 'Sum', 'Average', 'Samples'] + +And Units must be one of the following: + +['Seconds', 'Percent', 'Bytes', 'Bits', 'Count', +'Bytes/Second', 'Bits/Second', 'Count/Second'] + +The query method also takes an optional parameter, period. This +parameter controls the granularity (in seconds) of the data returned. +The smallest period is 60 seconds and the value must be a multiple +of 60 seconds. So, let's ask for the average as a percent: + +>>> datapoints = m.query(start, end, 'Average', 'Percent') +>>> len(datapoints) +60 + +Our period was 60 seconds and our duration was one hour so +we should get 60 data points back and we can see that we did. +Each element in the datapoints list is a DataPoint object +which is a simple subclass of a Python dict object. Each +Datapoint object contains all of the information available +about that particular data point. + +>>> d = datapoints[0] +>>> d +{u'Average': 0.0, + u'Samples': 1.0, + u'Timestamp': u'2009-05-21T19:55:00Z', + u'Unit': u'Percent'} + +My server obviously isn't very busy right now! +""" +from boto.connection import AWSQueryConnection +from boto.ec2.cloudwatch.metric import Metric +from boto.ec2.cloudwatch.datapoint import Datapoint +import boto + +class CloudWatchConnection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'cloudwatch_version', '2009-05-15') + Endpoint = boto.config.get('Boto', 'cloudwatch_endpoint', 'monitoring.amazonaws.com') + SignatureVersion = '2' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host=Endpoint, debug=0, + https_connection_factory=None, path='/'): + """ + Init method to create a new connection to EC2 Monitoring Service. + + B{Note:} The host argument is overridden by the host specified in the boto configuration file. + """ + AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + host, debug, https_connection_factory, path) + + def build_list_params(self, params, items, label): + if isinstance(items, str): + items = [items] + for i in range(1, len(items)+1): + params[label % i] = items[i-1] + + def get_metric_statistics(self, period, start_time, end_time, measure_name, + namespace, statistics=None, dimensions=None, unit=None): + """ + Get time-series data for one or more statistics of a given metric. 
+ + :type measure_name: string + :param measure_name: CPUUtilization|NetworkIO-in|NetworkIO-out|DiskIO-ALL-read| + DiskIO-ALL-write|DiskIO-ALL-read-bytes|DiskIO-ALL-write-bytes + + :rtype: list + :return: A list of :class:`boto.ec2.cloudwatch.datapoint.Datapoint` + """ + params = {'Period' : period, + 'MeasureName' : measure_name, + 'Namespace' : namespace, + 'StartTime' : start_time.isoformat(), + 'EndTime' : end_time.isoformat()} + if dimensions: + i = 1 + for name in dimensions: + params['Dimensions.member.%d.Name' % i] = name + params['Dimensions.member.%d.Value' % i] = dimensions[name] + i += 1 + if statistics: + self.build_list_params(params, statistics, 'Statistics.member.%d') + return self.get_list('GetMetricStatistics', params, [('member', Datapoint)]) + + def list_metrics(self, next_token=None): + """ + Returns a list of the valid metrics for which there is recorded data available. + + :type next_token: string + :param next_token: A maximum of 500 metrics will be returned at one time. + If more results are available, the ResultSet returned + will contain a non-Null next_token attribute. Passing + that token as a parameter to list_metrics will retrieve + the next page of metrics. + """ + params = {} + if next_token: + params['NextToken'] = next_token + return self.get_list('ListMetrics', params, [('member', Metric)]) + + + diff --git a/vendor/boto/boto/ec2/cloudwatch/datapoint.py b/vendor/boto/boto/ec2/cloudwatch/datapoint.py new file mode 100644 index 000000000000..1860f4a44a30 --- /dev/null +++ b/vendor/boto/boto/ec2/cloudwatch/datapoint.py @@ -0,0 +1,37 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE.
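Besides the Metric.query path walked through in the module docstring, get_metric_statistics can be called directly on the connection. A rough sketch under the same assumptions as the guide above: credentials come from the boto config, the instance id is hypothetical, and 'AWS/EC2' is assumed as the namespace value:

import datetime
import boto

# Connect using the credentials/endpoint from the boto config, as in the guide above.
c = boto.connect_cloudwatch()

end = datetime.datetime.now()
start = end - datetime.timedelta(hours=1)

# One hour of CPU utilization for a single (hypothetical) instance,
# averaged over 60-second periods.
datapoints = c.get_metric_statistics(60, start, end,
                                     'CPUUtilization',
                                     'AWS/EC2',          # assumed namespace
                                     statistics=['Average'],
                                     dimensions={'InstanceId': 'i-e573e68c'})
for d in datapoints:
    print d['Timestamp'], d['Average']

As the method body above shows, the dimensions dict is turned into Dimensions.member.N.Name/Value pairs and the statistics list into Statistics.member.N parameters before the GetMetricStatistics request is issued.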
+# + +class Datapoint(dict): + + def __init__(self, connection=None): + dict.__init__(self) + self.connection = connection + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name in ['Average', 'Maximum', 'Minimum', 'Samples', 'Sum']: + self[name] = float(value) + elif name != 'member': + self[name] = value + diff --git a/vendor/boto/boto/ec2/cloudwatch/metric.py b/vendor/boto/boto/ec2/cloudwatch/metric.py new file mode 100644 index 000000000000..e4661f43ee4a --- /dev/null +++ b/vendor/boto/boto/ec2/cloudwatch/metric.py @@ -0,0 +1,71 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +class Dimensions(dict): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Name': + self._name = value + elif name == 'Value': + self[self._name] = value + elif name != 'Dimensions' and name != 'member': + self[name] = value + +class Metric(object): + + Statistics = ['Minimum', 'Maximum', 'Sum', 'Average', 'Samples'] + Units = ['Seconds', 'Percent', 'Bytes', 'Bits', 'Count', + 'Bytes/Second', 'Bits/Second', 'Count/Second'] + + def __init__(self, connection=None): + self.connection = connection + self.name = None + self.namespace = None + self.dimensions = None + + def __repr__(self): + s = 'Metric:%s' % self.name + if self.dimensions: + for name,value in self.dimensions.items(): + s += '(%s,%s)' % (name, value) + return s + + def startElement(self, name, attrs, connection): + if name == 'Dimensions': + self.dimensions = Dimensions() + return self.dimensions + + def endElement(self, name, value, connection): + if name == 'MeasureName': + self.name = value + elif name == 'Namespace': + self.namespace = value + else: + setattr(self, name, value) + + def query(self, start_time, end_time, statistic, unit, period=60): + return self.connection.get_metric_statistics(period, start_time, end_time, + self.name, self.namespace, [statistic], + self.dimensions, unit) diff --git a/vendor/boto/boto/ec2/connection.py b/vendor/boto/boto/ec2/connection.py new file mode 100644 index 000000000000..a1ddf16739ec --- /dev/null +++ b/vendor/boto/boto/ec2/connection.py @@ -0,0 +1,1605 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without 
restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a connection to the EC2 service. +""" + +import urllib +import base64 +import hmac +import boto +from hashlib import sha1 as sha +from boto.connection import AWSQueryConnection +from boto.resultset import ResultSet +from boto.ec2.image import Image, ImageAttribute +from boto.ec2.instance import Reservation, Instance, ConsoleOutput, InstanceAttribute +from boto.ec2.keypair import KeyPair +from boto.ec2.address import Address +from boto.ec2.volume import Volume +from boto.ec2.snapshot import Snapshot +from boto.ec2.snapshot import SnapshotAttribute +from boto.ec2.zone import Zone +from boto.ec2.securitygroup import SecurityGroup +from boto.ec2.regioninfo import RegionInfo +from boto.ec2.instanceinfo import InstanceInfo +from boto.ec2.reservedinstance import ReservedInstancesOffering, ReservedInstance +from boto.ec2.spotinstancerequest import SpotInstanceRequest +from boto.ec2.spotpricehistory import SpotPriceHistory +from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription +from boto.ec2.bundleinstance import BundleInstanceTask +from boto.exception import EC2ResponseError + +#boto.set_stream_logger('ec2') + +class EC2Connection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'ec2_version', '2009-11-30') + DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint', + 'ec2.amazonaws.com') + SignatureVersion = '2' + ResponseError = EC2ResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, host=None, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/'): + """ + Init method to create a new connection to EC2. + + B{Note:} The host argument is overridden by the host specified in the boto configuration file. + """ + if not region: + region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path) + + def get_params(self): + """ + Returns a dictionary containing the value of of all of the keyword + arguments passed when constructing this connection. 
+ """ + param_names = ['aws_access_key_id', 'aws_secret_access_key', 'is_secure', + 'port', 'proxy', 'proxy_port', 'proxy_user', 'proxy_pass', + 'debug', 'https_connection_factory'] + params = {} + for name in param_names: + params[name] = getattr(self, name) + return params + + # Image methods + + def get_all_images(self, image_ids=None, owners=None, executable_by=None): + """ + Retrieve all the EC2 images available on your account. + + :type image_ids: list + :param image_ids: A list of strings with the image IDs wanted + + :type owners: list + :param owners: A list of owner IDs + + :type executable_by: + :param executable_by: + + :rtype: list + :return: A list of :class:`boto.ec2.image.Image` + """ + params = {} + if image_ids: + self.build_list_params(params, image_ids, 'ImageId') + if owners: + self.build_list_params(params, owners, 'Owner') + if executable_by: + self.build_list_params(params, executable_by, 'ExecutableBy') + return self.get_list('DescribeImages', params, [('item', Image)]) + + def get_all_kernels(self, kernel_ids=None, owners=None): + """ + Retrieve all the EC2 kernels available on your account. Simply filters the list returned + by get_all_images because EC2 does not provide a way to filter server-side. + + :type kernel_ids: list + :param kernel_ids: A list of strings with the image IDs wanted + + :type owners: list + :param owners: A list of owner IDs + + :rtype: list + :return: A list of :class:`boto.ec2.image.Image` + """ + rs = self.get_all_images(kernel_ids, owners) + kernels = [] + for image in rs: + if image.type == 'kernel': + kernels.append(image) + return kernels + + def get_all_ramdisks(self, ramdisk_ids=None, owners=None): + """ + Retrieve all the EC2 ramdisks available on your account. + Simply filters the list returned by get_all_images because + EC2 does not provide a way to filter server-side. + + :type ramdisk_ids: list + :param ramdisk_ids: A list of strings with the image IDs wanted + + :type owners: list + :param owners: A list of owner IDs + + :rtype: list + :return: A list of :class:`boto.ec2.image.Image` + """ + rs = self.get_all_images(ramdisk_ids, owners) + ramdisks = [] + for image in rs: + if image.type == 'ramdisk': + ramdisks.append(image) + return ramdisks + + def get_image(self, image_id): + """ + Shortcut method to retrieve a specific image (AMI). + + :type image_id: string + :param image_id: the ID of the Image to retrieve + + :rtype: :class:`boto.ec2.image.Image` + :return: The EC2 Image specified or None if the image is not found + """ + try: + return self.get_all_images(image_ids=[image_id])[0] + except IndexError: # None of those images available + return None + + def register_image(self, name=None, description=None, image_location=None, + architecture=None, kernel_id=None, ramdisk_id=None, + root_device_name=None, block_device_map=None): + """ + Register an image. + + :type name: string + :param name: The name of the AMI. Valid only for EBS-based images. + + :type description: string + :param description: The description of the AMI. + + :type image_location: string + :param image_location: Full path to your AMI manifest in Amazon S3 storage. + Only used for S3-based AMI's. + + :type architecture: string + :param architecture: The architecture of the AMI. Valid choices are: + i386 | x86_64 + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the instances + + :type root_device_name: string + :param root_device_name: The root device name (e.g. 
/dev/sdh) + + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated + with the Image. + + :rtype: string + :return: The new image id + """ + params = {} + if name: + params['Name'] = name + if description: + params['Description'] = description + if architecture: + params['Architecture'] = architecture + if kernel_id: + params['KernelId'] = kernel_id + if ramdisk_id: + params['RamdiskId'] = ramdisk_id + if image_location: + params['ImageLocation'] = image_location + if root_device_name: + params['RootDeviceName'] = root_device_name + if block_device_map: + block_device_map.build_list_params(params) + rs = self.get_object('RegisterImage', params, ResultSet) + image_id = getattr(rs, 'imageId', None) + return image_id + + def deregister_image(self, image_id): + """ + Unregister an AMI. + + :type image_id: string + :param image_id: the ID of the Image to unregister + + :rtype: bool + :return: True if successful + """ + return self.get_status('DeregisterImage', {'ImageId':image_id}) + + def create_image(self, instance_id, name, description=None, no_reboot=False): + """ + Will create an AMI from the instance in the running or stopped + state. + + :type instance_id: string + :param instance_id: the ID of the instance to image. + + :type name: string + :param name: The name of the new image + + :type description: string + :param description: An optional human-readable string describing + the contents and purpose of the AMI. + + :type no_reboot: bool + :param no_reboot: An optional flag indicating that the bundling process + should not attempt to shutdown the instance before + bundling. If this flag is True, the responsibility + of maintaining file system integrity is left to the + owner of the instance. + + :rtype: string + :return: The new image id + """ + params = {'InstanceId' : instance_id, + 'Name' : name} + if description: + params['Description'] = description + if no_reboot: + params['NoReboot'] = 'true' + rs = self.get_object('CreateImage', params, Image) + image_id = getattr(rs, 'imageId', None) + if not image_id: + image_id = getattr(rs, 'ImageId', None) + return image_id + + # ImageAttribute methods + + def get_image_attribute(self, image_id, attribute='launchPermission'): + """ + Gets an attribute from an image. + See http://docs.amazonwebservices.com/AWSEC2/2008-02-01/DeveloperGuide/ApiReference-Query-DescribeImageAttribute.html + + :type image_id: string + :param image_id: The Amazon image id for which you want info about + + :type attribute: string + :param attribute: The attribute you need information about. + Valid choices are: + * launchPermission + * productCodes + * blockDeviceMapping + + :rtype: :class:`boto.ec2.image.ImageAttribute` + :return: An ImageAttribute object representing the value of the attribute requested + """ + params = {'ImageId' : image_id, + 'Attribute' : attribute} + return self.get_object('DescribeImageAttribute', params, ImageAttribute) + + def modify_image_attribute(self, image_id, attribute='launchPermission', + operation='add', user_ids=None, groups=None, + product_codes=None): + """ + Changes an attribute of an image. 
+ See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ApiReference-query-ModifyImageAttribute.html + + :type image_id: string + :param image_id: The image id you wish to change + + :type attribute: string + :param attribute: The attribute you wish to change + + :type operation: string + :param operation: Either add or remove (this is required for changing launchPermissions) + + :type user_ids: list + :param user_ids: The Amazon IDs of users to add/remove attributes + + :type groups: list + :param groups: The groups to add/remove attributes + + :type product_codes: list + :param product_codes: Amazon DevPay product code. Currently only one + product code can be associated with an AMI. Once + set, the product code cannot be changed or reset. + """ + params = {'ImageId' : image_id, + 'Attribute' : attribute, + 'OperationType' : operation} + if user_ids: + self.build_list_params(params, user_ids, 'UserId') + if groups: + self.build_list_params(params, groups, 'UserGroup') + if product_codes: + self.build_list_params(params, product_codes, 'ProductCode') + return self.get_status('ModifyImageAttribute', params) + + def reset_image_attribute(self, image_id, attribute='launchPermission'): + """ + Resets an attribute of an AMI to its default value. + See http://docs.amazonwebservices.com/AWSEC2/2008-02-01/DeveloperGuide/ApiReference-Query-ResetImageAttribute.html + + :type image_id: string + :param image_id: ID of the AMI for which an attribute will be described + + :type attribute: string + :param attribute: The attribute to reset + + :rtype: bool + :return: Whether the operation succeeded or not + """ + params = {'ImageId' : image_id, + 'Attribute' : attribute} + return self.get_status('ResetImageAttribute', params) + + # Instance methods + + def get_all_instances(self, instance_ids=None): + """ + Retrieve all the instances associated with your account. + + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs + + :rtype: list + :return: A list of :class:`boto.ec2.instance.Reservation` + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + return self.get_list('DescribeInstances', params, [('item', Reservation)]) + + def run_instances(self, image_id, min_count=1, max_count=1, + key_name=None, security_groups=None, + user_data=None, addressing_type=None, + instance_type='m1.small', placement=None, + kernel_id=None, ramdisk_id=None, + monitoring_enabled=False, subnet_id=None, + block_device_map=None, + instance_initiated_shutdown_behavior=None): + """ + Runs an image on EC2. 
+ + :type image_id: string + :param image_id: The ID of the image to run + + :type min_count: int + :param min_count: The minimum number of instances to launch + + :type max_count: int + :param max_count: The maximum number of instances to launch + + :type key_name: string + :param key_name: The name of the key pair with which to launch instances + + :type security_groups: list of strings + :param security_groups: The names of the security groups with which to associate instances + + :type user_data: string + :param user_data: The user data passed to the launched instances + + :type instance_type: string + :param instance_type: The type of instance to run (m1.small, m1.large, m1.xlarge) + + :type placement: string + :param placement: The availability zone in which to launch the instances + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the instances + + :type ramdisk_id: string + :param ramdisk_id: The ID of the RAM disk with which to launch the instances + + :type monitoring_enabled: bool + :param monitoring_enabled: Enable CloudWatch monitoring on the instance. + + :type subnet_id: string + :param subnet_id: The subnet ID within which to launch the instances for VPC. + + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated + with the Image. + + :type instance_initiated_shutdown_behavior: string + :param instance_initiated_shutdown_behavior: Specifies whether the instance's + EBS volues are stopped (i.e. detached) + or terminated (i.e. deleted) when + the instance is shutdown by the + owner. Valid values are: + stop | terminate + + :rtype: Reservation + :return: The :class:`boto.ec2.instance.Reservation` associated with the request for machines + """ + params = {'ImageId':image_id, + 'MinCount':min_count, + 'MaxCount': max_count} + if key_name: + params['KeyName'] = key_name + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, SecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'SecurityGroup') + if user_data: + params['UserData'] = base64.b64encode(user_data) + if addressing_type: + params['AddressingType'] = addressing_type + if instance_type: + params['InstanceType'] = instance_type + if placement: + params['Placement.AvailabilityZone'] = placement + if kernel_id: + params['KernelId'] = kernel_id + if ramdisk_id: + params['RamdiskId'] = ramdisk_id + if monitoring_enabled: + params['Monitoring.Enabled'] = 'true' + if subnet_id: + params['SubnetId'] = subnet_id + if block_device_map: + block_device_map.build_list_params(params) + if instance_initiated_shutdown_behavior: + val = instance_initiated_shutdown_behavior + params['InstanceInitiatedShutdownBehavior'] = val + return self.get_object('RunInstances', params, Reservation, verb='POST') + + def terminate_instances(self, instance_ids=None): + """ + Terminate the instances specified + + :type instance_ids: list + :param instance_ids: A list of strings of the Instance IDs to terminate + + :rtype: list + :return: A list of the instances terminated + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + return self.get_list('TerminateInstances', params, [('item', Instance)]) + + def stop_instances(self, instance_ids=None, force=False): + """ + Stop the instances specified + + :type instance_ids: list + :param instance_ids: A list of strings of the 
Instance IDs to stop + + :type force: bool + :param force: Forces the instance to stop + + :rtype: list + :return: A list of the instances stopped + """ + params = {} + if force: + params['Force'] = 'true' + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + return self.get_list('StopInstances', params, [('item', Instance)]) + + def start_instances(self, instance_ids=None): + """ + Start the instances specified + + :type instance_ids: list + :param instance_ids: A list of strings of the Instance IDs to start + + :rtype: list + :return: A list of the instances started + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + return self.get_list('StartInstances', params, [('item', Instance)]) + + def get_console_output(self, instance_id): + """ + Retrieves the console output for the specified instance. + See http://docs.amazonwebservices.com/AWSEC2/2008-02-01/DeveloperGuide/ApiReference-Query-GetConsoleOutput.html + + :type instance_id: string + :param instance_id: The instance ID of a running instance on the cloud. + + :rtype: :class:`boto.ec2.instance.ConsoleOutput` + :return: The console output as a ConsoleOutput object + """ + params = {} + self.build_list_params(params, [instance_id], 'InstanceId') + return self.get_object('GetConsoleOutput', params, ConsoleOutput) + + def reboot_instances(self, instance_ids=None): + """ + Reboot the specified instances. + + :type instance_ids: list + :param instance_ids: The instances to terminate and reboot + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + return self.get_status('RebootInstances', params) + + def confirm_product_instance(self, product_code, instance_id): + params = {'ProductCode' : product_code, + 'InstanceId' : instance_id} + rs = self.get_object('ConfirmProductInstance', params, ResultSet) + return (rs.status, rs.ownerId) + + # InstanceAttribute methods + + def get_instance_attribute(self, instance_id, attribute): + """ + Gets an attribute from an instance. + + :type instance_id: string + :param instance_id: The Amazon id of the instance + + :type attribute: string + :param attribute: The attribute you need information about + Valid choices are: + instanceType|kernel|ramdisk|userData| + disableApiTermination| + instanceInitiatedShutdownBehavior| + rootDeviceName|blockDeviceMapping + + :rtype: :class:`boto.ec2.image.ImageAttribute` + :return: An ImageAttribute object representing the value of the attribute requested + """ + params = {'InstanceId' : instance_id} + if attribute: + params['Attribute'] = attribute + return self.get_object('DescribeInstanceAttribute', params, InstanceAttribute) + + def modify_instance_attribute(self, instance_id, attribute, value): + """ + Changes an attribute of an instance + + :type instance_id: string + :param instance_id: The instance id you wish to change + + :type attribute: string + :param attribute: The attribute you wish to change. 
+ AttributeName - Expected value (default) + instanceType - A valid instance type (m1.small) + kernel - Kernel ID (None) + ramdisk - Ramdisk ID (None) + userData - Base64 encoded String (None) + disableApiTermination - Boolean (true) + instanceInitiatedShutdownBehavior - stop|terminate + rootDeviceName - device name (None) + + :type value: string + :param value: The new value for the attribute + + :rtype: bool + :return: Whether the operation succeeded or not + """ + params = {'InstanceId' : instance_id, + 'Attribute' : attribute, + 'Value' : value} + return self.get_status('ModifyInstanceAttribute', params) + + def reset_instance_attribute(self, instance_id, attribute): + """ + Resets an attribute of an instance to its default value. + + :type instance_id: string + :param instance_id: ID of the instance + + :type attribute: string + :param attribute: The attribute to reset. Valid values are: + kernel|ramdisk + + :rtype: bool + :return: Whether the operation succeeded or not + """ + params = {'InstanceId' : instance_id, + 'Attribute' : attribute} + return self.get_status('ResetInstanceAttribute', params) + + # Spot Instances + + def get_all_spot_instance_requests(self, request_ids=None): + """ + Retrieve all the spot instances requests associated with your account. + + @type request_ids: list + @param request_ids: A list of strings of spot instance request IDs + + @rtype: list + @return: A list of + :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest` + """ + params = {} + if request_ids: + self.build_list_params(params, request_ids, 'SpotInstanceRequestId') + return self.get_list('DescribeSpotInstanceRequests', params, + [('item', SpotInstanceRequest)]) + + def get_spot_price_history(self, start_time=None, end_time=None, + instance_type=None, product_description=None): + """ + Retrieve the recent history of spot instances pricing. + + @type start_time: str + @param start_time: An indication of how far back to provide price + changes for. An ISO8601 DateTime string. + + @type end_time: str + @param end_time: An indication of how far forward to provide price + changes for. An ISO8601 DateTime string. + + @type instance_type: str + @param instance_type: Filter responses to a particular instance type. + + @type product_description: str + @param product_descripton: Filter responses to a particular platform. + Valid values are currently: Linux + + @rtype: list + @return: A list tuples containing price and timestamp. + """ + params = {} + if start_time: + params['StartTime'] = start_time + if end_time: + params['EndTime'] = end_time + if instance_type: + params['InstanceType'] = instance_type + if product_description: + params['ProductDescription'] = product_description + return self.get_list('DescribeSpotPriceHistory', params, [('item', SpotPriceHistory)]) + + def request_spot_instances(self, price, image_id, count=1, type=None, + valid_from=None, valid_until=None, + launch_group=None, availability_zone_group=None, + key_name=None, security_groups=None, + user_data=None, addressing_type=None, + instance_type='m1.small', placement=None, + kernel_id=None, ramdisk_id=None, + monitoring_enabled=False, subnet_id=None, + block_device_map=None): + """ + Request instances on the spot market at a particular price. + + :type price: str + :param price: The maximum price of your bid + + :type image_id: string + :param image_id: The ID of the image to run + + :type count: int + :param count: The of instances to requested + + :type type: str + :param type: Type of request. 
Can be 'one-time' or 'persistent'. + Default is one-time. + + :type valid_from: str + :param valid_from: Start date of the request. An ISO8601 time string. + + :type valid_until: str + :param valid_until: End date of the request. An ISO8601 time string. + + :type launch_group: str + :param launch_group: If supplied, all requests will be fulfilled + as a group. + + :type availability_zone_group: str + :param availability_zone_group: If supplied, all requests will be fulfilled + within a single availability zone. + + :type key_name: string + :param key_name: The name of the key pair with which to launch instances + + :type security_groups: list of strings + :param security_groups: The names of the security groups with which to associate instances + + :type user_data: string + :param user_data: The user data passed to the launched instances + + :type instance_type: string + :param instance_type: The type of instance to run (m1.small, m1.large, m1.xlarge) + + :type placement: string + :param placement: The availability zone in which to launch the instances + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the instances + + :type ramdisk_id: string + :param ramdisk_id: The ID of the RAM disk with which to launch the instances + + :type monitoring_enabled: bool + :param monitoring_enabled: Enable CloudWatch monitoring on the instance. + + :type subnet_id: string + :param subnet_id: The subnet ID within which to launch the instances for VPC. + + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated + with the Image. + + :rtype: Reservation + :return: The :class:`boto.ec2.instance.Reservation` associated with the request for machines + """ + params = {'LaunchSpecification.ImageId':image_id, + 'SpotPrice' : price} + if count: + params['InstanceCount'] = count + if valid_from: + params['ValidFrom'] = valid_from + if valid_until: + params['ValidUntil'] = valid_until + if launch_group: + params['LaunchGroup'] = launch_group + if availability_zone_group: + params['AvailabilityZoneGroup'] = availability_zone_group + if key_name: + params['LaunchSpecification.KeyName'] = key_name + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, SecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, + 'LaunchSpecification.SecurityGroup') + if user_data: + params['LaunchSpecification.UserData'] = base64.b64encode(user_data) + if addressing_type: + params['LaunchSpecification.AddressingType'] = addressing_type + if instance_type: + params['LaunchSpecification.InstanceType'] = instance_type + if placement: + params['LaunchSpecification.Placement.AvailabilityZone'] = placement + if kernel_id: + params['LaunchSpecification.KernelId'] = kernel_id + if ramdisk_id: + params['LaunchSpecification.RamdiskId'] = ramdisk_id + if monitoring_enabled: + params['LaunchSpecification.Monitoring.Enabled'] = 'true' + if subnet_id: + params['LaunchSpecification.SubnetId'] = subnet_id + if block_device_map: + block_device_map.build_list_params(params, 'LaunchSpecification.') + return self.get_list('RequestSpotInstances', params, + [('item', SpotInstanceRequest)], + verb='POST') + + + def cancel_spot_instance_requests(self, request_ids): + """ + Cancel the specified Spot Instance Requests. 
+ + :type request_ids: list + :param request_ids: A list of strings of the Request IDs to terminate + + :rtype: list + :return: A list of the instances terminated + """ + params = {} + if request_ids: + self.build_list_params(params, request_ids, 'SpotInstanceRequestId') + return self.get_list('CancelSpotInstanceRequests', params, [('item', Instance)]) + + def get_spot_datafeed_subscription(self): + """ + Return the current spot instance data feed subscription + associated with this account, if any. + + :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription` + :return: The datafeed subscription object or None + """ + return self.get_object('DescribeSpotDatafeedSubscription', + None, SpotDatafeedSubscription) + + def create_spot_datafeed_subscription(self, bucket, prefix): + """ + Create a spot instance datafeed subscription for this account. + + :type bucket: str or unicode + :param bucket: The name of the bucket where spot instance data + will be written. The account issuing this request + must have FULL_CONTROL access to the bucket + specified in the request. + + :type prefix: str or unicode + :param prefix: An optional prefix that will be pre-pended to all + data files written to the bucket. + + :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription` + :return: The datafeed subscription object or None + """ + params = {'Bucket' : bucket} + if prefix: + params['Prefix'] = prefix + return self.get_object('CreateSpotDatafeedSubscription', + params, SpotDatafeedSubscription) + + def delete_spot_datafeed_subscription(self): + """ + Delete the current spot instance data feed subscription + associated with this account + + :rtype: bool + :return: True if successful + """ + return self.get_status('DeleteSpotDatafeedSubscription', None) + + # Zone methods + + def get_all_zones(self, zones=None): + """ + Get all Availability Zones associated with the current region. + + :type zones: list + :param zones: Optional list of zones. If this list is present, + only the Zones associated with these zone names + will be returned. + + :rtype: list of L{boto.ec2.zone.Zone} + :return: The requested Zone objects + """ + params = {} + if zones: + self.build_list_params(params, zones, 'ZoneName') + return self.get_list('DescribeAvailabilityZones', params, [('item', Zone)]) + + # Address methods + + def get_all_addresses(self, addresses=None): + """ + Get all EIP's associated with the current credentials. + + :type addresses: list + :param addresses: Optional list of addresses. If this list is present, + only the Addresses associated with these addresses + will be returned. + + :rtype: list of L{boto.ec2.address.Address} + :return: The requested Address objects + """ + params = {} + if addresses: + self.build_list_params(params, addresses, 'PublicIp') + return self.get_list('DescribeAddresses', params, [('item', Address)]) + + def allocate_address(self): + """ + Allocate a new Elastic IP address and associate it with your account. + + :rtype: L{boto.ec2.address.Address} + :return: The newly allocated Address + """ + return self.get_object('AllocateAddress', None, Address) + + def associate_address(self, instance_id, public_ip): + """ + Associate an Elastic IP address with a currently running instance. 
+ + :type instance_id: string + :param instance_id: The ID of the instance + + :type public_ip: string + :param public_ip: The public IP address + + :rtype: bool + :return: True if successful + """ + params = {'InstanceId' : instance_id, 'PublicIp' : public_ip} + return self.get_status('AssociateAddress', params) + + def disassociate_address(self, public_ip): + """ + Disassociate an Elastic IP address from a currently running instance. + + :type public_ip: string + :param public_ip: The public IP address + + :rtype: bool + :return: True if successful + """ + params = {'PublicIp' : public_ip} + return self.get_status('DisassociateAddress', params) + + def release_address(self, public_ip): + """ + Free up an Elastic IP address + + :type public_ip: string + :param public_ip: The public IP address + + :rtype: bool + :return: True if successful + """ + params = {'PublicIp' : public_ip} + return self.get_status('ReleaseAddress', params) + + # Volume methods + + def get_all_volumes(self, volume_ids=None): + """ + Get all Volumes associated with the current credentials. + + :type volume_ids: list + :param volume_ids: Optional list of volume ids. If this list is present, + only the volumes associated with these volume ids + will be returned. + + :rtype: list of L{boto.ec2.volume.Volume} + :return: The requested Volume objects + """ + params = {} + if volume_ids: + self.build_list_params(params, volume_ids, 'VolumeId') + return self.get_list('DescribeVolumes', params, [('item', Volume)]) + + def create_volume(self, size, zone, snapshot=None): + """ + Create a new EBS Volume. + + :type size: int + :param size: The size of the new volume, in GiB + + :type zone: string or L{boto.ec2.zone.Zone} + :param zone: The availability zone in which the Volume will be created. + + :type snapshot: string or L{boto.ec2.snapshot.Snapshot} + :param snapshot: The snapshot from which the new Volume will be created. + """ + if isinstance(zone, Zone): + zone = zone.name + params = {'AvailabilityZone' : zone} + if size: + params['Size'] = size + if snapshot: + if isinstance(snapshot, Snapshot): + snapshot = snapshot.id + params['SnapshotId'] = snapshot + return self.get_object('CreateVolume', params, Volume) + + def delete_volume(self, volume_id): + """ + Delete an EBS volume. + + :type volume_id: str + :param volume_id: The ID of the volume to be delete. + + :rtype: bool + :return: True if successful + """ + params = {'VolumeId': volume_id} + return self.get_status('DeleteVolume', params) + + def attach_volume(self, volume_id, instance_id, device): + """ + Attach an EBS volume to an EC2 instance. + + :type volume_id: str + :param volume_id: The ID of the EBS volume to be attached. + + :type instance_id: str + :param instance_id: The ID of the EC2 instance to which it will + be attached. + + :type device: str + :param device: The device on the instance through which the + volume will be exposted (e.g. /dev/sdh) + + :rtype: bool + :return: True if successful + """ + params = {'InstanceId' : instance_id, + 'VolumeId' : volume_id, + 'Device' : device} + return self.get_status('AttachVolume', params) + + def detach_volume(self, volume_id, instance_id=None, device=None, force=False): + """ + Detach an EBS volume from an EC2 instance. + + :type volume_id: str + :param volume_id: The ID of the EBS volume to be attached. + + :type instance_id: str + :param instance_id: The ID of the EC2 instance from which it will + be detached. 
+ + :type device: str + :param device: The device on the instance through which the + volume is exposted (e.g. /dev/sdh) + + :type force: bool + :param force: Forces detachment if the previous detachment attempt did + not occur cleanly. This option can lead to data loss or + a corrupted file system. Use this option only as a last + resort to detach a volume from a failed instance. The + instance will not have an opportunity to flush file system + caches nor file system meta data. If you use this option, + you must perform file system check and repair procedures. + + :rtype: bool + :return: True if successful + """ + params = {'VolumeId' : volume_id} + if instance_id: + params['InstanceId'] = instance_id + if device: + params['Device'] = device + if force: + params['Force'] = 'true' + return self.get_status('DetachVolume', params) + + # Snapshot methods + + def get_all_snapshots(self, snapshot_ids=None, owner=None, restorable_by=None): + """ + Get all EBS Snapshots associated with the current credentials. + + :type snapshot_ids: list + :param snapshot_ids: Optional list of snapshot ids. If this list is present, + only the Snapshots associated with these snapshot ids + will be returned. + + :type owner: str + :param owner: If present, only the snapshots owned by the specified user + will be returned. Valid values are: + self | amazon | AWS Account ID + + :type restorable_by: str + :param restorable_by: If present, only the snapshots that are restorable + by the specified account id will be returned. + + :rtype: list of L{boto.ec2.snapshot.Snapshot} + :return: The requested Snapshot objects + """ + params = {} + if snapshot_ids: + self.build_list_params(params, snapshot_ids, 'SnapshotId') + if owner: + params['Owner'] = owner + if restorable_by: + params['RestorableBy'] = restorable_by + return self.get_list('DescribeSnapshots', params, [('item', Snapshot)]) + + def create_snapshot(self, volume_id, description=None): + """ + Create a snapshot of an existing EBS Volume. + + :type volume_id: str + :param volume_id: The ID of the volume to be snapshot'ed + + :type description: str + :param description: A description of the snapshot. Limited to 255 characters. + + :rtype: bool + :return: True if successful + """ + params = {'VolumeId' : volume_id} + if description: + params['Description'] = description[0:255] + return self.get_object('CreateSnapshot', params, Snapshot) + + def delete_snapshot(self, snapshot_id): + params = {'SnapshotId': snapshot_id} + return self.get_status('DeleteSnapshot', params) + + def get_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission'): + """ + Get information about an attribute of a snapshot. Only one attribute can be + specified per call. + + :type snapshot_id: str + :param snapshot_id: The ID of the snapshot. + + :type attribute: str + :param attribute: The requested attribute. Valid values are: + createVolumePermission + + :rtype: list of L{boto.ec2.snapshotattribute.SnapshotAttribute} + :return: The requested Snapshot attribute + """ + params = {'Attribute' : attribute} + if snapshot_id: + params['SnapshotId'] = snapshot_id + return self.get_object('DescribeSnapshotAttribute', params, SnapshotAttribute) + + def modify_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission', + operation='add', user_ids=None, groups=None): + """ + Changes an attribute of an image. 
+ + :type snapshot_id: string + :param snapshot_id: The snapshot id you wish to change + + :type attribute: string + :param attribute: The attribute you wish to change. Valid values are: + createVolumePermission + + :type operation: string + :param operation: Either add or remove (this is required for changing + snapshot ermissions) + + :type user_ids: list + :param user_ids: The Amazon IDs of users to add/remove attributes + + :type groups: list + :param groups: The groups to add/remove attributes. The only valid + value at this time is 'all'. + + """ + params = {'SnapshotId' : snapshot_id, + 'Attribute' : attribute, + 'OperationType' : operation} + if user_ids: + self.build_list_params(params, user_ids, 'UserId') + if groups: + self.build_list_params(params, groups, 'UserGroup') + return self.get_status('ModifySnapshotAttribute', params) + + def reset_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission'): + """ + Resets an attribute of a snapshot to its default value. + + :type snapshot_id: string + :param snapshot_id: ID of the snapshot + + :type attribute: string + :param attribute: The attribute to reset + + :rtype: bool + :return: Whether the operation succeeded or not + """ + params = {'SnapshotId' : snapshot_id, + 'Attribute' : attribute} + return self.get_status('ResetSnapshotAttribute', params) + + # Keypair methods + + def get_all_key_pairs(self, keynames=None): + """ + Get all key pairs associated with your account. + + :type keynames: list + :param keynames: A list of the names of keypairs to retrieve. + If not provided, all key pairs will be returned. + + :rtype: list + :return: A list of :class:`boto.ec2.keypair.KeyPair` + """ + params = {} + if keynames: + self.build_list_params(params, keynames, 'KeyName') + return self.get_list('DescribeKeyPairs', params, [('item', KeyPair)]) + + def get_key_pair(self, keyname): + """ + Convenience method to retrieve a specific keypair (KeyPair). + + :type image_id: string + :param image_id: the ID of the Image to retrieve + + :rtype: :class:`boto.ec2.keypair.KeyPair` + :return: The KeyPair specified or None if it is not found + """ + try: + return self.get_all_key_pairs(keynames=[keyname])[0] + except IndexError: # None of those key pairs available + return None + + def create_key_pair(self, key_name): + """ + Create a new key pair for your account. + This will create the key pair within the region you + are currently connected to. + + :type key_name: string + :param key_name: The name of the new keypair + + :rtype: :class:`boto.ec2.keypair.KeyPair` + :return: The newly created :class:`boto.ec2.keypair.KeyPair`. + The material attribute of the new KeyPair object + will contain the the unencrypted PEM encoded RSA private key. + """ + params = {'KeyName':key_name} + return self.get_object('CreateKeyPair', params, KeyPair) + + def delete_key_pair(self, key_name): + """ + Delete a key pair from your account. + + :type key_name: string + :param key_name: The name of the keypair to delete + """ + params = {'KeyName':key_name} + return self.get_status('DeleteKeyPair', params) + + # SecurityGroup methods + + def get_all_security_groups(self, groupnames=None): + """ + Get all security groups associated with your account in a region. + + :type groupnames: list + :param groupnames: A list of the names of security groups to retrieve. + If not provided, all security groups will be returned. 
+ + :rtype: list + :return: A list of :class:`boto.ec2.securitygroup.SecurityGroup` + """ + params = {} + if groupnames: + self.build_list_params(params, groupnames, 'GroupName') + return self.get_list('DescribeSecurityGroups', params, [('item', SecurityGroup)]) + + def create_security_group(self, name, description): + """ + Create a new security group for your account. + This will create the security group within the region you + are currently connected to. + + :type name: string + :param name: The name of the new security group + + :type description: string + :param description: The description of the new security group + + :rtype: :class:`boto.ec2.securitygroup.SecurityGroup` + :return: The newly created :class:`boto.ec2.keypair.KeyPair`. + """ + params = {'GroupName':name, 'GroupDescription':description} + group = self.get_object('CreateSecurityGroup', params, SecurityGroup) + group.name = name + group.description = description + return group + + def delete_security_group(self, name): + """ + Delete a security group from your account. + + :type key_name: string + :param key_name: The name of the keypair to delete + """ + params = {'GroupName':name} + return self.get_status('DeleteSecurityGroup', params) + + def authorize_security_group(self, group_name, src_security_group_name=None, + src_security_group_owner_id=None, + ip_protocol=None, from_port=None, to_port=None, + cidr_ip=None): + """ + Add a new rule to an existing security group. + You need to pass in either src_security_group_name and + src_security_group_owner_id OR ip_protocol, from_port, to_port, + and cidr_ip. In other words, either you are authorizing another + group or you are authorizing some ip-based rule. + + :type group_name: string + :param group_name: The name of the security group you are adding + the rule to. + + :type src_security_group_name: string + :param src_security_group_name: The name of the security group you are + granting access to. + + :type src_security_group_owner_id: string + :param src_security_group_owner_id: The ID of the owner of the security group you are + granting access to. + + :type ip_protocol: string + :param ip_protocol: Either tcp | udp | icmp + + :type from_port: int + :param from_port: The beginning port number you are enabling + + :type to_port: int + :param to_port: The ending port number you are enabling + + :type to_port: string + :param to_port: The CIDR block you are providing access to. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'GroupName':group_name} + if src_security_group_name: + params['SourceSecurityGroupName'] = src_security_group_name + if src_security_group_owner_id: + params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id + if ip_protocol: + params['IpProtocol'] = ip_protocol + if from_port: + params['FromPort'] = from_port + if to_port: + params['ToPort'] = to_port + if cidr_ip: + params['CidrIp'] = urllib.quote(cidr_ip) + return self.get_status('AuthorizeSecurityGroupIngress', params) + + def revoke_security_group(self, group_name, src_security_group_name=None, + src_security_group_owner_id=None, + ip_protocol=None, from_port=None, to_port=None, + cidr_ip=None): + """ + Remove an existing rule from an existing security group. + You need to pass in either src_security_group_name and + src_security_group_owner_id OR ip_protocol, from_port, to_port, + and cidr_ip. In other words, either you are revoking another + group or you are revoking some ip-based rule. 
+ + :type group_name: string + :param group_name: The name of the security group you are removing + the rule from. + + :type src_security_group_name: string + :param src_security_group_name: The name of the security group you are + revoking access to. + + :type src_security_group_owner_id: string + :param src_security_group_owner_id: The ID of the owner of the security group you are + revoking access to. + + :type ip_protocol: string + :param ip_protocol: Either tcp | udp | icmp + + :type from_port: int + :param from_port: The beginning port number you are disabling + + :type to_port: int + :param to_port: The ending port number you are disabling + + :type to_port: string + :param to_port: The CIDR block you are revoking access to. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'GroupName':group_name} + if src_security_group_name: + params['SourceSecurityGroupName'] = src_security_group_name + if src_security_group_owner_id: + params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id + if ip_protocol: + params['IpProtocol'] = ip_protocol + if from_port: + params['FromPort'] = from_port + if to_port: + params['ToPort'] = to_port + if cidr_ip: + params['CidrIp'] = cidr_ip + return self.get_status('RevokeSecurityGroupIngress', params) + + # + # Regions + # + + def get_all_regions(self): + """ + Get all available regions for the EC2 service. + + :rtype: list + :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` + """ + return self.get_list('DescribeRegions', None, [('item', RegionInfo)]) + + # + # Reservation methods + # + + def get_all_reserved_instances_offerings(self, reserved_instances_id=None, + instance_type=None, + availability_zone=None, + product_description=None): + """ + Describes Reserved Instance offerings that are available for purchase. + + :type reserved_instances_id: str + :param reserved_instances_id: Displays Reserved Instances with the specified offering IDs. + + :type instance_type: str + :param instance_type: Displays Reserved Instances of the specified instance type. + + :type availability_zone: str + :param availability_zone: Displays Reserved Instances within the specified Availability Zone. + + :type product_description: str + :param product_description: Displays Reserved Instances with the specified product description. + + :rtype: list + :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstancesOffering` + """ + params = {} + if reserved_instances_id: + params['ReservedInstancesId'] = reserved_instances_id + if instance_type: + params['InstanceType'] = instance_type + if availability_zone: + params['AvailabilityZone'] = availability_zone + if product_description: + params['ProductDescription'] = product_description + + return self.get_list('DescribeReservedInstancesOfferings', + params, [('item', ReservedInstancesOffering)]) + + def get_all_reserved_instances(self, reserved_instances_id=None): + """ + Describes Reserved Instance offerings that are available for purchase. + + :type reserved_instance_ids: list + :param reserved_instance_ids: A list of the reserved instance ids that will be returned. + If not provided, all reserved instances will be returned. 
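A short sketch of the region and Reserved Instance listing calls above; the instance type filter is an illustrative assumption::

    import boto

    conn = boto.connect_ec2()
    for region in conn.get_all_regions():
        print region.name, region.endpoint

    for offering in conn.get_all_reserved_instances_offerings(instance_type='m1.small'):
        print offering.id, offering.duration, offering.fixed_price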
+ + :rtype: list + :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance` + """ + params = {} + if reserved_instances_id: + self.build_list_params(params, reserved_instances_id, 'ReservedInstancesId') + return self.get_list('DescribeReservedInstances', + params, [('item', ReservedInstance)]) + + def purchase_reserved_instance_offering(self, reserved_instances_offering_id, + instance_count=1): + """ + Purchase a Reserved Instance for use with your account. + ** CAUTION ** + This request can result in large amounts of money being charged to your + AWS account. Use with caution! + + :type reserved_instances_offering_id: string + :param reserved_instances_offering_id: The offering ID of the Reserved + Instance to purchase + + :type instance_count: int + :param instance_count: The number of Reserved Instances to purchase. + Default value is 1. + + :rtype: :class:`boto.ec2.reservedinstance.ReservedInstance` + :return: The newly created Reserved Instance + """ + params = {'ReservedInstancesOfferingId' : reserved_instances_offering_id, + 'InstanceCount' : instance_count} + return self.get_object('PurchaseReservedInstancesOffering', params, ReservedInstance) + + # + # Monitoring + # + + def monitor_instance(self, instance_id): + """ + Enable CloudWatch monitoring for the supplied instance. + + :type instance_id: string + :param instance_id: The instance id + + :rtype: list + :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` + """ + params = {'InstanceId' : instance_id} + return self.get_list('MonitorInstances', params, [('item', InstanceInfo)]) + + def unmonitor_instance(self, instance_id): + """ + Disable CloudWatch monitoring for the supplied instance. + + :type instance_id: string + :param instance_id: The instance id + + :rtype: list + :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo` + """ + params = {'InstanceId' : instance_id} + return self.get_list('UnmonitorInstances', params, [('item', InstanceInfo)]) + + # + # Bundle Windows Instances + # + + def bundle_instance(self, instance_id, + s3_bucket, + s3_prefix, + s3_upload_policy): + """ + Bundle Windows instance. + + :type instance_id: string + :param instance_id: The instance id + + :type s3_bucket: string + :param s3_bucket: The bucket in which the AMI should be stored. + + :type s3_prefix: string + :param s3_prefix: The beginning of the file name for the AMI. + + :type s3_upload_policy: string + :param s3_upload_policy: Base64 encoded policy that specifies condition and permissions + for Amazon EC2 to upload the user's image into Amazon S3. + """ + + params = {'InstanceId' : instance_id, + 'Storage.S3.Bucket' : s3_bucket, + 'Storage.S3.Prefix' : s3_prefix, + 'Storage.S3.UploadPolicy' : s3_upload_policy} + params['Storage.S3.AWSAccessKeyId'] = self.aws_access_key_id + local_hmac = self.hmac.copy() + local_hmac.update(s3_upload_policy) + s3_upload_policy_signature = base64.b64encode(local_hmac.digest()) + params['Storage.S3.UploadPolicySignature'] = s3_upload_policy_signature + return self.get_object('BundleInstance', params, BundleInstanceTask) + + def get_all_bundle_tasks(self, bundle_ids=None): + """ + Retrieve current bundling tasks. If no bundle id is specified, all tasks are retrieved. + + :type bundle_ids: list + :param bundle_ids: A list of strings containing identifiers for + previously created bundling tasks. 
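The UploadPolicySignature that bundle_instance sends appears to be a Base64-encoded SHA-1 HMAC of the (already Base64-encoded) upload policy, keyed with the account's secret key. A standalone sketch of that computation, with placeholder policy text and key (the connection normally reuses its own HMAC object for this)::

    import base64
    import hmac
    import hashlib

    policy_json = '{"expiration": "2010-06-01T12:00:00Z", "conditions": [{"bucket": "example-bucket"}]}'
    s3_upload_policy = base64.b64encode(policy_json)
    secret_key = 'EXAMPLE-SECRET-KEY'    # placeholder; never hard-code real credentials

    signature = base64.b64encode(
        hmac.new(secret_key, s3_upload_policy, hashlib.sha1).digest())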
+ """ + + params = {} + if bundle_ids: + self.build_list_params(params, bundle_ids, 'BundleId') + return self.get_list('DescribeBundleTasks', params, [('item', BundleInstanceTask)]) + + def cancel_bundle_task(self, bundle_id): + """ + Cancel a previously submitted bundle task + + :type bundle_id: string + :param bundle_id: The identifier of the bundle task to cancel. + """ + + params = {'BundleId' : bundle_id} + return self.get_object('CancelBundleTask', params, BundleInstanceTask) + + def get_password_data(self, instance_id): + """ + Get encrypted administrator password for a Windows instance. + + :type instance_id: string + :param instance_id: The identifier of the instance to retrieve the password for. + """ + + params = {'InstanceId' : instance_id} + rs = self.get_object('GetPasswordData', params, ResultSet) + return rs.passwordData + diff --git a/vendor/boto/boto/ec2/ec2object.py b/vendor/boto/boto/ec2/ec2object.py new file mode 100644 index 000000000000..9ffab5d99583 --- /dev/null +++ b/vendor/boto/boto/ec2/ec2object.py @@ -0,0 +1,41 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Object +""" + +class EC2Object(object): + + def __init__(self, connection=None): + self.connection = connection + if self.connection and hasattr(self.connection, 'region'): + self.region = connection.region + else: + self.region = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + + diff --git a/vendor/boto/boto/ec2/elb/__init__.py b/vendor/boto/boto/ec2/elb/__init__.py new file mode 100644 index 000000000000..55e846f08167 --- /dev/null +++ b/vendor/boto/boto/ec2/elb/__init__.py @@ -0,0 +1,238 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This module provides an interface to the Elastic Compute Cloud (EC2) +load balancing service from AWS. +""" +from boto.connection import AWSQueryConnection +from boto.ec2.instanceinfo import InstanceInfo +from boto.ec2.elb.loadbalancer import LoadBalancer +from boto.ec2.elb.instancestate import InstanceState +from boto.ec2.elb.healthcheck import HealthCheck +import boto + +class ELBConnection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'elb_version', '2009-05-15') + Endpoint = boto.config.get('Boto', 'elb_endpoint', 'elasticloadbalancing.amazonaws.com') + SignatureVersion = '1' + #ResponseError = EC2ResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=False, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host=Endpoint, debug=0, + https_connection_factory=None, path='/'): + """ + Init method to create a new connection to EC2 Load Balancing Service. + + B{Note:} The host argument is overridden by the host specified in the boto configuration file. + """ + AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + host, debug, https_connection_factory, path) + + def build_list_params(self, params, items, label): + if isinstance(items, str): + items = [items] + for i in range(1, len(items)+1): + params[label % i] = items[i-1] + + def get_all_load_balancers(self, load_balancer_name=None): + """ + Retrieve all load balancers associated with your account. + + :type load_balancer_names: str + :param load_balancer_names: An optional filter string to get only one ELB + + :rtype: list + :return: A list of :class:`boto.ec2.elb.loadbalancer.LoadBalancer` + """ + params = {} + if load_balancer_name: + #self.build_list_params(params, load_balancer_names, 'LoadBalancerName.%d') + params['LoadBalancerName'] = load_balancer_name + return self.get_list('DescribeLoadBalancers', params, [('member', LoadBalancer)]) + + + def create_load_balancer(self, name, zones, listeners): + """ + Create a new load balancer for your account. + + :type name: string + :param name: The mnemonic name associated with the new load balancer + + :type zones: List of strings + :param zones: The names of the availability zone(s) to add. + + :type listeners: List of tuples + :param listeners: Each tuple contains three values. + (LoadBalancerPortNumber, InstancePortNumber, Protocol) + where LoadBalancerPortNumber and InstancePortNumber are + integer values between 1 and 65535 and Protocol is a + string containing either 'TCP' or 'HTTP'. 
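A usage sketch for the ELB connection described here, assuming boto.connect_elb is exposed at the package level as in upstream boto of this vintage; the balancer name, zone, and ports are placeholders::

    import boto

    elb = boto.connect_elb()
    lb = elb.create_load_balancer('example-lb', ['us-east-1a'],
                                  [(80, 8080, 'HTTP')])  # (lb port, instance port, protocol)
    print lb.dns_name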
+ + :rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer` + :return: The newly created :class:`boto.ec2.elb.loadbalancer.LoadBalancer` + """ + params = {'LoadBalancerName' : name} + for i in range(0, len(listeners)): + params['Listeners.member.%d.LoadBalancerPort' % (i+1)] = listeners[i][0] + params['Listeners.member.%d.InstancePort' % (i+1)] = listeners[i][1] + params['Listeners.member.%d.Protocol' % (i+1)] = listeners[i][2] + self.build_list_params(params, zones, 'AvailabilityZones.member.%d') + load_balancer = self.get_object('CreateLoadBalancer', params, LoadBalancer) + load_balancer.name = name + load_balancer.listeners = listeners + load_balancer.availability_zones = zones + return load_balancer + + def delete_load_balancer(self, name): + """ + Delete a Load Balancer from your account. + + :type name: string + :param name: The name of the Load Balancer to delete + """ + params = {'LoadBalancerName': name} + return self.get_status('DeleteLoadBalancer', params) + + def enable_availability_zones(self, load_balancer_name, zones_to_add): + """ + Add availability zones to an existing Load Balancer + All zones must be in the same region as the Load Balancer + Adding zones that are already registered with the Load Balancer + has no effect. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type zones: List of strings + :param zones: The name of the zone(s) to add. + + :rtype: List of strings + :return: An updated list of zones for this Load Balancer. + + """ + params = {'LoadBalancerName' : load_balancer_name} + self.build_list_params(params, zones_to_add, 'AvailabilityZones.member.%d') + return self.get_list('EnableAvailabilityZonesForLoadBalancer', params, None) + + def disable_availability_zones(self, load_balancer_name, zones_to_remove): + """ + Remove availability zones from an existing Load Balancer. + All zones must be in the same region as the Load Balancer. + Removing zones that are not registered with the Load Balancer + has no effect. + You cannot remove all zones from an Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type zones: List of strings + :param zones: The name of the zone(s) to remove. + + :rtype: List of strings + :return: An updated list of zones for this Load Balancer. + + """ + params = {'LoadBalancerName' : load_balancer_name} + self.build_list_params(params, zones_to_remove, 'AvailabilityZones.member.%d') + return self.get_list('DisableAvailabilityZonesForLoadBalancer', params, None) + + def register_instances(self, load_balancer_name, instances): + """ + Add new Instances to an existing Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances to add. + + :rtype: List of strings + :return: An updated list of instances for this Load Balancer. + + """ + params = {'LoadBalancerName' : load_balancer_name} + self.build_list_params(params, instances, 'Instances.member.%d.InstanceId') + return self.get_list('RegisterInstancesWithLoadBalancer', params, [('member', InstanceInfo)]) + + def deregister_instances(self, load_balancer_name, instances): + """ + Remove Instances from an existing Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances to remove. 
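Continuing the sketch above with the zone and instance registration calls (instance ids and zone names are placeholders)::

    import boto

    elb = boto.connect_elb()
    elb.enable_availability_zones('example-lb', ['us-east-1b'])
    elb.register_instances('example-lb', ['i-12345678', 'i-87654321'])
    for state in elb.describe_instance_health('example-lb'):
        print state.instance_id, state.state
    elb.deregister_instances('example-lb', ['i-87654321'])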
+ + :rtype: List of strings + :return: An updated list of instances for this Load Balancer. + + """ + params = {'LoadBalancerName' : load_balancer_name} + self.build_list_params(params, instances, 'Instances.member.%d.InstanceId') + return self.get_list('DeregisterInstancesFromLoadBalancer', params, [('member', InstanceInfo)]) + + def describe_instance_health(self, load_balancer_name, instances=None): + """ + Get current state of all Instances registered to an Load Balancer. + + :type load_balancer_name: string + :param load_balancer_name: The name of the Load Balancer + + :type instances: List of strings + :param instances: The instance ID's of the EC2 instances + to return status for. If not provided, + the state of all instances will be returned. + + :rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState` + :return: list of state info for instances in this Load Balancer. + + """ + params = {'LoadBalancerName' : load_balancer_name} + if instances: + self.build_list_params(params, instances, 'instances.member.%d') + return self.get_list('DescribeInstanceHealth', params, [('member', InstanceState)]) + + def configure_health_check(self, name, health_check): + """ + Define a health check for the EndPoints. + + :type name: string + :param name: The mnemonic name associated with the new access point + + :type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck` + :param health_check: A HealthCheck object populated with the desired + values. + + :rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck` + :return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck` + """ + params = {'LoadBalancerName' : name, + 'HealthCheck.Timeout' : health_check.timeout, + 'HealthCheck.Target' : health_check.target, + 'HealthCheck.Interval' : health_check.interval, + 'HealthCheck.UnhealthyThreshold' : health_check.unhealthy_threshold, + 'HealthCheck.HealthyThreshold' : health_check.healthy_threshold} + return self.get_object('ConfigureHealthCheck', params, HealthCheck) diff --git a/vendor/boto/boto/ec2/elb/healthcheck.py b/vendor/boto/boto/ec2/elb/healthcheck.py new file mode 100644 index 000000000000..5a3edbc639e7 --- /dev/null +++ b/vendor/boto/boto/ec2/elb/healthcheck.py @@ -0,0 +1,68 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class HealthCheck(object): + """ + Represents an EC2 Access Point Health Check + """ + + def __init__(self, access_point=None, interval=30, target=None, + healthy_threshold=3, timeout=5, unhealthy_threshold=5): + self.access_point = access_point + self.interval = interval + self.target = target + self.healthy_threshold = healthy_threshold + self.timeout = timeout + self.unhealthy_threshold = unhealthy_threshold + + def __repr__(self): + return 'HealthCheck:%s' % self.target + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Interval': + self.interval = int(value) + elif name == 'Target': + self.target = value + elif name == 'HealthyThreshold': + self.healthy_threshold = int(value) + elif name == 'Timeout': + self.timeout = int(value) + elif name == 'UnhealthyThreshold': + self.unhealthy_threshold = int(value) + else: + setattr(self, name, value) + + def update(self): + if not self.access_point: + return + + new_hc = self.connection.configure_health_check(self.access_point, + self) + self.interval = new_hc.interval + self.target = new_hc.target + self.healthy_threshold = new_hc.healthy_threshold + self.unhealthy_threshold = new_hc.unhealthy_threshold + self.timeout = new_hc.timeout + + diff --git a/vendor/boto/boto/ec2/elb/instancestate.py b/vendor/boto/boto/ec2/elb/instancestate.py new file mode 100644 index 000000000000..4a9b0d479a52 --- /dev/null +++ b/vendor/boto/boto/ec2/elb/instancestate.py @@ -0,0 +1,54 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
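A hedged sketch of wiring a HealthCheck to a balancer via configure_health_check; the target URL, thresholds, and balancer name are illustrative::

    import boto
    from boto.ec2.elb.healthcheck import HealthCheck

    elb = boto.connect_elb()
    hc = HealthCheck(interval=20, target='HTTP:8080/health',
                     healthy_threshold=3, unhealthy_threshold=5, timeout=5)
    elb.configure_health_check('example-lb', hc)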
+ +class InstanceState(object): + """ + Represents the state of an EC2 Load Balancer Instance + """ + + def __init__(self, load_balancer=None, description=None, + state=None, instance_id=None, reason_code=None): + self.load_balancer = load_balancer + self.description = description + self.state = state + self.instance_id = instance_id + self.reason_code = reason_code + + def __repr__(self): + return 'InstanceState:(%s,%s)' % (self.instance_id, self.state) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Description': + self.description = value + elif name == 'State': + self.state = value + elif name == 'InstanceId': + self.instance_id = value + elif name == 'ReasonCode': + self.reason_code = value + else: + setattr(self, name, value) + + + diff --git a/vendor/boto/boto/ec2/elb/listelement.py b/vendor/boto/boto/ec2/elb/listelement.py new file mode 100644 index 000000000000..5be45992a0e0 --- /dev/null +++ b/vendor/boto/boto/ec2/elb/listelement.py @@ -0,0 +1,31 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class ListElement(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'member': + self.append(value) + + diff --git a/vendor/boto/boto/ec2/elb/listener.py b/vendor/boto/boto/ec2/elb/listener.py new file mode 100644 index 000000000000..ab482c2e76b2 --- /dev/null +++ b/vendor/boto/boto/ec2/elb/listener.py @@ -0,0 +1,64 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Listener(object): + """ + Represents an EC2 Load Balancer Listener tuple + """ + + def __init__(self, load_balancer=None, load_balancer_port=0, + instance_port=0, protocol=''): + self.load_balancer = load_balancer + self.load_balancer_port = load_balancer_port + self.instance_port = instance_port + self.protocol = protocol + + def __repr__(self): + return "(%d, %d, '%s')" % (self.load_balancer_port, self.instance_port, self.protocol) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'LoadBalancerPort': + self.load_balancer_port = int(value) + elif name == 'InstancePort': + self.instance_port = int(value) + elif name == 'Protocol': + self.protocol = value + else: + setattr(self, name, value) + + def get_tuple(self): + return self.load_balancer_port, self.instance_port, self.protocol + + def __getitem__(self, key): + if key == 0: + return self.load_balancer_port + if key == 1: + return self.instance_port + if key == 2: + return self.protocol + raise KeyError + + + + diff --git a/vendor/boto/boto/ec2/elb/loadbalancer.py b/vendor/boto/boto/ec2/elb/loadbalancer.py new file mode 100644 index 000000000000..0a90389cf6d6 --- /dev/null +++ b/vendor/boto/boto/ec2/elb/loadbalancer.py @@ -0,0 +1,142 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +from boto.ec2.elb.healthcheck import HealthCheck +from boto.ec2.elb.listener import Listener +from boto.ec2.elb.listelement import ListElement +from boto.ec2.instanceinfo import InstanceInfo +from boto.resultset import ResultSet + +class LoadBalancer(object): + """ + Represents an EC2 Load Balancer + """ + + def __init__(self, connection=None, name=None, endpoints=None): + self.connection = connection + self.name = name + self.listeners = None + self.health_check = None + self.dns_name = None + self.created_time = None + self.instances = None + self.availability_zones = ListElement() + + def __repr__(self): + return 'LoadBalancer:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'HealthCheck': + self.health_check = HealthCheck(self) + return self.health_check + elif name == 'Listeners': + self.listeners = ResultSet([('member', Listener)]) + return self.listeners + elif name == 'AvailabilityZones': + return self.availability_zones + elif name == 'Instances': + self.instances = ResultSet([('member', InstanceInfo)]) + return self.instances + else: + return None + + def endElement(self, name, value, connection): + if name == 'LoadBalancerName': + self.name = value + elif name == 'DNSName': + self.dns_name = value + elif name == 'CreatedTime': + self.created_time = value + elif name == 'InstanceId': + self.instances.append(value) + else: + setattr(self, name, value) + + def enable_zones(self, zones): + """ + Enable availability zones to this Access Point. + All zones must be in the same region as the Access Point. + + :type zones: string or List of strings + :param zones: The name of the zone(s) to add. + + """ + if isinstance(zones, str) or isinstance(zones, unicode): + zones = [zones] + new_zones = self.connection.enable_availability_zones(self.name, zones) + self.availability_zones = new_zones + + def disable_zones(self, zones): + """ + Disable availability zones from this Access Point. + + :type zones: string or List of strings + :param zones: The name of the zone(s) to add. + + """ + if isinstance(zones, str) or isinstance(zones, unicode): + zones = [zones] + new_zones = self.connection.disable_availability_zones(self.name, zones) + self.availability_zones = new_zones + + def register_instances(self, instances): + """ + Add instances to this Load Balancer + All instances must be in the same region as the Load Balancer. + Adding endpoints that are already registered with the Load Balancer + has no effect. + + :type zones: string or List of instance id's + :param zones: The name of the endpoint(s) to add. + + """ + if isinstance(instances, str) or isinstance(instances, unicode): + instances = [instances] + new_instances = self.connection.register_instances(self.name, instances) + self.instances = new_instances + + def deregister_instances(self, instances): + """ + Remove instances from this Load Balancer. + Removing instances that are not registered with the Load Balancer + has no effect. + + :type zones: string or List of instance id's + :param zones: The name of the endpoint(s) to add. 
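The same operations are also available as convenience methods on the LoadBalancer object itself; a sketch, assuming at least one balancer already exists in the account::

    import boto

    elb = boto.connect_elb()
    lb = elb.get_all_load_balancers()[0]
    lb.enable_zones('us-east-1b')          # accepts a single zone name or a list
    lb.register_instances(['i-12345678'])
    lb.deregister_instances('i-12345678')
    lb.delete()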
+ + """ + if isinstance(instances, str) or isinstance(instances, unicode): + instances = [instances] + new_instances = self.connection.deregister_instances(self.name, instances) + self.instances = new_instances + + def delete(self): + """ + Delete this load balancer + """ + return self.connection.delete_load_balancer(self.name) + + def configure_health_check(self, health_check): + self.connection.configure_health_check(self.name, health_check) + + def get_instance_health(self, instances=None): + self.connection.describe_instance_health(self.name, instances) + diff --git a/vendor/boto/boto/ec2/image.py b/vendor/boto/boto/ec2/image.py new file mode 100644 index 000000000000..c9b7fec72588 --- /dev/null +++ b/vendor/boto/boto/ec2/image.py @@ -0,0 +1,250 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +from boto.ec2.ec2object import EC2Object +from boto.ec2.blockdevicemapping import BlockDeviceMapping + +class ProductCodes(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'productCode': + self.append(value) + +class Image(EC2Object): + """ + Represents an EC2 Image + """ + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.location = None + self.state = None + self.ownerId = None + self.owner_alias = None + self.is_public = False + self.architecture = None + self.platform = None + self.type = None + self.kernel_id = None + self.ramdisk_id = None + self.name = None + self.description = None + self.product_codes = ProductCodes() + self.block_device_mapping = None + self.root_device_type = None + self.root_device_name = None + + def __repr__(self): + return 'Image:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + elif name == 'productCodes': + return self.product_codes + else: + return None + + def endElement(self, name, value, connection): + if name == 'imageId': + self.id = value + elif name == 'imageLocation': + self.location = value + elif name == 'imageState': + self.state = value + elif name == 'imageOwnerId': + self.ownerId = value + elif name == 'isPublic': + if value == 'false': + self.is_public = False + elif value == 'true': + self.is_public = True + else: + raise Exception( + 'Unexpected value of isPublic %s for image %s'%( + value, + self.id + ) + ) + elif name == 'architecture': + self.architecture = value + elif name == 'imageType': + self.type = value + elif name == 'kernelId': + self.kernel_id = value + elif name == 'ramdiskId': + self.ramdisk_id = value + elif name == 'imageOwnerAlias': + self.owner_alias = value + elif name == 'platform': + self.platform = value + elif name == 'name': + self.name = value + elif name == 'description': + self.description = value + elif name == 'rootDeviceType': + self.root_device_type = value + elif name == 'rootDeviceName': + self.root_device_name = value + else: + setattr(self, name, value) + + def run(self, min_count=1, max_count=1, key_name=None, + security_groups=None, user_data=None, + addressing_type=None, instance_type='m1.small', placement=None, + kernel_id=None, ramdisk_id=None, + monitoring_enabled=False, subnet_id=None, + block_device_map=None): + """ + Runs this instance. + + :type min_count: int + :param min_count: The minimum number of instances to start + + :type max_count: int + :param max_count: The maximum number of instances to start + + :type key_name: string + :param key_name: The keypair to run this instance with. + + :type security_groups: + :param security_groups: + + :type user_data: + :param user_data: + + :type addressing_type: + :param daddressing_type: + + :type instance_type: string + :param instance_type: The type of instance to run (m1.small, m1.large, m1.xlarge) + + :type placement: + :param placement: + + :type kernel_id: string + :param kernel_id: The ID of the kernel with which to launch the instances + + :type ramdisk_id: string + :param ramdisk_id: The ID of the RAM disk with which to launch the instances + + :type monitoring_enabled: bool + :param monitoring_enabled: Enable CloudWatch monitoring on the instance. + + :type subnet_id: string + :param subnet_id: The subnet ID within which to launch the instances for VPC. 
+ + :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_map: A BlockDeviceMapping data structure + describing the EBS volumes associated + with the Image. + + :rtype: Reservation + :return: The :class:`boto.ec2.instance.Reservation` associated with the request for machines + """ + return self.connection.run_instances(self.id, min_count, max_count, + key_name, security_groups, + user_data, addressing_type, + instance_type, placement, + kernel_id, ramdisk_id, + monitoring_enabled, subnet_id, + block_device_map) + + def deregister(self): + return self.connection.deregister_image(self.id) + + def get_launch_permissions(self): + img_attrs = self.connection.get_image_attribute(self.id, + 'launchPermission') + return img_attrs.attrs + + def set_launch_permissions(self, user_ids=None, group_names=None): + return self.connection.modify_image_attribute(self.id, + 'launchPermission', + 'add', + user_ids, + group_names) + + def remove_launch_permissions(self, user_ids=None, group_names=None): + return self.connection.modify_image_attribute(self.id, + 'launchPermission', + 'remove', + user_ids, + group_names) + + def reset_launch_attributes(self): + return self.connection.reset_image_attribute(self.id, + 'launchPermission') + + def get_kernel(self): + img_attrs =self.connection.get_image_attribute(self.id, 'kernel') + return img_attrs.kernel + + def get_ramdisk(self): + img_attrs = self.connection.get_image_attribute(self.id, 'ramdisk') + return img_attrs.ramdisk + +class ImageAttribute: + + def __init__(self, parent=None): + self.name = None + self.kernel = None + self.ramdisk = None + self.attrs = {} + + def startElement(self, name, attrs, connection): + if name == 'blockDeviceMapping': + self.attrs['block_device_mapping'] = BlockDeviceMapping() + return self.attrs['block_device_mapping'] + else: + return None + + def endElement(self, name, value, connection): + if name == 'launchPermission': + self.name = 'launch_permission' + elif name == 'group': + if self.attrs.has_key('groups'): + self.attrs['groups'].append(value) + else: + self.attrs['groups'] = [value] + elif name == 'userId': + if self.attrs.has_key('user_ids'): + self.attrs['user_ids'].append(value) + else: + self.attrs['user_ids'] = [value] + elif name == 'productCode': + if self.attrs.has_key('product_codes'): + self.attrs['product_codes'].append(value) + else: + self.attrs['product_codes'] = [value] + elif name == 'imageId': + self.image_id = value + elif name == 'kernel': + self.kernel = value + elif name == 'ramdisk': + self.ramdisk = value + else: + setattr(self, name, value) diff --git a/vendor/boto/boto/ec2/instance.py b/vendor/boto/boto/ec2/instance.py new file mode 100644 index 000000000000..78bf55d5d495 --- /dev/null +++ b/vendor/boto/boto/ec2/instance.py @@ -0,0 +1,294 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
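A sketch of driving the Image helpers above: look up an AMI, launch it, and adjust its launch permissions (the AMI id, key name, and account id are placeholders)::

    import boto

    conn = boto.connect_ec2()
    img = conn.get_image('ami-12345678')
    reservation = img.run(key_name='example-key', instance_type='m1.small')
    print [i.id for i in reservation.instances]

    img.set_launch_permissions(user_ids=['111122223333'])   # share with another account
    print img.get_launch_permissions()
    img.remove_launch_permissions(user_ids=['111122223333'])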
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Instance +""" +import boto +from boto.ec2.ec2object import EC2Object +from boto.resultset import ResultSet +from boto.ec2.address import Address +from boto.ec2.blockdevicemapping import BlockDeviceMapping +from boto.ec2.image import ProductCodes +import base64 + +class Reservation(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.owner_id = None + self.groups = [] + self.instances = [] + + def __repr__(self): + return 'Reservation:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'instancesSet': + self.instances = ResultSet([('item', Instance)]) + return self.instances + elif name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + else: + return None + + def endElement(self, name, value, connection): + if name == 'reservationId': + self.id = value + elif name == 'ownerId': + self.owner_id = value + else: + setattr(self, name, value) + + def stop_all(self): + for instance in self.instances: + instance.stop() + +class Instance(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.dns_name = None + self.public_dns_name = None + self.private_dns_name = None + self.state = None + self.state_code = None + self.key_name = None + self.shutdown_state = None + self.previous_state = None + self.instance_type = None + self.instance_class = None + self.launch_time = None + self.image_id = None + self.placement = None + self.kernel = None + self.ramdisk = None + self.product_codes = ProductCodes() + self.ami_launch_index = None + self.monitored = False + self.instance_class = None + self.spot_instance_request_id = None + self.subnet_id = None + self.vpc_id = None + self.private_ip_address = None + self.ip_address = None + self.requester_id = None + self._in_monitoring_element = False + self.persistent = False + self.root_device_name = None + self.root_device_type = None + self.block_device_mapping = None + self.state_reason = None + + def __repr__(self): + return 'Instance:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'monitoring': + self._in_monitoring_element = True + elif name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + elif name == 'productCodes': + return self.product_codes + elif name == 'stateReason': + self.state_reason = StateReason() + return self.state_reason + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.id = value + elif name == 'imageId': + self.image_id = value + elif name == 'dnsName' or name == 'publicDnsName': + self.dns_name = value # backwards compatibility + self.public_dns_name = value + elif name == 'privateDnsName': + self.private_dns_name = value + elif name == 'keyName': + self.key_name = value + elif name == 'amiLaunchIndex': + self.ami_launch_index = value + elif name == 'shutdownState': + self.shutdown_state = value + elif name == 'previousState': + 
self.previous_state = value + elif name == 'name': + self.state = value + elif name == 'code': + try: + self.state_code = int(value) + except ValueError: + boto.log.warning('Error converting code (%s) to int' % value) + self.state_code = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'instanceClass': + self.instance_class = value + elif name == 'rootDeviceName': + self.root_device_name = value + elif name == 'rootDeviceType': + self.root_device_type = value + elif name == 'launchTime': + self.launch_time = value + elif name == 'availabilityZone': + self.placement = value + elif name == 'placement': + pass + elif name == 'kernelId': + self.kernel = value + elif name == 'ramdiskId': + self.ramdisk = value + elif name == 'state': + if self._in_monitoring_element: + if value == 'enabled': + self.monitored = True + self._in_monitoring_element = False + elif name == 'instanceClass': + self.instance_class = value + elif name == 'spotInstanceRequestId': + self.spot_instance_request_id = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'vpcId': + self.vpc_id = value + elif name == 'privateIpAddress': + self.private_ip_address = value + elif name == 'ipAddress': + self.ip_address = value + elif name == 'requesterId': + self.requester_id = value + elif name == 'persistent': + if value == 'true': + self.persistent = True + else: + self.persistent = False + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self): + rs = self.connection.get_all_instances([self.id]) + if len(rs) > 0: + r = rs[0] + for i in r.instances: + if i.id == self.id: + self._update(i) + return self.state + + def terminate(self): + rs = self.connection.terminate_instances([self.id]) + self._update(rs[0]) + + def stop(self): + rs = self.connection.stop_instances([self.id]) + self._update(rs[0]) + + def start(self): + rs = self.connection.start_instances([self.id]) + self._update(rs[0]) + + def reboot(self): + return self.connection.reboot_instances([self.id]) + + def get_console_output(self): + return self.connection.get_console_output(self.id) + + def confirm_product(self, product_code): + return self.connection.confirm_product_instance(self.id, product_code) + + def use_ip(self, ip_address): + if isinstance(ip_address, Address): + ip_address = ip_address.public_ip + return self.connection.associate_address(self.id, ip_address) + + def monitor(self): + return self.connection.monitor_instance(self.id) + + def unmonitor(self): + return self.connection.unmonitor_instance(self.id) + +class Group: + + def __init__(self, parent=None): + self.id = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'groupId': + self.id = value + else: + setattr(self, name, value) + +class ConsoleOutput: + + def __init__(self, parent=None): + self.parent = parent + self.instance_id = None + self.timestamp = None + self.comment = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.instance_id = value + elif name == 'output': + self.output = base64.b64decode(value) + else: + setattr(self, name, value) + +class InstanceAttribute(dict): + + def __init__(self, parent=None): + dict.__init__(self) + self._current_value = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if 
name == 'value': + self._current_value = value + else: + self[name] = self._current_value + +class StateReason(dict): + + def __init__(self, parent=None): + dict.__init__(self) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name != 'stateReason': + self[name] = value + diff --git a/vendor/boto/boto/ec2/instanceinfo.py b/vendor/boto/boto/ec2/instanceinfo.py new file mode 100644 index 000000000000..6efbaed3bcb3 --- /dev/null +++ b/vendor/boto/boto/ec2/instanceinfo.py @@ -0,0 +1,47 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class InstanceInfo(object): + """ + Represents an EC2 Instance status response from CloudWatch + """ + + def __init__(self, connection=None, id=None, state=None): + self.connection = connection + self.id = id + self.state = state + + def __repr__(self): + return 'InstanceInfo:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId' or name == 'InstanceId': + self.id = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + + + diff --git a/vendor/boto/boto/ec2/keypair.py b/vendor/boto/boto/ec2/keypair.py new file mode 100644 index 000000000000..d08e5ce3b4a3 --- /dev/null +++ b/vendor/boto/boto/ec2/keypair.py @@ -0,0 +1,111 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Keypair +""" + +import os +from boto.ec2.ec2object import EC2Object +from boto.exception import BotoClientError + +class KeyPair(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.name = None + self.fingerprint = None + self.material = None + + def __repr__(self): + return 'KeyPair:%s' % self.name + + def endElement(self, name, value, connection): + if name == 'keyName': + self.name = value + elif name == 'keyFingerprint': + self.fingerprint = value + elif name == 'keyMaterial': + self.material = value + else: + setattr(self, name, value) + + def delete(self): + """ + Delete the KeyPair. + + :rtype: bool + :return: True if successful, otherwise False. + """ + return self.connection.delete_key_pair(self.name) + + def save(self, directory_path): + """ + Save the material (the unencrypted PEM encoded RSA private key) + of a newly created KeyPair to a local file. + + :type directory_path: string + :param directory_path: The fully qualified path to the directory + in which the keypair will be saved. The + keypair file will be named using the name + of the keypair as the base name and .pem + for the file extension. If a file of that + name already exists in the directory, an + exception will be raised and the old file + will not be overwritten. + + :rtype: bool + :return: True if successful. + """ + if self.material: + file_path = os.path.join(directory_path, '%s.pem' % self.name) + if os.path.exists(file_path): + raise BotoClientError('%s already exists, it will not be overwritten' % file_path) + fp = open(file_path, 'wb') + fp.write(self.material) + fp.close() + return True + else: + raise BotoClientError('KeyPair contains no material') + + def copy_to_region(self, region): + """ + Create a new key pair of the same new in another region. + Note that the new key pair will use a different ssh + cert than the this key pair. After doing the copy, + you will need to save the material associated with the + new key pair (use the save method) to a local file. + + :type region: :class:`boto.ec2.regioninfo.RegionInfo` + :param region: The region to which this security group will be copied. 
+ + :rtype: :class:`boto.ec2.keypair.KeyPair` + :return: The new key pair + """ + if region.name == self.region: + raise BotoClientError('Unable to copy to the same Region') + conn_params = self.connection.get_params() + rconn = region.connect(**conn_params) + kp = rconn.create_key_pair(self.name) + return kp + + + diff --git a/vendor/boto/boto/ec2/launchspecification.py b/vendor/boto/boto/ec2/launchspecification.py new file mode 100644 index 000000000000..a574a3825e00 --- /dev/null +++ b/vendor/boto/boto/ec2/launchspecification.py @@ -0,0 +1,96 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a launch specification for Spot instances. +""" + +from boto.ec2.ec2object import EC2Object +from boto.resultset import ResultSet +from boto.ec2.blockdevicemapping import BlockDeviceMapping +from boto.ec2.instance import Group + +class GroupList(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'groupId': + self.append(value) + +class LaunchSpecification(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.key_name = None + self.instance_type = None + self.image_id = None + self.groups = [] + self.placement = None + self.kernel = None + self.ramdisk = None + self.monitored = False + self.subnet_id = None + self._in_monitoring_element = False + self.block_device_mapping = None + + def __repr__(self): + return 'LaunchSpecification(%s)' % self.image_id + + def startElement(self, name, attrs, connection): + if name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + elif name == 'monitoring': + self._in_monitoring_element = True + elif name == 'blockDeviceMapping': + self.block_device_mapping = BlockDeviceMapping() + return self.block_device_mapping + else: + return None + + def endElement(self, name, value, connection): + if name == 'imageId': + self.image_id = value + elif name == 'keyName': + self.key_name = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'availabilityZone': + self.placement = value + elif name == 'placement': + pass + elif name == 'kernelId': + self.kernel = value + elif name == 'ramdiskId': + self.ramdisk = value + elif name == 'subnetId': + self.subnet_id = value + elif name == 'state': + if self._in_monitoring_element: + if value == 'enabled': + self.monitored = True + self._in_monitoring_element = False + else: + 
setattr(self, name, value) + + diff --git a/vendor/boto/boto/ec2/regioninfo.py b/vendor/boto/boto/ec2/regioninfo.py new file mode 100644 index 000000000000..ab61703f3796 --- /dev/null +++ b/vendor/boto/boto/ec2/regioninfo.py @@ -0,0 +1,60 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class RegionInfo(object): + """ + Represents an EC2 Region + """ + + def __init__(self, connection=None, name=None, endpoint=None): + self.connection = connection + self.name = name + self.endpoint = endpoint + + def __repr__(self): + return 'RegionInfo:%s' % self.name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'regionName': + self.name = value + elif name == 'regionEndpoint': + self.endpoint = value + else: + setattr(self, name, value) + + def connect(self, **kw_params): + """ + Connect to this Region's endpoint. Returns an EC2Connection + object pointing to the endpoint associated with this region. + You may pass any of the arguments accepted by the EC2Connection + object's constructor as keyword arguments and they will be + passed along to the EC2Connection object. + + :rtype: :class:`boto.ec2.connection.EC2Connection` + :return: The connection to this regions endpoint + """ + from boto.ec2.connection import EC2Connection + return EC2Connection(region=self, **kw_params) + + diff --git a/vendor/boto/boto/ec2/reservedinstance.py b/vendor/boto/boto/ec2/reservedinstance.py new file mode 100644 index 000000000000..1d35c1df069e --- /dev/null +++ b/vendor/boto/boto/ec2/reservedinstance.py @@ -0,0 +1,97 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.ec2.ec2object import EC2Object + +class ReservedInstancesOffering(EC2Object): + + def __init__(self, connection=None, id=None, instance_type=None, + availability_zone=None, duration=None, fixed_price=None, + usage_price=None, description=None): + EC2Object.__init__(self, connection) + self.id = id + self.instance_type = instance_type + self.availability_zone = availability_zone + self.duration = duration + self.fixed_price = fixed_price + self.usage_price = usage_price + self.description = description + + def __repr__(self): + return 'ReservedInstanceOffering:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'reservedInstancesOfferingId': + self.id = value + elif name == 'instanceType': + self.instance_type = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'duration': + self.duration = value + elif name == 'fixedPrice': + self.fixed_price = value + elif name == 'usagePrice': + self.usage_price = value + elif name == 'productDescription': + self.description = value + else: + setattr(self, name, value) + + def describe(self): + print 'ID=%s' % self.id + print '\tInstance Type=%s' % self.instance_type + print '\tZone=%s' % self.availability_zone + print '\tDuration=%s' % self.duration + print '\tFixed Price=%s' % self.fixed_price + print '\tUsage Price=%s' % self.usage_price + print '\tDescription=%s' % self.description + + def purchase(self, instance_count=1): + return self.connection.purchase_reserved_instance_offering(self.id, instance_count) + +class ReservedInstance(ReservedInstancesOffering): + + def __init__(self, connection=None, id=None, instance_type=None, + availability_zone=None, duration=None, fixed_price=None, + usage_price=None, description=None, + instance_count=None, state=None): + ReservedInstancesOffering.__init__(self, connection, id, instance_type, + availability_zone, duration, fixed_price, + usage_price, description) + self.instance_count = instance_count + self.state = state + + def __repr__(self): + return 'ReservedInstance:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'reservedInstancesId': + self.id = value + if name == 'instanceCount': + self.instance_count = int(value) + elif name == 'state': + self.state = value + else: + ReservedInstancesOffering.endElement(self, name, value, connection) diff --git a/vendor/boto/boto/ec2/securitygroup.py b/vendor/boto/boto/ec2/securitygroup.py new file mode 100644 index 000000000000..61b0a00fdf15 --- /dev/null +++ b/vendor/boto/boto/ec2/securitygroup.py @@ -0,0 +1,282 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the 
Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Security Group +""" +from boto.ec2.ec2object import EC2Object +from boto.exception import BotoClientError + +class SecurityGroup(EC2Object): + + def __init__(self, connection=None, owner_id=None, + name=None, description=None): + EC2Object.__init__(self, connection) + self.owner_id = owner_id + self.name = name + self.description = description + self.rules = [] + + def __repr__(self): + return 'SecurityGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'item': + self.rules.append(IPPermissions(self)) + return self.rules[-1] + else: + return None + + def endElement(self, name, value, connection): + if name == 'ownerId': + self.owner_id = value + elif name == 'groupName': + self.name = value + elif name == 'groupDescription': + self.description = value + elif name == 'ipRanges': + pass + elif name == 'return': + if value == 'false': + self.status = False + elif value == 'true': + self.status = True + else: + raise Exception( + 'Unexpected value of status %s for group %s'%( + value, + self.name + ) + ) + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_security_group(self.name) + + def add_rule(self, ip_protocol, from_port, to_port, + src_group_name, src_group_owner_id, cidr_ip): + rule = IPPermissions(self) + rule.ip_protocol = ip_protocol + rule.from_port = from_port + rule.to_port = to_port + self.rules.append(rule) + rule.add_grant(src_group_name, src_group_owner_id, cidr_ip) + + def remove_rule(self, ip_protocol, from_port, to_port, + src_group_name, src_group_owner_id, cidr_ip): + target_rule = None + for rule in self.rules: + if rule.ip_protocol == ip_protocol: + if rule.from_port == from_port: + if rule.to_port == to_port: + target_rule = rule + target_grant = None + for grant in rule.grants: + if grant.name == src_group_name: + if grant.owner_id == src_group_owner_id: + if grant.cidr_ip == cidr_ip: + target_grant = grant + if target_grant: + rule.grants.remove(target_grant) + if len(rule.grants) == 0: + self.rules.remove(target_rule) + + def authorize(self, ip_protocol=None, from_port=None, to_port=None, + cidr_ip=None, src_group=None): + """ + Add a new rule to this security group. + You need to pass in either src_group_name + OR ip_protocol, from_port, to_port, + and cidr_ip. In other words, either you are authorizing another + group or you are authorizing some ip-based rule. + + :type ip_protocol: string + :param ip_protocol: Either tcp | udp | icmp + + :type from_port: int + :param from_port: The beginning port number you are enabling + + :type to_port: int + :param to_port: The ending port number you are enabling + + :type to_port: string + :param to_port: The CIDR block you are providing access to. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or + :class:`boto.ec2.securitygroup.GroupOrCIDR` + + :rtype: bool + :return: True if successful. 
+ """ + if src_group: + from_port = None + to_port = None + cidr_ip = None + ip_protocol = None + src_group_name = src_group.name + src_group_owner_id = src_group.owner_id + else: + src_group_name = None + src_group_owner_id = None + status = self.connection.authorize_security_group(self.name, + src_group_name, + src_group_owner_id, + ip_protocol, + from_port, + to_port, + cidr_ip) + if status: + self.add_rule(ip_protocol, from_port, to_port, src_group_name, + src_group_owner_id, cidr_ip) + return status + + def revoke(self, ip_protocol=None, from_port=None, to_port=None, + cidr_ip=None, src_group=None): + if src_group: + from_port=None + to_port=None + cidr_ip=None + ip_protocol = None + src_group_name = src_group.name + src_group_owner_id = src_group.owner_id + else: + src_group_name = None + src_group_owner_id = None + status = self.connection.revoke_security_group(self.name, + src_group_name, + src_group_owner_id, + ip_protocol, + from_port, + to_port, + cidr_ip) + if status: + self.remove_rule(ip_protocol, from_port, to_port, src_group_name, + src_group_owner_id, cidr_ip) + return status + + def copy_to_region(self, region, name=None): + """ + Create a copy of this security group in another region. + Note that the new security group will be a separate entity + and will not stay in sync automatically after the copy + operation. + + :type region: :class:`boto.ec2.regioninfo.RegionInfo` + :param region: The region to which this security group will be copied. + + :type name: string + :param name: The name of the copy. If not supplied, the copy + will have the same name as this security group. + + :rtype: :class:`boto.ec2.securitygroup.SecurityGroup` + :return: The new security group. + """ + if region.name == self.region: + raise BotoClientError('Unable to copy to the same Region') + conn_params = self.connection.get_params() + rconn = region.connect(**conn_params) + sg = rconn.create_security_group(name or self.name, self.description) + source_groups = [] + for rule in self.rules: + grant = rule.grants[0] + if grant.name: + if grant.name not in source_groups: + source_groups.append(grant.name) + sg.authorize(None, None, None, None, grant) + else: + sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port, + grant.cidr_ip) + return sg + + def instances(self): + instances = [] + rs = self.connection.get_all_instances() + for reservation in rs: + uses_group = [g.id for g in reservation.groups if g.id == self.name] + if uses_group: + instances.extend(reservation.instances) + return instances + +class IPPermissions: + + def __init__(self, parent=None): + self.parent = parent + self.ip_protocol = None + self.from_port = None + self.to_port = None + self.grants = [] + + def __repr__(self): + return 'IPPermissions:%s(%s-%s)' % (self.ip_protocol, + self.from_port, self.to_port) + + def startElement(self, name, attrs, connection): + if name == 'item': + self.grants.append(GroupOrCIDR(self)) + return self.grants[-1] + return None + + def endElement(self, name, value, connection): + if name == 'ipProtocol': + self.ip_protocol = value + elif name == 'fromPort': + self.from_port = value + elif name == 'toPort': + self.to_port = value + else: + setattr(self, name, value) + + def add_grant(self, name=None, owner_id=None, cidr_ip=None): + grant = GroupOrCIDR(self) + grant.owner_id = owner_id + grant.name = name + grant.cidr_ip = cidr_ip + self.grants.append(grant) + return grant + +class GroupOrCIDR: + + def __init__(self, parent=None): + self.owner_id = None + self.name = None + self.cidr_ip = None + 
+ def __repr__(self): + if self.cidr_ip: + return '%s' % self.cidr_ip + else: + return '%s-%s' % (self.name, self.owner_id) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'userId': + self.owner_id = value + elif name == 'groupName': + self.name = value + if name == 'cidrIp': + self.cidr_ip = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/ec2/snapshot.py b/vendor/boto/boto/ec2/snapshot.py new file mode 100644 index 000000000000..3d4398ed39e5 --- /dev/null +++ b/vendor/boto/boto/ec2/snapshot.py @@ -0,0 +1,127 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Elastic IP Snapshot +""" +from boto.ec2.ec2object import EC2Object + +class Snapshot(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.volume_id = None + self.status = None + self.progress = None + self.start_time = None + self.owner_id = None + self.volume_size = None + self.description = None + + def __repr__(self): + return 'Snapshot:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'snapshotId': + self.id = value + elif name == 'volumeId': + self.volume_id = value + elif name == 'status': + self.status = value + elif name == 'startTime': + self.start_time = value + elif name == 'ownerId': + self.owner_id = value + elif name == 'volumeSize': + try: + self.volume_size = int(value) + except: + self.volume_size = value + elif name == 'description': + self.description = value + else: + setattr(self, name, value) + + def _update(self, updated): + self.progress = updated.progress + self.status = updated.status + + def update(self): + rs = self.connection.get_all_snapshots([self.id]) + if len(rs) > 0: + self._update(rs[0]) + return self.progress + + def delete(self): + return self.connection.delete_snapshot(self.id) + + def get_permissions(self): + attrs = self.connection.get_snapshot_attribute(self.id, + attribute='createVolumePermission') + return attrs.attrs + + def share(self, user_ids=None, groups=None): + return self.connection.modify_snapshot_attribute(self.id, + 'createVolumePermission', + 'add', + user_ids, + groups) + + def unshare(self, user_ids=None, groups=None): + return self.connection.modify_snapshot_attribute(self.id, + 'createVolumePermission', + 'remove', + user_ids, + groups) + + def reset_permissions(self): + return 
self.connection.reset_snapshot_attribute(self.id, 'createVolumePermission') + +class SnapshotAttribute: + + def __init__(self, parent=None): + self.snapshot_id = None + self.attrs = {} + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'createVolumePermission': + self.name = 'create_volume_permission' + elif name == 'group': + if self.attrs.has_key('groups'): + self.attrs['groups'].append(value) + else: + self.attrs['groups'] = [value] + elif name == 'userId': + if self.attrs.has_key('user_ids'): + self.attrs['user_ids'].append(value) + else: + self.attrs['user_ids'] = [value] + elif name == 'snapshotId': + self.snapshot_id = value + else: + setattr(self, name, value) + + + diff --git a/vendor/boto/boto/ec2/spotdatafeedsubscription.py b/vendor/boto/boto/ec2/spotdatafeedsubscription.py new file mode 100644 index 000000000000..9b820a3e09f8 --- /dev/null +++ b/vendor/boto/boto/ec2/spotdatafeedsubscription.py @@ -0,0 +1,63 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
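# Usage sketch for the Snapshot helpers defined above: one possible flow for
# sharing a snapshot with another account. Assumes an existing EC2Connection
# with placeholder credentials; the snapshot id and account id are illustrative
# values, not taken from this patch.
from boto.ec2.connection import EC2Connection

conn = EC2Connection('<aws_access_key_id>', '<aws_secret_access_key>')
snap = conn.get_all_snapshots(['snap-12345678'])[0]   # a Snapshot object as parsed above
snap.update()                                         # refresh progress/status from EC2
snap.share(user_ids=['111122223333'])                 # grant createVolumePermission
print snap.get_permissions()                          # e.g. {'user_ids': ['111122223333']}
snap.unshare(user_ids=['111122223333'])
snap.reset_permissions()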
+ +""" +Represents an EC2 Spot Instance Datafeed Subscription +""" +from boto.ec2.ec2object import EC2Object +from boto.ec2.spotinstancerequest import SpotInstanceStateFault + +class SpotDatafeedSubscription(EC2Object): + + def __init__(self, connection=None, owner_id=None, + bucket=None, prefix=None, state=None,fault=None): + EC2Object.__init__(self, connection) + self.owner_id = owner_id + self.bucket = bucket + self.prefix = prefix + self.state = state + self.fault = fault + + def __repr__(self): + return 'SpotDatafeedSubscription:%s' % self.bucket + + def startElement(self, name, attrs, connection): + if name == 'fault': + self.fault = SpotInstanceStateFault() + return self.fault + else: + return None + + def endElement(self, name, value, connection): + if name == 'ownerId': + self.owner_id = value + elif name == 'bucket': + self.bucket = value + elif name == 'prefix': + self.prefix = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_spot_datafeed_subscription() + diff --git a/vendor/boto/boto/ec2/spotinstancerequest.py b/vendor/boto/boto/ec2/spotinstancerequest.py new file mode 100644 index 000000000000..3014c7a683f0 --- /dev/null +++ b/vendor/boto/boto/ec2/spotinstancerequest.py @@ -0,0 +1,109 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an EC2 Spot Instance Request +""" + +from boto.ec2.ec2object import EC2Object +from boto.ec2.launchspecification import LaunchSpecification + +class SpotInstanceStateFault(object): + + def __init__(self, code=None, message=None): + self.code = code + self.message = message + + def __repr__(self): + return '(%s, %s)' % (self.code, self.message) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'code': + self.code = value + elif name == 'message': + self.message = value + setattr(self, name, value) + +class SpotInstanceRequest(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.price = None + self.type = None + self.state = None + self.fault = None + self.valid_from = None + self.valid_until = None + self.launch_group = None + self.product_description = None + self.availability_zone_group = None + self.create_time = None + self.launch_specification = None + self.instance_id = None + + def __repr__(self): + return 'SpotInstanceRequest:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'launchSpecification': + self.launch_specification = LaunchSpecification(connection) + return self.launch_specification + elif name == 'fault': + self.fault = SpotInstanceStateFault() + return self.fault + else: + return None + + def endElement(self, name, value, connection): + if name == 'spotInstanceRequestId': + self.id = value + elif name == 'spotPrice': + self.price = float(value) + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'productDescription': + self.product_description = value + elif name == 'validFrom': + self.valid_from = value + elif name == 'validUntil': + self.valid_until = value + elif name == 'launchGroup': + self.launch_group = value + elif name == 'availabilityZoneGroup': + self.availability_zone_group = value + elif name == 'createTime': + self.create_time = value + elif name == 'instanceId': + self.instance_id = value + else: + setattr(self, name, value) + + def cancel(self): + self.connection.cancel_spot_instance_requests([self.id]) + + + diff --git a/vendor/boto/boto/ec2/spotpricehistory.py b/vendor/boto/boto/ec2/spotpricehistory.py new file mode 100644 index 000000000000..d4e171102bd8 --- /dev/null +++ b/vendor/boto/boto/ec2/spotpricehistory.py @@ -0,0 +1,52 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an EC2 Spot Instance Request +""" + +from boto.ec2.ec2object import EC2Object + +class SpotPriceHistory(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.price = 0.0 + self.instance_type = None + self.product_description = None + self.timestamp = None + + def __repr__(self): + return 'SpotPriceHistory(%s):%2f' % (self.instance_type, self.price) + + def endElement(self, name, value, connection): + if name == 'instanceType': + self.instance_type = value + elif name == 'spotPrice': + self.price = float(value) + elif name == 'productDescription': + self.product_description = value + elif name == 'timestamp': + self.timestamp = value + else: + setattr(self, name, value) + + diff --git a/vendor/boto/boto/ec2/volume.py b/vendor/boto/boto/ec2/volume.py new file mode 100644 index 000000000000..b07f83b99408 --- /dev/null +++ b/vendor/boto/boto/ec2/volume.py @@ -0,0 +1,208 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
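# Usage sketch for the spot-related classes above, with placeholder credentials
# and AMI id. get_spot_price_history and request_spot_instances are assumed from
# the broader EC2Connection API; cancel_spot_instance_requests is the call that
# SpotInstanceRequest.cancel() wraps in this file.
from boto.ec2.connection import EC2Connection

conn = EC2Connection('<aws_access_key_id>', '<aws_secret_access_key>')

# Recent pricing, parsed into SpotPriceHistory objects
for price in conn.get_spot_price_history(instance_type='m1.small'):
    print price.timestamp, price.price

# A spot request; each result is a SpotInstanceRequest
for req in conn.request_spot_instances(price='0.05', image_id='ami-12345678', count=1):
    print req.id, req.state
    req.cancel()                          # wraps cancel_spot_instance_requests([req.id])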
+ +""" +Represents an EC2 Elastic Block Storage Volume +""" +from boto.ec2.ec2object import EC2Object + +class Volume(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.create_time = None + self.status = None + self.size = None + self.snapshot_id = None + self.attach_data = None + self.zone = None + + def __repr__(self): + return 'Volume:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'attachmentSet': + self.attach_data = AttachmentSet() + return self.attach_data + else: + return None + + def endElement(self, name, value, connection): + if name == 'volumeId': + self.id = value + elif name == 'createTime': + self.create_time = value + elif name == 'status': + if value != '': + self.status = value + elif name == 'size': + self.size = int(value) + elif name == 'snapshotId': + self.snapshot_id = value + elif name == 'availabilityZone': + self.zone = value + else: + setattr(self, name, value) + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + + def update(self): + rs = self.connection.get_all_volumes([self.id]) + if len(rs) > 0: + self._update(rs[0]) + return self.status + + def delete(self): + """ + Delete this EBS volume. + + :rtype: bool + :return: True if successful + """ + return self.connection.delete_volume(self.id) + + def attach(self, instance_id, device): + """ + Attach this EBS volume to an EC2 instance. + + :type instance_id: str + :param instance_id: The ID of the EC2 instance to which it will + be attached. + + :type device: str + :param device: The device on the instance through which the + volume will be exposted (e.g. /dev/sdh) + + :rtype: bool + :return: True if successful + """ + return self.connection.attach_volume(self.id, instance_id, device) + + def detach(self, force=False): + """ + Detach this EBS volume from an EC2 instance. + + :type force: bool + :param force: Forces detachment if the previous detachment attempt did + not occur cleanly. This option can lead to data loss or + a corrupted file system. Use this option only as a last + resort to detach a volume from a failed instance. The + instance will not have an opportunity to flush file system + caches nor file system meta data. If you use this option, + you must perform file system check and repair procedures. + + :rtype: bool + :return: True if successful + """ + instance_id = None + if self.attach_data: + instance_id = self.attach_data.instance_id + device = None + if self.attach_data: + device = self.attach_data.device + return self.connection.detach_volume(self.id, instance_id, device, force) + + def create_snapshot(self, description=None): + """ + Create a snapshot of this EBS Volume. + + :type description: str + :param description: A description of the snapshot. Limited to 256 characters. + + :rtype: bool + :return: True if successful + """ + return self.connection.create_snapshot(self.id, description) + + def volume_state(self): + """ + Returns the state of the volume. Same value as the status attribute. + """ + return self.status + + def attachment_state(self): + """ + Get the attachment state. + """ + state = None + if self.attach_data: + state = self.attach_data.status + return state + + def snapshots(self, owner=None, restorable_by=None): + """ + Get all snapshots related to this volume. Note that this requires + that all available snapshots for the account be retrieved from EC2 + first and then the list is filtered client-side to contain only + those for this volume. 
+ + :type owner: str + :param owner: If present, only the snapshots owned by the specified user + will be returned. Valid values are: + self | amazon | AWS Account ID + + :type restorable_by: str + :param restorable_by: If present, only the snapshots that are restorable + by the specified account id will be returned. + + :rtype: list of L{boto.ec2.snapshot.Snapshot} + :return: The requested Snapshot objects + + """ + rs = self.connection.get_all_snapshots(owner=owner, + restorable_by=restorable_by) + mine = [] + for snap in rs: + if snap.volume_id == self.id: + mine.append(snap) + return mine + +class AttachmentSet(object): + + def __init__(self): + self.id = None + self.instance_id = None + self.status = None + self.attach_time = None + self.device = None + + def __repr__(self): + return 'AttachmentSet:%s' % self.id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'volumeId': + self.id = value + elif name == 'instanceId': + self.instance_id = value + elif name == 'status': + self.status = value + elif name == 'attachTime': + self.attach_time = value + elif name == 'device': + self.device = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/ec2/zone.py b/vendor/boto/boto/ec2/zone.py new file mode 100644 index 000000000000..aec79b2c4060 --- /dev/null +++ b/vendor/boto/boto/ec2/zone.py @@ -0,0 +1,47 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
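# Usage sketch for the Volume/AttachmentSet classes above. Credentials, zone,
# instance id and device are placeholders; create_volume is assumed from the
# broader EC2Connection API, while attach/detach/create_snapshot/update below
# wrap the connection calls referenced by the methods above.
import time
from boto.ec2.connection import EC2Connection

conn = EC2Connection('<aws_access_key_id>', '<aws_secret_access_key>')
vol = conn.create_volume(10, 'us-east-1a')        # 10 GiB volume in a zone (assumed helper)
while vol.update() != 'available':                # update() re-reads status via get_all_volumes
    time.sleep(5)
vol.attach('i-12345678', '/dev/sdh')              # wraps attach_volume()
vol.create_snapshot('example backup')             # wraps create_snapshot()
vol.detach()                                      # wraps detach_volume()
vol.delete()                                      # delete once fully detached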
+ +""" +Represents an EC2 Availability Zone +""" +from boto.ec2.ec2object import EC2Object + +class Zone(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.name = None + self.state = None + + def __repr__(self): + return 'Zone:%s' % self.name + + def endElement(self, name, value, connection): + if name == 'zoneName': + self.name = value + elif name == 'zoneState': + self.state = value + else: + setattr(self, name, value) + + + + diff --git a/vendor/boto/boto/emr/__init__.py b/vendor/boto/boto/emr/__init__.py new file mode 100644 index 000000000000..970b4b80f0a3 --- /dev/null +++ b/vendor/boto/boto/emr/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) 2010 Spotify AB +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +This module provies an interface to the Elastic MapReduce (EMR) +service from AWS. +""" +from connection import EmrConnection +from step import Step, StreamingStep, JarStep + + diff --git a/vendor/boto/boto/emr/connection.py b/vendor/boto/boto/emr/connection.py new file mode 100644 index 000000000000..c6d454a25642 --- /dev/null +++ b/vendor/boto/boto/emr/connection.py @@ -0,0 +1,236 @@ +# Copyright (c) 2010 Spotify AB +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a connection to the EMR service +""" +import types + +import boto +from boto.ec2.regioninfo import RegionInfo +from boto.emr.jobflow import JobFlow, RunJobFlowResponse +from boto.emr.step import JarStep +from boto.connection import AWSQueryConnection +from boto.exception import EmrResponseError + +class EmrConnection(AWSQueryConnection): + + APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31') + DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1') + DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint', + 'elasticmapreduce.amazonaws.com') + ResponseError = EmrResponseError + + # Constants for AWS Console debugging + DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar' + DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, host=None, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/'): + if not region: + region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + proxy_user, proxy_pass, + self.region.endpoint, debug, + https_connection_factory, path) + + def describe_jobflow(self, jobflow_id): + """ + Describes a single Elastic MapReduce job flow + + :type jobflow_id: str + :param jobflow_id: The job flow id of interest + """ + jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id]) + if jobflows: + return jobflows[0] + + def describe_jobflows(self, states=None, jobflow_ids=None, + created_after=None, created_before=None): + """ + Retrieve all the Elastic MapReduce job flows on your account + + :type states: list + :param states: A list of strings with job flow states wanted + + :type jobflow_ids: list + :param jobflow_ids: A list of job flow IDs + :type created_after: datetime + :param created_after: Bound on job flow creation time + + :type created_before: datetime + :param created_before: Bound on job flow creation time + """ + params = {} + + if states: + self.build_list_params(params, states, 'JobFlowStates.member') + if jobflow_ids: + self.build_list_params(params, jobflow_ids, 'JobFlowIds.member') + if created_after: + params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S') + if created_before: + params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S') + + return self.get_list('DescribeJobFlows', params, [('member', JobFlow)]) + + def terminate_jobflows(self, jobflow_ids): + """ + Terminate an Elastic MapReduce job flow + + :type jobflow_ids: list + :param jobflow_ids: A list of job flow IDs + """ + params = {} + self.build_list_params(params, jobflow_ids, 'JobFlowIds.member') + return self.get_status('TerminateJobFlows', params) + + def add_jobflow_steps(self, jobflow_id, steps): + """ + Adds steps to a jobflow + + :type jobflow_id: str + :param jobflow_id: The job flow id + :type steps: list(boto.emr.Step) + :param steps: A list of steps to add to the job + """ + if type(steps) != types.ListType: + steps = [steps] + params = {} + params['JobFlowId'] = jobflow_id + + # Step args + step_args = [self._build_step_args(step) for step in steps] + params.update(self._build_step_list(step_args)) + + return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse) + + def run_jobflow(self, 
name, log_uri, ec2_keyname=None, availability_zone=None, + master_instance_type='m1.small', + slave_instance_type='m1.small', num_instances=1, + action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False, + enable_debugging=False, + steps=[]): + """ + Runs a job flow + + :type name: str + :param name: Name of the job flow + :type log_uri: str + :param log_uri: URI of the S3 bucket to place logs + :type ec2_keyname: str + :param ec2_keyname: EC2 key used for the instances + :type availability_zone: str + :param availability_zone: EC2 availability zone of the cluster + :type master_instance_type: str + :param master_instance_type: EC2 instance type of the master + :type slave_instance_type: str + :param slave_instance_type: EC2 instance type of the slave nodes + :type num_instances: int + :param num_instances: Number of instances in the Hadoop cluster + :type action_on_failure: str + :param action_on_failure: Action to take if a step terminates + :type keep_alive: bool + :param keep_alive: Denotes whether the cluster should stay alive upon completion + :type enable_debugging: bool + :param enable_debugging: Denotes whether AWS console debugging should be enabled. + :type steps: list(boto.emr.Step) + :param steps: List of steps to add with the job + + :rtype: str + :return: The jobflow id + """ + params = {} + if action_on_failure: + params['ActionOnFailure'] = action_on_failure + params['Name'] = name + params['LogUri'] = log_uri + + # Instance args + instance_params = self._build_instance_args(ec2_keyname, availability_zone, + master_instance_type, slave_instance_type, + num_instances, keep_alive) + params.update(instance_params) + + # Debugging step from EMR API docs + if enable_debugging: + debugging_step = JarStep(name='Setup Hadoop Debugging', + action_on_failure='TERMINATE_JOB_FLOW', + main_class=None, + jar=self.DebuggingJar, + step_args=self.DebuggingArgs) + steps.insert(0, debugging_step) + + # Step args + if steps: + step_args = [self._build_step_args(step) for step in steps] + params.update(self._build_step_list(step_args)) + + response = self.get_object('RunJobFlow', params, RunJobFlowResponse) + return response.jobflowid + + def _build_step_args(self, step): + step_params = {} + step_params['ActionOnFailure'] = step.action_on_failure + step_params['HadoopJarStep.Jar'] = step.jar() + + main_class = step.main_class() + if main_class: + step_params['HadoopJarStep.MainClass'] = main_class + + args = step.args() + if args: + self.build_list_params(step_params, args, 'HadoopJarStep.Args.member') + + step_params['Name'] = step.name + return step_params + + def _build_step_list(self, steps): + if type(steps) != types.ListType: + steps = [steps] + + params = {} + for i, step in enumerate(steps): + for key, value in step.iteritems(): + params['Steps.memeber.%s.%s' % (i+1, key)] = value + return params + + def _build_instance_args(self, ec2_keyname, availability_zone, master_instance_type, + slave_instance_type, num_instances, keep_alive): + params = { + 'Instances.MasterInstanceType' : master_instance_type, + 'Instances.SlaveInstanceType' : slave_instance_type, + 'Instances.InstanceCount' : num_instances, + 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower() + } + + if ec2_keyname: + params['Instances.Ec2KeyName'] = ec2_keyname + if availability_zone: + params['Placement'] = availability_zone + + return params + diff --git a/vendor/boto/boto/emr/emrobject.py b/vendor/boto/boto/emr/emrobject.py new file mode 100644 index 000000000000..4e2bd7cde9da --- /dev/null +++ 
b/vendor/boto/boto/emr/emrobject.py @@ -0,0 +1,34 @@ +# Copyright (c) 2010 Spotify AB +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +class EmrObject(object): + Fields = set() + + def __init__(self, connection=None): + self.connection = connection + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name in self.Fields: + setattr(self, name.lower(), value) diff --git a/vendor/boto/boto/emr/jobflow.py b/vendor/boto/boto/emr/jobflow.py new file mode 100644 index 000000000000..d2b3ab01706f --- /dev/null +++ b/vendor/boto/boto/emr/jobflow.py @@ -0,0 +1,89 @@ +# Copyright (c) 2010 Spotify AB +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
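# Usage sketch for EmrConnection.run_jobflow with a custom jar step, using the
# JarStep class imported by the connection module. The jar/bucket URIs and
# credentials are placeholders.
from boto.emr.connection import EmrConnection
from boto.emr.step import JarStep

conn = EmrConnection('<aws_access_key_id>', '<aws_secret_access_key>')
step = JarStep(name='Example jar step',
               jar='s3n://my-bucket/jars/example.jar',
               main_class='com.example.Main',
               step_args=['s3n://my-bucket/input', 's3n://my-bucket/output'])
jobflow_id = conn.run_jobflow(name='example-jobflow',
                              log_uri='s3n://my-bucket/logs',
                              steps=[step],
                              num_instances=2)
print conn.describe_jobflow(jobflow_id).state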
+ +from boto.emr.emrobject import EmrObject +from boto.resultset import ResultSet + +class RunJobFlowResponse(EmrObject): + Fields = set(['JobFlowId']) + +class Arg(EmrObject): + def __init__(self, connection=None): + self.value = None + + def endElement(self, name, value, connection): + self.value = value + + +class Step(EmrObject): + Fields = set(['Name', + 'ActionOnFailure', + 'CreationDateTime', + 'StartDateTime', + 'EndDateTime', + 'LastStateChangeReason', + 'State']) + + def __init__(self, connection=None): + self.connection = connection + self.args = None + + def startElement(self, name, attrs, connection): + if name == 'Args': + self.args = ResultSet([('member', Arg)]) + return self.args + + +class JobFlow(EmrObject): + Fields = set(['CreationDateTime', + 'StartDateTime', + 'State', + 'EndDateTime', + 'Id', + 'InstanceCount', + 'JobFlowId', + 'KeepJobAliveWhenNoSteps', + 'LogURI', + 'MasterPublicDnsName', + 'MasterInstanceId', + 'Name', + 'Placement', + 'RequestId', + 'Type', + 'Value', + 'AvailabilityZone', + 'SlaveInstanceType', + 'MasterInstanceType', + 'Ec2KeyName', + 'InstanceCount', + 'KeepJobFlowAliveWhenNoSteps']) + + def __init__(self, connection=None): + self.connection = connection + self.steps = None + + def startElement(self, name, attrs, connection): + if name == 'Steps': + self.steps = ResultSet([('member', Step)]) + return self.steps + else: + return None + diff --git a/vendor/boto/boto/emr/step.py b/vendor/boto/boto/emr/step.py new file mode 100644 index 000000000000..b6c73fcbed9b --- /dev/null +++ b/vendor/boto/boto/emr/step.py @@ -0,0 +1,168 @@ +# Copyright (c) 2010 Spotify AB +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
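# Usage sketch for a Hadoop streaming job flow; StreamingStep is defined in the
# step module added below, and the mapper/reducer/bucket URIs and credentials
# are placeholders.
import time
from boto.emr.connection import EmrConnection
from boto.emr.step import StreamingStep

conn = EmrConnection('<aws_access_key_id>', '<aws_secret_access_key>')
step = StreamingStep(name='Example streaming step',
                     mapper='s3n://my-bucket/scripts/mapper.py',
                     reducer='s3n://my-bucket/scripts/reducer.py',
                     input='s3n://my-bucket/input',
                     output='s3n://my-bucket/output')
jobflow_id = conn.run_jobflow(name='example-streaming-jobflow',
                              log_uri='s3n://my-bucket/logs',
                              steps=[step])
while conn.describe_jobflow(jobflow_id).state not in ('COMPLETED', 'FAILED', 'TERMINATED'):
    time.sleep(30)                        # poll the JobFlow state parsed above
conn.terminate_jobflows([jobflow_id])     # shut the cluster down when finished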
+ +class Step(object): + """ + Jobflow Step base class + """ + def jar(self): + """ + :rtype: str + :return: URI to the jar + """ + raise NotImplementedError() + + def args(self): + """ + :rtype: list(str) + :return: List of arguments for the step + """ + raise NotImplementedError() + + def main_class(self): + """ + :rtype: str + :return: The main class name + """ + raise NotImplementedError() + + +class JarStep(Step): + """ + Custom jar step + """ + def __init__(self, name, jar, main_class, + action_on_failure='TERMINATE_JOB_FLOW', step_args=None): + """ + An Elastic MapReduce step that executes a jar + + :type name: str + :param name: The name of the step + :type jar: str + :param jar: S3 URI to the Jar file + :type main_class: str + :param main_class: The class to execute in the jar + :type action_on_failure: str + :param action_on_failure: An action, defined in the EMR docs to take on failure. + :type step_args: list(str) + :param step_args: A list of arguments to pass to the step + """ + self.name = name + self._jar = jar + self._main_class = main_class + self.action_on_failure = action_on_failure + + if isinstance(step_args, basestring): + step_args = [step_args] + + self.step_args = step_args + + def jar(self): + return self._jar + + def args(self): + args = [] + + if self.step_args: + args.extend(self.step_args) + + return args + + def main_class(self): + return self._main_class + + +class StreamingStep(Step): + """ + Hadoop streaming step + """ + def __init__(self, name, mapper, reducer, + action_on_failure='TERMINATE_JOB_FLOW', + cache_files=None, cache_archives=None, + step_args=None, input=None, output=None): + """ + A Hadoop streaming Elastic MapReduce step + + :type name: str + :param name: The name of the step + :type mapper: str + :param mapper: The mapper URI + :type reducer: str + :param reducer: The reducer URI + :type action_on_failure: str + :param action_on_failure: An action, defined in the EMR docs to take on failure.
+ :type cache_files: list(str) + :param cache_files: A list of cache files to be bundled with the job + :type cache_archives: list(str) + :param cache_archives: A list of jar archives to be bundled with the job + :type step_args: list(str) + :param step_args: A list of arguments to pass to the step + :type input: str + :param input: The input uri + :type output: str + :param output: The output uri + """ + self.name = name + self.mapper = mapper + self.reducer = reducer + self.action_on_failure = action_on_failure + self.cache_files = cache_files + self.cache_archives = cache_archives + self.input = input + self.output = output + + if isinstance(step_args, basestring): + step_args = [step_args] + + self.step_args = step_args + + def jar(self): + return '/home/hadoop/contrib/streaming/hadoop-0.18-streaming.jar' + + def main_class(self): + return None + + def args(self): + args = ['-mapper', self.mapper, + '-reducer', self.reducer] + + if self.input: + if isinstance(self.input, list): + for input in self.input: + args.extend(('-input', input)) + else: + args.extend(('-input', self.input)) + if self.output: + args.extend(('-output', self.output)) + + if self.cache_files: + for cache_file in self.cache_files: + args.extend(('-cacheFile', cache_file)) + + if self.cache_archives: + for cache_archive in self.cache_archives: + args.extend(('-cacheArchive', cache_archive)) + + if self.step_args: + args.extend(self.step_args) + + return args + diff --git a/vendor/boto/boto/exception.py b/vendor/boto/boto/exception.py new file mode 100644 index 000000000000..503f43d1cb5a --- /dev/null +++ b/vendor/boto/boto/exception.py @@ -0,0 +1,293 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Exception classes - Subclassing allows you to check for specific errors +""" +import base64 +import xml.sax +from boto import handler +from boto.resultset import ResultSet + + +class BotoClientError(StandardError): + """ + General Boto Client error (error accessing AWS) + """ + + def __init__(self, reason): + StandardError.__init__(self) + self.reason = reason + + def __repr__(self): + return 'S3Error: %s' % self.reason + + def __str__(self): + return 'S3Error: %s' % self.reason + +class SDBPersistenceError(StandardError): + + pass + +class S3PermissionsError(BotoClientError): + """ + Permissions error when accessing a bucket or key on S3. 
+ """ + pass + +class BotoServerError(StandardError): + + def __init__(self, status, reason, body=None): + StandardError.__init__(self) + self.status = status + self.reason = reason + self.body = body or '' + self.request_id = None + self.error_code = None + self.error_message = None + self.box_usage = None + + # Attempt to parse the error response. If body isn't present, + # then just ignore the error response. + if self.body: + try: + h = handler.XmlHandler(self, self) + xml.sax.parseString(self.body, h) + except xml.sax.SAXParseException, pe: + # Go ahead and clean up anything that may have + # managed to get into the error data so we + # don't get partial garbage. + print "Warning: failed to parse error message from AWS: %s" % pe + self._cleanupParsedProperties() + + def __getattr__(self, name): + if name == 'message': + return self.error_message + if name == 'code': + return self.error_code + raise AttributeError + + def __repr__(self): + return '%s: %s %s\n%s' % (self.__class__.__name__, + self.status, self.reason, self.body) + + def __str__(self): + return '%s: %s %s\n%s' % (self.__class__.__name__, + self.status, self.reason, self.body) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name in ('RequestId', 'RequestID'): + self.request_id = value + elif name == 'Code': + self.error_code = value + elif name == 'Message': + self.error_message = value + elif name == 'BoxUsage': + self.box_usage = value + return None + + def _cleanupParsedProperties(self): + self.request_id = None + self.error_code = None + self.error_message = None + self.box_usage = None + +class ConsoleOutput: + + def __init__(self, parent=None): + self.parent = parent + self.instance_id = None + self.timestamp = None + self.comment = None + self.output = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'instanceId': + self.instance_id = value + elif name == 'output': + self.output = base64.b64decode(value) + else: + setattr(self, name, value) + +class S3CreateError(BotoServerError): + """ + Error creating a bucket or key on S3. + """ + def __init__(self, status, reason, body=None): + self.bucket = None + BotoServerError.__init__(self, status, reason, body) + + def endElement(self, name, value, connection): + if name == 'BucketName': + self.bucket = value + else: + return BotoServerError.endElement(self, name, value, connection) + +class S3CopyError(BotoServerError): + """ + Error copying a key on S3. + """ + pass + +class SQSError(BotoServerError): + """ + General Error on Simple Queue Service. + """ + def __init__(self, status, reason, body=None): + self.detail = None + self.type = None + BotoServerError.__init__(self, status, reason, body) + + def startElement(self, name, attrs, connection): + return BotoServerError.startElement(self, name, attrs, connection) + + def endElement(self, name, value, connection): + if name == 'Detail': + self.detail = value + elif name == 'Type': + self.type = value + else: + return BotoServerError.endElement(self, name, value, connection) + + def _cleanupParsedProperties(self): + BotoServerError._cleanupParsedProperties(self) + for p in ('detail', 'type'): + setattr(self, p, None) + +class SQSDecodeError(BotoClientError): + """ + Error when decoding an SQS message. 
+ """ + def __init__(self, reason, message): + BotoClientError.__init__(self, reason) + self.message = message + + def __repr__(self): + return 'SQSDecodeError: %s' % self.reason + + def __str__(self): + return 'SQSDecodeError: %s' % self.reason + +class S3ResponseError(BotoServerError): + """ + Error in response from S3. + """ + def __init__(self, status, reason, body=None): + self.resource = None + BotoServerError.__init__(self, status, reason, body) + + def startElement(self, name, attrs, connection): + return BotoServerError.startElement(self, name, attrs, connection) + + def endElement(self, name, value, connection): + if name == 'Resource': + self.resource = value + else: + return BotoServerError.endElement(self, name, value, connection) + + def _cleanupParsedProperties(self): + BotoServerError._cleanupParsedProperties(self) + for p in ('resource'): + setattr(self, p, None) + +class EC2ResponseError(BotoServerError): + """ + Error in response from EC2. + """ + + def __init__(self, status, reason, body=None): + self.errors = None + self._errorResultSet = [] + BotoServerError.__init__(self, status, reason, body) + self.errors = [ (e.error_code, e.error_message) \ + for e in self._errorResultSet ] + if len(self.errors): + self.error_code, self.error_message = self.errors[0] + + def startElement(self, name, attrs, connection): + if name == 'Errors': + self._errorResultSet = ResultSet([('Error', _EC2Error)]) + return self._errorResultSet + else: + return None + + def endElement(self, name, value, connection): + if name == 'RequestID': + self.request_id = value + else: + return None # don't call subclass here + + def _cleanupParsedProperties(self): + BotoServerError._cleanupParsedProperties(self) + self._errorResultSet = [] + for p in ('errors'): + setattr(self, p, None) + +class EmrResponseError(BotoServerError): + """ + Error in response from EMR + """ + pass + +class _EC2Error: + + def __init__(self, connection=None): + self.connection = connection + self.error_code = None + self.error_message = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Code': + self.error_code = value + elif name == 'Message': + self.error_message = value + else: + return None + +class SDBResponseError(BotoServerError): + """ + Error in respones from SDB. + """ + pass + +class AWSConnectionError(BotoClientError): + """ + General error connecting to Amazon Web Services. + """ + pass + +class S3DataError(BotoClientError): + """ + Error receiving data from S3. + """ + pass + +class FPSResponseError(BotoServerError): + pass diff --git a/vendor/boto/boto/fps/__init__.py b/vendor/boto/boto/fps/__init__.py new file mode 100644 index 000000000000..2f44483df616 --- /dev/null +++ b/vendor/boto/boto/fps/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2008, Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/vendor/boto/boto/fps/connection.py b/vendor/boto/boto/fps/connection.py new file mode 100644 index 000000000000..0e0d4e8e27e4 --- /dev/null +++ b/vendor/boto/boto/fps/connection.py @@ -0,0 +1,172 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import urllib +import xml.sax +import uuid +import boto +import boto.utils +from boto import handler +from boto.connection import AWSQueryConnection +from boto.resultset import ResultSet +from boto.exception import FPSResponseError + +class FPSConnection(AWSQueryConnection): + + APIVersion = '2007-01-08' + SignatureVersion = '1' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + host='fps.sandbox.amazonaws.com', debug=0, + https_connection_factory=None): + AWSQueryConnection.__init__(self, aws_access_key_id, + aws_secret_access_key, + is_secure, port, proxy, proxy_port, + host, debug, https_connection_factory) + + def install_payment_instruction(self, instruction, token_type="Unrestricted", transaction_id=None): + """ + InstallPaymentInstruction + instruction: The PaymentInstruction to send, for example: + + MyRole=='Caller' orSay 'Roles do not match'; + + token_type: Defaults to "Unrestricted" + transaction_id: Defaults to a new ID + """ + + if(transaction_id == None): + transaction_id = uuid.uuid4() + params = {} + params['PaymentInstruction'] = instruction + params['TokenType'] = token_type + params['CallerReference'] = transaction_id + response = self.make_request("InstallPaymentInstruction", params) + return response + + def install_caller_instruction(self, token_type="Unrestricted", transaction_id=None): + """ + Set us up as a caller + This will install a new caller_token into the FPS section. + This should really only be called to regenerate the caller token. 
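A minimal sketch of opening an FPS connection (note the default host above is the FPS sandbox) and installing a payment instruction; the credentials are placeholders, and the token type and caller reference fall back to the defaults shown above.

from boto.fps.connection import FPSConnection

fps = FPSConnection(aws_access_key_id='<access-key>',
                    aws_secret_access_key='<secret-key>')
# Sends an InstallPaymentInstruction request and returns the raw HTTP response;
# install_caller_instruction()/install_recipient_instruction() below wrap this
# call and pull the TokenId out of the response body.
response = fps.install_payment_instruction("MyRole=='Caller';")
print response.status, response.reason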
+ """ + response = self.install_payment_instruction("MyRole=='Caller';", token_type=token_type, transaction_id=transaction_id) + body = response.read() + if(response.status == 200): + rs = ResultSet() + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + caller_token = rs.TokenId + try: + boto.config.save_system_option("FPS", "caller_token", caller_token) + except(IOError): + boto.config.save_user_option("FPS", "caller_token", caller_token) + return caller_token + else: + raise FPSResponseError(response.status, response.reason, body) + + def install_recipient_instruction(self, token_type="Unrestricted", transaction_id=None): + """ + Set us up as a Recipient + This will install a new caller_token into the FPS section. + This should really only be called to regenerate the recipient token. + """ + response = self.install_payment_instruction("MyRole=='Recipient';", token_type=token_type, transaction_id=transaction_id) + body = response.read() + if(response.status == 200): + rs = ResultSet() + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + recipient_token = rs.TokenId + try: + boto.config.save_system_option("FPS", "recipient_token", recipient_token) + except(IOError): + boto.config.save_user_option("FPS", "recipient_token", recipient_token) + + return recipient_token + else: + raise FPSResponseError(response.status, response.reason, body) + + def make_url(self, returnURL, paymentReason, pipelineName, **params): + """ + Generate the URL with the signature required for a transaction + """ + params['callerKey'] = str(self.aws_access_key_id) + params['returnURL'] = str(returnURL) + params['paymentReason'] = str(paymentReason) + params['pipelineName'] = pipelineName + + if(not params.has_key('callerReference')): + params['callerReference'] = str(uuid.uuid4()) + + url = "" + keys = params.keys() + keys.sort() + for k in keys: + url += "&%s=%s" % (k, urllib.quote_plus(str(params[k]))) + + url = "/cobranded-ui/actions/start?%s" % ( url[1:]) + signature= boto.utils.encode(self.aws_secret_access_key, url, True) + return "https://authorize.payments-sandbox.amazon.com%s&awsSignature=%s" % (url, signature) + + def make_payment(self, amount, sender_token, charge_fee_to="Recipient", reference=None, senderReference=None, recipientReference=None, senderDescription=None, recipientDescription=None, callerDescription=None, metadata=None, transactionDate=None): + """ + Make a payment transaction + You must specify the amount and the sender token. 
+ """ + params = {} + params['RecipientTokenId'] = boto.config.get("FPS", "recipient_token") + params['CallerTokenId'] = boto.config.get("FPS", "caller_token") + params['SenderTokenId'] = sender_token + params['TransactionAmount.Amount'] = str(amount) + params['TransactionAmount.CurrencyCode'] = "USD" + params['ChargeFeeTo'] = charge_fee_to + + if(transactionDate != None): + params['TransactionDate'] = transactionDate + if(senderReference != None): + params['SenderReference'] = senderReference + if(recipientReference != None): + params['RecipientReference'] = recipientReference + if(senderDescription != None): + params['SenderDescription'] = senderDescription + if(recipientDescription != None): + params['RecipientDescription'] = recipientDescription + if(callerDescription != None): + params['CallerDescription'] = callerDescription + if(metadata != None): + params['MetaData'] = metadata + if(transactionDate != None): + params['TransactionDate'] = transactionDate + if(reference == None): + reference = uuid.uuid4() + params['CallerReference'] = reference + + response = self.make_request("Pay", params) + body = response.read() + if(response.status == 200): + rs = ResultSet() + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + else: + raise FPSResponseError(response.status, response.reason, body) diff --git a/vendor/boto/boto/handler.py b/vendor/boto/boto/handler.py new file mode 100644 index 000000000000..525f9c9a6cd7 --- /dev/null +++ b/vendor/boto/boto/handler.py @@ -0,0 +1,46 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
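The small SAX handler in the file that follows is the glue behind all of the response parsing above: it keeps a stack of nodes and hands each element to the current node's startElement/endElement. A minimal sketch of the pattern, mirroring how the FPS code above drives it; the XML string is illustrative only.

import xml.sax
from boto import handler
from boto.resultset import ResultSet

rs = ResultSet()
h = handler.XmlHandler(rs, None)     # second argument is the connection; unused here
xml.sax.parseString('<Response><TokenId>T-EXAMPLE</TokenId></Response>', h)
print rs.TokenId                     # simple elements end up as ResultSet attributes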
+ +import xml.sax + +class XmlHandler(xml.sax.ContentHandler): + + def __init__(self, root_node, connection): + self.connection = connection + self.nodes = [('root', root_node)] + self.current_text = '' + + def startElement(self, name, attrs): + self.current_text = '' + new_node = self.nodes[-1][1].startElement(name, attrs, self.connection) + if new_node != None: + self.nodes.append((name, new_node)) + + def endElement(self, name): + self.nodes[-1][1].endElement(name, self.current_text, self.connection) + if self.nodes[-1][0] == name: + self.nodes.pop() + self.current_text = '' + + def characters(self, content): + self.current_text += content + + diff --git a/vendor/boto/boto/manage/__init__.py b/vendor/boto/boto/manage/__init__.py new file mode 100644 index 000000000000..49d029ba2c9d --- /dev/null +++ b/vendor/boto/boto/manage/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/vendor/boto/boto/manage/cmdshell.py b/vendor/boto/boto/manage/cmdshell.py new file mode 100644 index 000000000000..5d287ab7c973 --- /dev/null +++ b/vendor/boto/boto/manage/cmdshell.py @@ -0,0 +1,169 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
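The command-shell helpers below wrap paramiko for remote servers (SSHClient) and subprocess for the local machine (LocalClient). A minimal sketch of the remote case; `server` is assumed to be a boto.manage.server.Server with a reachable hostname and an ssh_key_file set. (Note that LocalClient.run(), as written further down, takes no command argument and reads an unset self.command, so only the SSH path is sketched here.)

from boto.manage.cmdshell import SSHClient

ssh = SSHClient(server, uname='root')        # server: an assumed boto.manage.server.Server
status, output = ssh.run('uptime')           # (exit status, combined stdout + stderr)
print output
ssh.put_file('/tmp/app.conf', '/etc/app.conf')
print ssh.exists('/etc/app.conf')            # 1 if the remote path exists, else 0
ssh.close()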
+ +from boto.mashups.interactive import interactive_shell +import boto +import os +import time +import shutil +import StringIO +import paramiko +import socket +import subprocess + + +class SSHClient(object): + + def __init__(self, server, host_key_file='~/.ssh/known_hosts', uname='root'): + self.server = server + self.host_key_file = host_key_file + self.uname = uname + self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file) + self._ssh_client = paramiko.SSHClient() + self._ssh_client.load_system_host_keys() + self._ssh_client.load_host_keys(os.path.expanduser(host_key_file)) + self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.connect() + + def connect(self): + retry = 0 + while retry < 5: + try: + self._ssh_client.connect(self.server.hostname, username=self.uname, pkey=self._pkey) + return + except socket.error, (value,message): + if value == 61: + print 'SSH Connection refused, will retry in 5 seconds' + time.sleep(5) + retry += 1 + else: + raise + except paramiko.BadHostKeyException: + print "%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname + print 'Edit that file to remove the entry and then hit return to try again' + raw_input('Hit Enter when ready') + retry += 1 + except EOFError: + print 'Unexpected Error from SSH Connection, retry in 5 seconds' + time.sleep(5) + retry += 1 + print 'Could not establish SSH connection' + + def get_file(self, src, dst): + sftp_client = self._ssh_client.open_sftp() + sftp_client.get(src, dst) + + def put_file(self, src, dst): + sftp_client = self._ssh_client.open_sftp() + sftp_client.put(src, dst) + + def listdir(self, path): + sftp_client = self._ssh_client.open_sftp() + return sftp_client.listdir(path) + + def open_sftp(self): + return self._ssh_client.open_sftp() + + def isdir(self, path): + status = self.run('[ -d %s ] || echo "FALSE"' % path) + if status[1].startswith('FALSE'): + return 0 + return 1 + + def exists(self, path): + status = self.run('[ -a %s ] || echo "FALSE"' % path) + if status[1].startswith('FALSE'): + return 0 + return 1 + + def shell(self): + channel = self._ssh_client.invoke_shell() + interactive_shell(channel) + + def run(self, command): + boto.log.info('running:%s on %s' % (command, self.server.instance_id)) + log_fp = StringIO.StringIO() + status = 0 + try: + t = self._ssh_client.exec_command(command) + except paramiko.SSHException: + status = 1 + log_fp.write(t[1].read()) + log_fp.write(t[2].read()) + t[0].close() + t[1].close() + t[2].close() + boto.log.info('output: %s' % log_fp.getvalue()) + return (status, log_fp.getvalue()) + + def close(self): + transport = self._ssh_client.get_transport() + transport.close() + self.server.reset_cmdshell() + +class LocalClient(object): + + def __init__(self, server, host_key_file=None, uname='root'): + self.server = server + self.host_key_file = host_key_file + self.uname = uname + + def get_file(self, src, dst): + shutil.copyfile(src, dst) + + def put_file(self, src, dst): + shutil.copyfile(src, dst) + + def listdir(self, path): + return os.listdir(path) + + def isdir(self, path): + return os.path.isdir(path) + + def exists(self, path): + return os.path.exists(path) + + def shell(self): + raise NotImplementedError, 'shell not supported with LocalClient' + + def run(self): + boto.log.info('running:%s' % self.command) + log_fp = StringIO.StringIO() + process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + while process.poll() == None: + 
time.sleep(1) + t = process.communicate() + log_fp.write(t[0]) + log_fp.write(t[1]) + boto.log.info(log_fp.getvalue()) + boto.log.info('output: %s' % log_fp.getvalue()) + return (process.returncode, log_fp.getvalue()) + + def close(self): + pass + +def start(server): + instance_id = boto.config.get('Instance', 'instance-id', None) + if instance_id == server.instance_id: + return LocalClient(server) + else: + return SSHClient(server) diff --git a/vendor/boto/boto/manage/propget.py b/vendor/boto/boto/manage/propget.py new file mode 100644 index 000000000000..45b2ff22210b --- /dev/null +++ b/vendor/boto/boto/manage/propget.py @@ -0,0 +1,64 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +def get(prop, choices=None): + prompt = prop.verbose_name + if not prompt: + prompt = prop.name + if choices: + if callable(choices): + choices = choices() + else: + choices = prop.get_choices() + valid = False + while not valid: + if choices: + min = 1 + max = len(choices) + for i in range(min, max+1): + value = choices[i-1] + if isinstance(value, tuple): + value = value[0] + print '[%d] %s' % (i, value) + value = raw_input('%s [%d-%d]: ' % (prompt, min, max)) + try: + int_value = int(value) + value = choices[int_value-1] + if isinstance(value, tuple): + value = value[1] + valid = True + except ValueError: + print '%s is not a valid choice' % value + except IndexError: + print '%s is not within the range[%d-%d]' % (min, max) + else: + value = raw_input('%s: ' % prompt) + try: + value = prop.validate(value) + if prop.empty(value) and prop.required: + print 'A value is required' + else: + valid = True + except: + print 'Invalid value: %s' % value + return value + diff --git a/vendor/boto/boto/manage/server.py b/vendor/boto/boto/manage/server.py new file mode 100644 index 000000000000..9a1e280cce98 --- /dev/null +++ b/vendor/boto/boto/manage/server.py @@ -0,0 +1,548 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The 
above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +High-level abstraction of an EC2 server +""" +from __future__ import with_statement +import boto.ec2 +from boto.mashups.iobject import IObject +from boto.pyami.config import BotoConfigPath, Config +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty, CalculatedProperty +from boto.manage import propget +from boto.ec2.zone import Zone +from boto.ec2.keypair import KeyPair +import os, time, StringIO +from contextlib import closing +from boto.exception import EC2ResponseError + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', + 'c1.medium', 'c1.xlarge', + 'm2.2xlarge', 'm2.4xlarge'] + +class Bundler(object): + + def __init__(self, server, uname='root'): + from boto.manage.cmdshell import SSHClient + self.server = server + self.uname = uname + self.ssh_client = SSHClient(server, uname=uname) + + def copy_x509(self, key_file, cert_file): + print '\tcopying cert and pk over to /mnt directory on server' + self.ssh_client.open_sftp() + path, name = os.path.split(key_file) + self.remote_key_file = '/mnt/%s' % name + self.ssh_client.put_file(key_file, self.remote_key_file) + path, name = os.path.split(cert_file) + self.remote_cert_file = '/mnt/%s' % name + self.ssh_client.put_file(cert_file, self.remote_cert_file) + print '...complete!' 
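A minimal sketch of driving the bundler end to end through the bundle() method defined later in this class; the bucket, prefix, key paths and size are placeholders, and `server` is assumed to be a running boto.manage.server.Server.

bundler = server.get_bundler(uname='root')       # wires up the SSHClient used above
image_id = bundler.bundle(bucket='example-ami-bucket',
                          prefix='webserver-2010-05-27',
                          key_file='/path/to/pk-EXAMPLE.pem',
                          cert_file='/path/to/cert-EXAMPLE.pem',
                          size=10240)             # size of the bundled image, in MB
print image_id                                    # the AMI id returned by register_image()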
+ + def bundle_image(self, prefix, size, ssh_key): + command = "" + if self.uname != 'root': + command = "sudo " + command += 'ec2-bundle-vol ' + command += '-c %s -k %s ' % (self.remote_cert_file, self.remote_key_file) + command += '-u %s ' % self.server._reservation.owner_id + command += '-p %s ' % prefix + command += '-s %d ' % size + command += '-d /mnt ' + if self.server.instance_type == 'm1.small' or self.server.instance_type == 'c1.medium': + command += '-r i386' + else: + command += '-r x86_64' + return command + + def upload_bundle(self, bucket, prefix, ssh_key): + command = "" + if self.uname != 'root': + command = "sudo " + command += 'ec2-upload-bundle ' + command += '-m /mnt/%s.manifest.xml ' % prefix + command += '-b %s ' % bucket + command += '-a %s ' % self.server.ec2.aws_access_key_id + command += '-s %s ' % self.server.ec2.aws_secret_access_key + return command + + def bundle(self, bucket=None, prefix=None, key_file=None, cert_file=None, + size=None, ssh_key=None, fp=None, clear_history=True): + iobject = IObject() + if not bucket: + bucket = iobject.get_string('Name of S3 bucket') + if not prefix: + prefix = iobject.get_string('Prefix for AMI file') + if not key_file: + key_file = iobject.get_filename('Path to RSA private key file') + if not cert_file: + cert_file = iobject.get_filename('Path to RSA public cert file') + if not size: + size = iobject.get_int('Size (in MB) of bundled image') + if not ssh_key: + ssh_key = self.server.get_ssh_key_file() + self.copy_x509(key_file, cert_file) + if not fp: + fp = StringIO.StringIO() + fp.write('sudo mv %s /mnt/boto.cfg; ' % BotoConfigPath) + fp.write('mv ~/.ssh/authorized_keys /mnt/authorized_keys; ') + if clear_history: + fp.write('history -c; ') + fp.write(self.bundle_image(prefix, size, ssh_key)) + fp.write('; ') + fp.write(self.upload_bundle(bucket, prefix, ssh_key)) + fp.write('; ') + fp.write('sudo mv /mnt/boto.cfg %s; ' % BotoConfigPath) + fp.write('mv /mnt/authorized_keys ~/.ssh/authorized_keys') + command = fp.getvalue() + print 'running the following command on the remote server:' + print command + t = self.ssh_client.run(command) + print '\t%s' % t[0] + print '\t%s' % t[1] + print '...complete!' + print 'registering image...' + self.image_id = self.server.ec2.register_image(name=prefix, image_location='%s/%s.manifest.xml' % (bucket, prefix)) + return self.image_id + +class CommandLineGetter(object): + + def get_ami_list(self): + my_amis = [] + for ami in self.ec2.get_all_images(): + # hack alert, need a better way to do this! 
+ if ami.location.find('pyami') >= 0: + my_amis.append((ami.location, ami)) + return my_amis + + def get_region(self, params): + region = params.get('region', None) + if isinstance(region, str) or isinstance(region, unicode): + region = boto.ec2.get_region(region) + params['region'] = region + if not region: + prop = self.cls.find_property('region_name') + params['region'] = propget.get(prop, choices=boto.ec2.regions) + + def get_name(self, params): + if not params.get('name', None): + prop = self.cls.find_property('name') + params['name'] = propget.get(prop) + + def get_description(self, params): + if not params.get('description', None): + prop = self.cls.find_property('description') + params['description'] = propget.get(prop) + + def get_instance_type(self, params): + if not params.get('instance_type', None): + prop = StringProperty(name='instance_type', verbose_name='Instance Type', + choices=InstanceTypes) + params['instance_type'] = propget.get(prop) + + def get_quantity(self, params): + if not params.get('quantity', None): + prop = IntegerProperty(name='quantity', verbose_name='Number of Instances') + params['quantity'] = propget.get(prop) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get_ami_id(self, params): + ami = params.get('ami', None) + if isinstance(ami, str) or isinstance(ami, unicode): + for a in self.ec2.get_all_images(): + if a.id == ami: + params['ami'] = a + if not params.get('ami', None): + prop = StringProperty(name='ami', verbose_name='AMI', + choices=self.get_ami_list) + params['ami'] = propget.get(prop) + + def get_group(self, params): + group = params.get('group', None) + if isinstance(group, str) or isinstance(group, unicode): + group_list = self.ec2.get_all_security_groups() + for g in group_list: + if g.name == group: + group = g + params['group'] = g + if not group: + prop = StringProperty(name='group', verbose_name='EC2 Security Group', + choices=self.ec2.get_all_security_groups) + params['group'] = propget.get(prop) + + def get_key(self, params): + keypair = params.get('keypair', None) + if isinstance(keypair, str) or isinstance(keypair, unicode): + key_list = self.ec2.get_all_key_pairs() + for k in key_list: + if k.name == keypair: + keypair = k.name + params['keypair'] = k.name + if not keypair: + prop = StringProperty(name='keypair', verbose_name='EC2 KeyPair', + choices=self.ec2.get_all_key_pairs) + params['keypair'] = propget.get(prop).name + + def get(self, cls, params): + self.cls = cls + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_name(params) + self.get_description(params) + self.get_instance_type(params) + self.get_zone(params) + self.get_quantity(params) + self.get_ami_id(params) + self.get_group(params) + self.get_key(params) + +class Server(Model): + + # + # The properties of this object consists of real properties for data that + # is not already stored in EC2 somewhere (e.g. name, description) plus + # calculated properties for all of the properties that are already in + # EC2 (e.g. hostname, security groups, etc.) 
+ # + name = StringProperty(unique=True, verbose_name="Name") + description = StringProperty(verbose_name="Description") + region_name = StringProperty(verbose_name="EC2 Region Name") + instance_id = StringProperty(verbose_name="EC2 Instance ID") + elastic_ip = StringProperty(verbose_name="EC2 Elastic IP Address") + production = BooleanProperty(verbose_name="Is This Server Production", default=False) + ami_id = CalculatedProperty(verbose_name="AMI ID", calculated_type=str, use_method=True) + zone = CalculatedProperty(verbose_name="Availability Zone Name", calculated_type=str, use_method=True) + hostname = CalculatedProperty(verbose_name="Public DNS Name", calculated_type=str, use_method=True) + private_hostname = CalculatedProperty(verbose_name="Private DNS Name", calculated_type=str, use_method=True) + groups = CalculatedProperty(verbose_name="Security Groups", calculated_type=list, use_method=True) + security_group = CalculatedProperty(verbose_name="Primary Security Group Name", calculated_type=str, use_method=True) + key_name = CalculatedProperty(verbose_name="Key Name", calculated_type=str, use_method=True) + instance_type = CalculatedProperty(verbose_name="Instance Type", calculated_type=str, use_method=True) + status = CalculatedProperty(verbose_name="Current Status", calculated_type=str, use_method=True) + launch_time = CalculatedProperty(verbose_name="Server Launch Time", calculated_type=str, use_method=True) + console_output = CalculatedProperty(verbose_name="Console Output", calculated_type=file, use_method=True) + + packages = [] + plugins = [] + + @classmethod + def add_credentials(cls, cfg, aws_access_key_id, aws_secret_access_key): + if not cfg.has_section('Credentials'): + cfg.add_section('Credentials') + cfg.set('Credentials', 'aws_access_key_id', aws_access_key_id) + cfg.set('Credentials', 'aws_secret_access_key', aws_secret_access_key) + if not cfg.has_section('DB_Server'): + cfg.add_section('DB_Server') + cfg.set('DB_Server', 'db_type', 'SimpleDB') + cfg.set('DB_Server', 'db_name', cls._manager.domain.name) + + ''' + Create a new instance based on the specified configuration file or the specified + configuration and the passed in parameters. + + If the config_file argument is not None, the configuration is read from there. + Otherwise, the cfg argument is used. + + The config file may include other config files with a #import reference. The included + config files must reside in the same directory as the specified file. + + The logical_volume argument, if supplied, will be used to get the current physical + volume ID and use that as an override of the value specified in the config file. This + may be useful for debugging purposes when you want to debug with a production config + file but a test Volume. + + The dictionary argument may be used to override any EC2 configuration values in the + config file. 
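A minimal sketch of that call; the config path and keyword overrides are placeholders, and anything not supplied (region, name, AMI, key pair, security group, ...) is prompted for interactively by the CommandLineGetter above.

from boto.manage.server import Server

servers = Server.create(config_file='/etc/pyami/webserver.cfg',
                        description='front-end web server',
                        quantity=1)
for s in servers:
    print s.name, s.instance_id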
+ ''' + @classmethod + def create(cls, config_file=None, logical_volume = None, cfg = None, **params): + if config_file: + cfg = Config(path=config_file) + if cfg.has_section('EC2'): + # include any EC2 configuration values that aren't specified in params: + for option in cfg.options('EC2'): + if option not in params: + params[option] = cfg.get('EC2', option) + getter = CommandLineGetter() + getter.get(cls, params) + region = params.get('region') + ec2 = region.connect() + cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key) + ami = params.get('ami') + kp = params.get('keypair') + group = params.get('group') + zone = params.get('zone') + # deal with possibly passed in logical volume: + if logical_volume != None: + cfg.set('EBS', 'logical_volume_name', logical_volume.name) + cfg_fp = StringIO.StringIO() + cfg.write(cfg_fp) + # deal with the possibility that zone and/or keypair are strings read from the config file: + if isinstance(zone, Zone): + zone = zone.name + if isinstance(kp, KeyPair): + kp = kp.name + reservation = ami.run(min_count=1, + max_count=params.get('quantity', 1), + key_name=kp, + security_groups=[group], + instance_type=params.get('instance_type'), + placement = zone, + user_data = cfg_fp.getvalue()) + l = [] + i = 0 + elastic_ip = params.get('elastic_ip') + instances = reservation.instances + if elastic_ip != None and instances.__len__() > 0: + instance = instances[0] + print 'Waiting for instance to start so we can set its elastic IP address...' + while instance.update() != 'running': + time.sleep(1) + instance.use_ip(elastic_ip) + print 'set the elastic IP of the first instance to %s' % elastic_ip + for instance in instances: + s = cls() + s.ec2 = ec2 + s.name = params.get('name') + '' if i==0 else str(i) + s.description = params.get('description') + s.region_name = region.name + s.instance_id = instance.id + if elastic_ip and i == 0: + s.elastic_ip = elastic_ip + s.put() + l.append(s) + i += 1 + return l + + @classmethod + def create_from_instance_id(cls, instance_id, name, description=''): + regions = boto.ec2.regions() + for region in regions: + ec2 = region.connect() + try: + rs = ec2.get_all_instances([instance_id]) + except: + rs = [] + if len(rs) == 1: + s = cls() + s.ec2 = ec2 + s.name = name + s.description = description + s.region_name = region.name + s.instance_id = instance_id + s._reservation = rs[0] + for instance in s._reservation.instances: + if instance.id == instance_id: + s._instance = instance + s.put() + return s + return None + + @classmethod + def create_from_current_instances(cls): + servers = [] + regions = boto.ec2.regions() + for region in regions: + ec2 = region.connect() + rs = ec2.get_all_instances() + for reservation in rs: + for instance in reservation.instances: + try: + Server.find(instance_id=instance.id).next() + boto.log.info('Server for %s already exists' % instance.id) + except StopIteration: + s = cls() + s.ec2 = ec2 + s.name = instance.id + s.region_name = region.name + s.instance_id = instance.id + s._reservation = reservation + s.put() + servers.append(s) + return servers + + def __init__(self, id=None, **kw): + Model.__init__(self, id, **kw) + self.ssh_key_file = None + self.ec2 = None + self._cmdshell = None + self._reservation = None + self._instance = None + self._setup_ec2() + + def _setup_ec2(self): + if self.ec2 and self._instance and self._reservation: + return + if self.id: + if self.region_name: + for region in boto.ec2.regions(): + if region.name == self.region_name: + self.ec2 = 
region.connect() + if self.instance_id and not self._instance: + try: + rs = self.ec2.get_all_instances([self.instance_id]) + if len(rs) >= 1: + for instance in rs[0].instances: + if instance.id == self.instance_id: + self._reservation = rs[0] + self._instance = instance + except EC2ResponseError: + pass + + def _status(self): + status = '' + if self._instance: + self._instance.update() + status = self._instance.state + return status + + def _hostname(self): + hostname = '' + if self._instance: + hostname = self._instance.public_dns_name + return hostname + + def _private_hostname(self): + hostname = '' + if self._instance: + hostname = self._instance.private_dns_name + return hostname + + def _instance_type(self): + it = '' + if self._instance: + it = self._instance.instance_type + return it + + def _launch_time(self): + lt = '' + if self._instance: + lt = self._instance.launch_time + return lt + + def _console_output(self): + co = '' + if self._instance: + co = self._instance.get_console_output() + return co + + def _groups(self): + gn = [] + if self._reservation: + gn = self._reservation.groups + return gn + + def _security_group(self): + groups = self._groups() + if len(groups) >= 1: + return groups[0].id + return "" + + def _zone(self): + zone = None + if self._instance: + zone = self._instance.placement + return zone + + def _key_name(self): + kn = None + if self._instance: + kn = self._instance.key_name + return kn + + def put(self): + Model.put(self) + self._setup_ec2() + + def delete(self): + if self.production: + raise ValueError, "Can't delete a production server" + #self.stop() + Model.delete(self) + + def stop(self): + if self.production: + raise ValueError, "Can't delete a production server" + if self._instance: + self._instance.stop() + + def terminate(self): + if self.production: + raise ValueError, "Can't delete a production server" + if self._instance: + self._instance.terminate() + + def reboot(self): + if self._instance: + self._instance.reboot() + + def wait(self): + while self.status != 'running': + time.sleep(5) + + def get_ssh_key_file(self): + if not self.ssh_key_file: + ssh_dir = os.path.expanduser('~/.ssh') + if os.path.isdir(ssh_dir): + ssh_file = os.path.join(ssh_dir, '%s.pem' % self.key_name) + if os.path.isfile(ssh_file): + self.ssh_key_file = ssh_file + if not self.ssh_key_file: + iobject = IObject() + self.ssh_key_file = iobject.get_filename('Path to OpenSSH Key file') + return self.ssh_key_file + + def get_cmdshell(self): + if not self._cmdshell: + import cmdshell + self.get_ssh_key_file() + self._cmdshell = cmdshell.start(self) + return self._cmdshell + + def reset_cmdshell(self): + self._cmdshell = None + + def run(self, command): + with closing(self.get_cmdshell()) as cmd: + status = cmd.run(command) + return status + + def get_bundler(self, uname='root'): + self.get_ssh_key_file() + return Bundler(self, uname) + + def get_ssh_client(self, uname='root'): + from boto.manage.cmdshell import SSHClient + self.get_ssh_key_file() + return SSHClient(self, uname=uname) + + def install(self, pkg): + return self.run('apt-get -y install %s' % pkg) + + + diff --git a/vendor/boto/boto/manage/task.py b/vendor/boto/boto/manage/task.py new file mode 100644 index 000000000000..2f9d7d001705 --- /dev/null +++ b/vendor/boto/boto/manage/task.py @@ -0,0 +1,175 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# 
"Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto +from boto.sdb.db.property import StringProperty, DateTimeProperty, IntegerProperty +from boto.sdb.db.model import Model +import datetime, subprocess, StringIO, time + +def check_hour(val): + if val == '*': + return + if int(val) < 0 or int(val) > 23: + raise ValueError + +class Task(Model): + + """ + A scheduled, repeating task that can be executed by any participating servers. + The scheduling is similar to cron jobs. Each task has an hour attribute. + The allowable values for hour are [0-23|*]. + + To keep the operation reasonably efficient and not cause excessive polling, + the minimum granularity of a Task is hourly. Some examples: + + hour='*' - the task would be executed each hour + hour='3' - the task would be executed at 3AM GMT each day. + + """ + name = StringProperty() + hour = StringProperty(required=True, validator=check_hour, default='*') + command = StringProperty(required=True) + last_executed = DateTimeProperty() + last_status = IntegerProperty() + last_output = StringProperty() + message_id = StringProperty() + + @classmethod + def start_all(cls, queue_name): + for task in cls.all(): + task.start(queue_name) + + def __init__(self, id=None, **kw): + Model.__init__(self, id, **kw) + self.hourly = self.hour == '*' + self.daily = self.hour != '*' + self.now = datetime.datetime.utcnow() + + def check(self): + """ + Determine how long until the next scheduled time for a Task. + Returns the number of seconds until the next scheduled time or zero + if the task needs to be run immediately. + If it's an hourly task and it's never been run, run it now. + If it's a daily task and it's never been run and the hour is right, run it now. 
+ """ + boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed)) + + if self.hourly and not self.last_executed: + return 0 + + if self.daily and not self.last_executed: + if int(self.hour) == self.now.hour: + return 0 + else: + return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60 + + delta = self.now - self.last_executed + if self.hourly: + if delta.seconds >= 60*60: + return 0 + else: + return 60*60 - delta.seconds + else: + if int(self.hour) == self.now.hour: + if delta.days >= 1: + return 0 + else: + return 82800 # 23 hours, just to be safe + else: + return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60 + + def _run(self, msg, vtimeout): + boto.log.info('Task[%s] - running:%s' % (self.name, self.command)) + log_fp = StringIO.StringIO() + process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + nsecs = 5 + current_timeout = vtimeout + while process.poll() == None: + boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout)) + if nsecs >= current_timeout: + current_timeout += vtimeout + boto.log.info('Task[%s] - setting timeout to %d seconds' % (self.name, current_timeout)) + if msg: + msg.change_visibility(current_timeout) + time.sleep(5) + nsecs += 5 + t = process.communicate() + log_fp.write(t[0]) + log_fp.write(t[1]) + boto.log.info('Task[%s] - output: %s' % (self.name, log_fp.getvalue())) + self.last_executed = self.now + self.last_status = process.returncode + self.last_output = log_fp.getvalue()[0:1023] + + def run(self, msg, vtimeout=60): + delay = self.check() + boto.log.info('Task[%s] - delay=%s seconds' % (self.name, delay)) + if delay == 0: + self._run(msg, vtimeout) + queue = msg.queue + new_msg = queue.new_message(self.id) + new_msg = queue.write(new_msg) + self.message_id = new_msg.id + self.put() + boto.log.info('Task[%s] - new message id=%s' % (self.name, new_msg.id)) + msg.delete() + boto.log.info('Task[%s] - deleted message %s' % (self.name, msg.id)) + else: + boto.log.info('new_vtimeout: %d' % delay) + msg.change_visibility(delay) + + def start(self, queue_name): + boto.log.info('Task[%s] - starting with queue: %s' % (self.name, queue_name)) + queue = boto.lookup('sqs', queue_name) + msg = queue.new_message(self.id) + msg = queue.write(msg) + self.message_id = msg.id + self.put() + boto.log.info('Task[%s] - start successful' % self.name) + +class TaskPoller(object): + + def __init__(self, queue_name): + self.sqs = boto.connect_sqs() + self.queue = self.sqs.lookup(queue_name) + + def poll(self, wait=60, vtimeout=60): + while 1: + m = self.queue.read(vtimeout) + if m: + task = Task.get_by_id(m.get_body()) + if task: + if not task.message_id or m.id == task.message_id: + boto.log.info('Task[%s] - read message %s' % (task.name, m.id)) + task.run(m, vtimeout) + else: + boto.log.info('Task[%s] - found extraneous message, ignoring' % task.name) + else: + time.sleep(wait) + + + + + + diff --git a/vendor/boto/boto/manage/test_manage.py b/vendor/boto/boto/manage/test_manage.py new file mode 100644 index 000000000000..e0b032a9b87b --- /dev/null +++ b/vendor/boto/boto/manage/test_manage.py @@ -0,0 +1,34 @@ +from boto.manage.server import Server +from boto.manage.volume import Volume +import time + +print '--> Creating New Volume' +volume = Volume.create() +print volume + +print '--> Creating New Server' +server_list = Server.create() +server = server_list[0] +print server + +print '----> Waiting for Server to 
start up' +while server.status != 'running': + print '*' + time.sleep(10) +print '----> Server is running' + +print '--> Run "df -k" on Server' +status = server.run('df -k') +print status[1] + +print '--> Now run volume.make_ready to make the volume ready to use on server' +volume.make_ready(server) + +print '--> Run "df -k" on Server' +status = server.run('df -k') +print status[1] + +print '--> Do an "ls -al" on the new filesystem' +status = server.run('ls -al %s' % volume.mount_point) +print status[1] + diff --git a/vendor/boto/boto/manage/volume.py b/vendor/boto/boto/manage/volume.py new file mode 100644 index 000000000000..66a458f67f2e --- /dev/null +++ b/vendor/boto/boto/manage/volume.py @@ -0,0 +1,420 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
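The Volume model below mirrors the Server model: an SDB record that tracks an EBS volume, plus helpers to attach, format (XFS) and mount it. A minimal sketch of adopting an existing volume and preparing it on a server; the region, volume id, name and mount point are placeholders, and `server` is an assumed running boto.manage.server.Server.

from boto.manage.volume import Volume

v = Volume.create_from_volume_id('us-east-1', 'vol-EXAMPLE', 'data-volume')
v.mount_point = '/data'
v.device = '/dev/sdp'               # matches the model's default device
v.put()
v.make_ready(server)                # install xfsprogs, attach, wait for the device, mkfs if needed, mount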
+ +from __future__ import with_statement +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty, ReferenceProperty, CalculatedProperty +from boto.manage.server import Server +from boto.manage import propget +import boto.ec2 +import time +import traceback +from contextlib import closing +import dateutil.parser +import datetime + + +class CommandLineGetter(object): + + def get_region(self, params): + if not params.get('region', None): + prop = self.cls.find_property('region_name') + params['region'] = propget.get(prop, choices=boto.ec2.regions) + + def get_zone(self, params): + if not params.get('zone', None): + prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone', + choices=self.ec2.get_all_zones) + params['zone'] = propget.get(prop) + + def get_name(self, params): + if not params.get('name', None): + prop = self.cls.find_property('name') + params['name'] = propget.get(prop) + + def get_size(self, params): + if not params.get('size', None): + prop = IntegerProperty(name='size', verbose_name='Size (GB)') + params['size'] = propget.get(prop) + + def get_mount_point(self, params): + if not params.get('mount_point', None): + prop = self.cls.find_property('mount_point') + params['mount_point'] = propget.get(prop) + + def get_device(self, params): + if not params.get('device', None): + prop = self.cls.find_property('device') + params['device'] = propget.get(prop) + + def get(self, cls, params): + self.cls = cls + self.get_region(params) + self.ec2 = params['region'].connect() + self.get_zone(params) + self.get_name(params) + self.get_size(params) + self.get_mount_point(params) + self.get_device(params) + +class Volume(Model): + + name = StringProperty(required=True, unique=True, verbose_name='Name') + region_name = StringProperty(required=True, verbose_name='EC2 Region') + zone_name = StringProperty(required=True, verbose_name='EC2 Zone') + mount_point = StringProperty(verbose_name='Mount Point') + device = StringProperty(verbose_name="Device Name", default='/dev/sdp') + volume_id = StringProperty(required=True) + past_volume_ids = ListProperty(item_type=str) + server = ReferenceProperty(Server, collection_name='volumes', + verbose_name='Server Attached To') + volume_state = CalculatedProperty(verbose_name="Volume State", + calculated_type=str, use_method=True) + attachment_state = CalculatedProperty(verbose_name="Attachment State", + calculated_type=str, use_method=True) + size = CalculatedProperty(verbose_name="Size (GB)", + calculated_type=int, use_method=True) + + @classmethod + def create(cls, **params): + getter = CommandLineGetter() + getter.get(cls, params) + region = params.get('region') + ec2 = region.connect() + zone = params.get('zone') + size = params.get('size') + ebs_volume = ec2.create_volume(size, zone.name) + v = cls() + v.ec2 = ec2 + v.volume_id = ebs_volume.id + v.name = params.get('name') + v.mount_point = params.get('mount_point') + v.device = params.get('device') + v.region_name = region.name + v.zone_name = zone.name + v.put() + return v + + @classmethod + def create_from_volume_id(cls, region_name, volume_id, name): + vol = None + ec2 = boto.ec2.connect_to_region(region_name) + rs = ec2.get_all_volumes([volume_id]) + if len(rs) == 1: + v = rs[0] + vol = cls() + vol.volume_id = v.id + vol.name = name + vol.region_name = v.region.name + vol.zone_name = v.zone + vol.put() + return vol + + def create_from_latest_snapshot(self, name, size=None): + snapshot = self.get_snapshots()[-1] + return 
self.create_from_snapshot(name, snapshot, size) + + def create_from_snapshot(self, name, snapshot, size=None): + if size < self.size: + size = self.size + ec2 = self.get_ec2_connection() + if self.zone_name == None or self.zone_name == '': + # deal with the migration case where the zone is not set in the logical volume: + current_volume = ec2.get_all_volumes([self.volume_id])[0] + self.zone_name = current_volume.zone + ebs_volume = ec2.create_volume(size, self.zone_name, snapshot) + v = Volume() + v.ec2 = self.ec2 + v.volume_id = ebs_volume.id + v.name = name + v.mount_point = self.mount_point + v.device = self.device + v.region_name = self.region_name + v.zone_name = self.zone_name + v.put() + return v + + def get_ec2_connection(self): + if self.server: + return self.server.ec2 + if not hasattr(self, 'ec2') or self.ec2 == None: + self.ec2 = boto.ec2.connect_to_region(self.region_name) + return self.ec2 + + def _volume_state(self): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + return rs[0].volume_state() + + def _attachment_state(self): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + return rs[0].attachment_state() + + def _size(self): + if not hasattr(self, '__size'): + ec2 = self.get_ec2_connection() + rs = ec2.get_all_volumes([self.volume_id]) + self.__size = rs[0].size + return self.__size + + def install_xfs(self): + if self.server: + self.server.install('xfsprogs xfsdump') + + def get_snapshots(self): + """ + Returns a list of all completed snapshots for this volume ID. + """ + ec2 = self.get_ec2_connection() + rs = ec2.get_all_snapshots() + all_vols = [self.volume_id] + self.past_volume_ids + snaps = [] + for snapshot in rs: + if snapshot.volume_id in all_vols: + if snapshot.progress == '100%': + snapshot.date = dateutil.parser.parse(snapshot.start_time) + snapshot.keep = True + snaps.append(snapshot) + snaps.sort(cmp=lambda x,y: cmp(x.date, y.date)) + return snaps + + def attach(self, server=None): + if self.attachment_state == 'attached': + print 'already attached' + return None + if server: + self.server = server + self.put() + ec2 = self.get_ec2_connection() + ec2.attach_volume(self.volume_id, self.server.instance_id, self.device) + + def detach(self, force=False): + state = self.attachment_state + if state == 'available' or state == None or state == 'detaching': + print 'already detached' + return None + ec2 = self.get_ec2_connection() + ec2.detach_volume(self.volume_id, self.server.instance_id, self.device, force) + self.server = None + self.put() + + def checkfs(self, use_cmd=None): + if self.server == None: + raise ValueError, 'server attribute must be set to run this command' + # detemine state of file system on volume, only works if attached + if use_cmd: + cmd = use_cmd + else: + cmd = self.server.get_cmdshell() + status = cmd.run('xfs_check %s' % self.device) + if not use_cmd: + cmd.close() + if status[1].startswith('bad superblock magic number 0'): + return False + return True + + def wait(self): + if self.server == None: + raise ValueError, 'server attribute must be set to run this command' + with closing(self.server.get_cmdshell()) as cmd: + # wait for the volume device to appear + cmd = self.server.get_cmdshell() + while not cmd.exists(self.device): + boto.log.info('%s still does not exist, waiting 10 seconds' % self.device) + time.sleep(10) + + def format(self): + if self.server == None: + raise ValueError, 'server attribute must be set to run this command' + status = None + with 
closing(self.server.get_cmdshell()) as cmd: + if not self.checkfs(cmd): + boto.log.info('make_fs...') + status = cmd.run('mkfs -t xfs %s' % self.device) + return status + + def mount(self): + if self.server == None: + raise ValueError, 'server attribute must be set to run this command' + boto.log.info('handle_mount_point') + with closing(self.server.get_cmdshell()) as cmd: + cmd = self.server.get_cmdshell() + if not cmd.isdir(self.mount_point): + boto.log.info('making directory') + # mount directory doesn't exist so create it + cmd.run("mkdir %s" % self.mount_point) + else: + boto.log.info('directory exists already') + status = cmd.run('mount -l') + lines = status[1].split('\n') + for line in lines: + t = line.split() + if t and t[2] == self.mount_point: + # something is already mounted at the mount point + # unmount that and mount it as /tmp + if t[0] != self.device: + cmd.run('umount %s' % self.mount_point) + cmd.run('mount %s /tmp' % t[0]) + cmd.run('chmod 777 /tmp') + break + # Mount up our new EBS volume onto mount_point + cmd.run("mount %s %s" % (self.device, self.mount_point)) + cmd.run('xfs_growfs %s' % self.mount_point) + + def make_ready(self, server): + self.server = server + self.put() + self.install_xfs() + self.attach() + self.wait() + self.format() + self.mount() + + def freeze(self): + if self.server: + return self.server.run("/usr/sbin/xfs_freeze -f %s" % self.mount_point) + + def unfreeze(self): + if self.server: + return self.server.run("/usr/sbin/xfs_freeze -u %s" % self.mount_point) + + def snapshot(self): + # if this volume is attached to a server + # we need to freeze the XFS file system + try: + self.freeze() + if self.server == None: + snapshot = self.get_ec2_connection().create_snapshot(self.volume_id) + else: + snapshot = self.server.ec2.create_snapshot(self.volume_id) + boto.log.info('Snapshot of Volume %s created: %s' % (self.name, snapshot)) + except Exception: + boto.log.info('Snapshot error') + boto.log.info(traceback.format_exc()) + finally: + status = self.unfreeze() + return status + + def get_snapshot_range(self, snaps, start_date=None, end_date=None): + l = [] + for snap in snaps: + if start_date and end_date: + if snap.date >= start_date and snap.date <= end_date: + l.append(snap) + elif start_date: + if snap.date >= start_date: + l.append(snap) + elif end_date: + if snap.date <= end_date: + l.append(snap) + else: + l.append(snap) + return l + + def trim_snapshots(self, delete=False): + """ + Trim the number of snapshots for this volume. This method always + keeps the oldest snapshot. It then uses the parameters passed in + to determine how many others should be kept. + + The algorithm is to keep all snapshots from the current day. Then + it will keep the first snapshot of the day for the previous seven days. + Then, it will keep the first snapshot of the week for the previous + four weeks. After than, it will keep the first snapshot of the month + for as many months as there are. 
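Continuing the volume sketch above, the retention walk can be previewed before anything is removed: with the default delete=False the method only flips each snapshot's keep flag, so the plan can be inspected and the call repeated with delete=True to actually prune. Note that despite the wording above, the only parameter here is delete; the day/week/month windows are fixed in the code below.

snaps = v.trim_snapshots()              # delete defaults to False: just mark .keep
for snap in snaps:
    print snap.id, snap.date, snap.keep
v.trim_snapshots(delete=True)           # prune everything left unmarked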
+ + """ + snaps = self.get_snapshots() + # Always keep the oldest and the newest + if len(snaps) <= 2: + return snaps + snaps = snaps[1:-1] + now = datetime.datetime.now(snaps[0].date.tzinfo) + midnight = datetime.datetime(year=now.year, month=now.month, + day=now.day, tzinfo=now.tzinfo) + # Keep the first snapshot from each day of the previous week + one_week = datetime.timedelta(days=7, seconds=60*60) + print midnight-one_week, midnight + previous_week = self.get_snapshot_range(snaps, midnight-one_week, midnight) + print previous_week + if not previous_week: + return snaps + current_day = None + for snap in previous_week: + if current_day and current_day == snap.date.day: + snap.keep = False + else: + current_day = snap.date.day + # Get ourselves onto the next full week boundary + if previous_week: + week_boundary = previous_week[0].date + if week_boundary.weekday() != 0: + delta = datetime.timedelta(days=week_boundary.weekday()) + week_boundary = week_boundary - delta + # Keep one within this partial week + partial_week = self.get_snapshot_range(snaps, week_boundary, previous_week[0].date) + if len(partial_week) > 1: + for snap in partial_week[1:]: + snap.keep = False + # Keep the first snapshot of each week for the previous 4 weeks + for i in range(0,4): + weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary) + if len(weeks_worth) > 1: + for snap in weeks_worth[1:]: + snap.keep = False + week_boundary = week_boundary - one_week + # Now look through all remaining snaps and keep one per month + remainder = self.get_snapshot_range(snaps, end_date=week_boundary) + current_month = None + for snap in remainder: + if current_month and current_month == snap.date.month: + snap.keep = False + else: + current_month = snap.date.month + if delete: + for snap in snaps: + if not snap.keep: + boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name)) + snap.delete() + return snaps + + def grow(self, size): + pass + + def copy(self, snapshot): + pass + + def get_snapshot_from_date(self, date): + pass + + def delete(self, delete_ebs_volume=False): + if delete_ebs_volume: + self.detach() + ec2 = self.get_ec2_connection() + ec2.delete_volume(self.volume_id) + Model.delete(self) + + def archive(self): + # snapshot volume, trim snaps, delete volume-id + pass + + diff --git a/vendor/boto/boto/mapreduce/__init__.py b/vendor/boto/boto/mapreduce/__init__.py new file mode 100644 index 000000000000..ac3ddc447416 --- /dev/null +++ b/vendor/boto/boto/mapreduce/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/vendor/boto/boto/mapreduce/lqs.py b/vendor/boto/boto/mapreduce/lqs.py new file mode 100644 index 000000000000..fc76e50f82cd --- /dev/null +++ b/vendor/boto/boto/mapreduce/lqs.py @@ -0,0 +1,152 @@ +import SocketServer, os, datetime, sys, random, time +import simplejson + +class LQSCommand: + + def __init__(self, line): + self.raw_line = line + self.line = self.raw_line.strip() + l = self.line.split(' ') + self.name = l[0] + if len(l) > 1: + self.args = [arg for arg in l[1:] if arg] + else: + self.args = [] + +class LQSMessage(dict): + + def __init__(self, item=None, args=None, jsonvalue=None): + dict.__init__(self) + if jsonvalue: + self.decode(jsonvalue) + else: + self['id'] = '%d_%d' % (int(time.time()), int(random.random()*1000000)) + self['item'] = item + self['args'] = args + + def encode(self): + return simplejson.dumps(self) + + def decode(self, value): + self.update(simplejson.loads(value)) + + def is_empty(self): + if self['item'] == None: + return True + return False + +class LQSServer(SocketServer.UDPServer): + + PORT = 5151 + TIMEOUT = 30 + MAXSIZE = 8192 + + def __init__(self, server_address, RequestHandlerClass, iterator, args=None): + server_address = (server_address, self.PORT) + SocketServer.UDPServer.__init__(self, server_address, RequestHandlerClass) + self.count = 0 + self.iterator = iterator + self.args = args + self.start = datetime.datetime.now() + self.end = None + self.extant = [] + +class LQSHandler(SocketServer.DatagramRequestHandler): + + def get_cmd(self): + return LQSCommand(self.rfile.readline()) + + def build_msg(self): + if not self.server.iterator: + return LQSMessage(None) + try: + item = self.server.iterator.next() + msg = LQSMessage(item, self.server.args) + return msg + except StopIteration: + self.server.iterator = None + return LQSMessage(None) + + def respond(self, msg): + self.wfile.write(msg.encode()) + + def check_extant(self): + if len(self.server.extant) == 0 and not self.server.iterator: + self.server.end = datetime.datetime.now() + delta = self.server.end - self.server.start + print 'Total Processing Time: %s' % delta + print 'Total Messages Processed: %d' % self.server.count + + def do_debug(self, cmd): + args = {'extant' : self.server.extant, + 'count' : self.server.count} + msg = LQSMessage('debug', args) + self.respond(msg) + + def do_next(self, cmd): + out_msg = self.build_msg() + if not out_msg.is_empty(): + self.server.count += 1 + self.server.extant.append(out_msg['id']) + self.respond(out_msg) + + def do_delete(self, cmd): + if len(cmd.args) != 1: + self.error(cmd, 'delete command requires message id') + else: + mid = cmd.args[0] + try: + self.server.extant.remove(mid) + except ValueError: + self.error(cmd, 'message id not found') + args = {'deleted' : True} + msg = LQSMessage(mid, args) + self.respond(msg) + self.check_extant() + + def error(self, cmd, error_msg=None): + args = {'error_msg' : error_msg, + 'cmd_name' : cmd.name, + 'cmd_args' : cmd.args} + msg = LQSMessage('error', args) + self.respond(msg) + + def do_stop(self, cmd): + sys.exit(0) + + def handle(self): + cmd = self.get_cmd() + if hasattr(self, 'do_%s' % cmd.name): + method = getattr(self, 'do_%s' % cmd.name) + method(cmd) + else: + self.error(cmd, 'unrecognized command') + +class 
PersistHandler(LQSHandler): + + def build_msg(self): + if not self.server.iterator: + return LQSMessage(None) + try: + obj = self.server.iterator.next() + msg = LQSMessage(obj.id, self.server.args) + return msg + except StopIteration: + self.server.iterator = None + return LQSMessage(None) + +def test_file(path, args=None): + l = os.listdir(path) + if not args: + args = {} + args['path'] = path + s = LQSServer('', LQSHandler, iter(l), args) + print "Awaiting UDP messages on port %d" % s.PORT + s.serve_forever() + +def test_simple(n): + l = range(0, n) + s = LQSServer('', LQSHandler, iter(l), None) + print "Awaiting UDP messages on port %d" % s.PORT + s.serve_forever() + diff --git a/vendor/boto/boto/mapreduce/partitiondb.py b/vendor/boto/boto/mapreduce/partitiondb.py new file mode 100644 index 000000000000..25cf135367d1 --- /dev/null +++ b/vendor/boto/boto/mapreduce/partitiondb.py @@ -0,0 +1,175 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import random +import os +import datetime +from boto.sdb.persist.object import SDBObject +from boto.sdb.persist.property import StringProperty, ObjectProperty, DateTimeProperty, ObjectListProperty, S3KeyProperty + + +HEX_DIGITS = '0123456789abcdef' + +class Identifier(object): + + @staticmethod + def gen(prefix): + suffix = '' + for i in range(0,8): + suffix += random.choice(HEX_DIGITS) + return prefix + '-' + suffix + + +class Version(SDBObject): + + name = StringProperty() + pdb = ObjectProperty(ref_class=SDBObject) + date = DateTimeProperty() + + def __init__(self, id=None, manager=None): + SDBObject.__init__(self, id, manager) + if id == None: + self.name = Identifier.gen('v') + self.date = datetime.datetime.now() + print 'created Version %s' % self.name + + def partitions(self): + """ + Return an iterator containing all Partition objects related to this Version. + + :rtype: iterator of :class:`boto.mapreduce.partitiondb.Partition` + :return: The Partitions in this Version + """ + return self.get_related_objects('version', Partition) + + def add_partition(self, name=None): + """ + Add a new Partition to this Version. 
+ + :type name: string + :param name: The name of the new Partition (optional) + + :rtype: :class:`boto.mapreduce.partitiondb.Partition` + :return: The new Partition object + """ + p = Partition(manager=self.manager, name=name) + p.version = self + p.pdb = self.pdb + p.save() + return p + + def get_s3_prefix(self): + if not self.pdb: + raise ValueError, 'pdb attribute must be set to compute S3 prefix' + return self.pdb.get_s3_prefix() + self.name + '/' + +class PartitionDB(SDBObject): + + name = StringProperty() + bucket_name = StringProperty() + versions = ObjectListProperty(ref_class=Version) + + def __init__(self, id=None, manager=None, name='', bucket_name=''): + SDBObject.__init__(self, id, manager) + if id == None: + self.name = name + self.bucket_name = bucket_name + + def get_s3_prefix(self): + return self.name + '/' + + def add_version(self): + """ + Add a new Version to this PartitionDB. The newly added version becomes the + current version. + + :rtype: :class:`boto.mapreduce.partitiondb.Version` + :return: The newly created Version object. + """ + v = Version() + v.pdb = self + v.save() + self.versions.append(v) + return v + + def revert(self): + """ + Revert to the previous version of this PartitionDB. The current version is removed from the + list of Versions and the Version immediately preceeding it becomes the current version. + Note that this method does not delete the Version object or any Partitions related to the + Version object. + + :rtype: :class:`boto.mapreduce.partitiondb.Version` + :return: The previous current Version object. + """ + v = self.current_version() + if v: + self.versions.remove(v) + return v + + def current_version(self): + """ + Get the currently active Version of this PartitionDB object. + + :rtype: :class:`boto.mapreduce.partitiondb.Version` + :return: The current Version object or None if there are no Versions associated + with this PartitionDB object. 
+ """ + if self.versions: + if len(self.versions) > 0: + return self.versions[-1] + return None + +class Partition(SDBObject): + + def __init__(self, id=None, manager=None, name=None): + SDBObject.__init__(self, id, manager) + if id == None: + self.name = name + + name = StringProperty() + version = ObjectProperty(ref_class=Version) + pdb = ObjectProperty(ref_class=PartitionDB) + data = S3KeyProperty() + + def get_key_name(self): + return self.version.get_s3_prefix() + self.name + + def upload(self, path, bucket_name=None): + if not bucket_name: + bucket_name = self.version.pdb.bucket_name + s3 = self.manager.get_s3_connection() + bucket = s3.lookup(bucket_name) + directory, filename = os.path.split(path) + self.name = filename + key = bucket.new_key(self.get_key_name()) + key.set_contents_from_filename(path) + self.data = key + self.save() + + def delete(self): + if self.data: + self.data.delete() + SDBObject.delete(self) + + + diff --git a/vendor/boto/boto/mapreduce/pdb_delete b/vendor/boto/boto/mapreduce/pdb_delete new file mode 100644 index 000000000000..b7af9cc06a24 --- /dev/null +++ b/vendor/boto/boto/mapreduce/pdb_delete @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import queuetools, os, signal, sys +import subprocess +import time +from optparse import OptionParser +from boto.mapreduce.partitiondb import PartitionDB, Partition, Version +from lqs import LQSServer, PersistHandler +from boto.exception import SDBPersistenceError +from boto.sdb.persist import get_manager, revive_object_from_id + +USAGE = """ + SYNOPSIS + %prog [options] [command] + DESCRIPTION + Delete a PartitionDB and all related data in SimpleDB and S3. 
+""" +class Client: + + def __init__(self, queue_name): + self.q = queuetools.get_queue(queue_name) + self.q.connect() + self.manager = get_manager() + self.process() + + def process(self): + m = self.q.get() + while m['item']: + print 'Deleting: %s' % m['item'] + obj = revive_object_from_id(m['item'], manager=self.manager) + obj.delete() + self.q.delete(m) + m = self.q.get() + print 'client processing complete' + +class Server: + + def __init__(self, pdb_name, domain_name=None): + self.pdb_name = pdb_name + self.manager = get_manager(domain_name) + self.pdb = PartitionDB.get(name=self.pdb_name) + self.serve() + + def serve(self): + args = {'pdb_id' : self.pdb.id} + rs = self.pdb.get_related_objects('pdb') + self.pdb.delete() + s = LQSServer('', PersistHandler, rs, args) + s.serve_forever() + +class Delete: + + Commands = {'client' : 'Start a Delete client', + 'server' : 'Start a Delete server'} + + def __init__(self): + self.parser = OptionParser(usage=USAGE) + self.parser.add_option("--help-commands", action="store_true", dest="help_commands", + help="provides help on the available commands") + self.parser.add_option('-d', '--domain-name', action='store', type='string', + help='name of the SimpleDB domain where PDB objects are stored') + self.parser.add_option('-n', '--num-processes', action='store', type='int', dest='num_processes', + help='the number of client processes launched') + self.parser.set_defaults(num_processes=5) + self.parser.add_option('-p', '--pdb-name', action='store', type='string', + help='name of the PDB in which to store files (will create if necessary)') + self.options, self.args = self.parser.parse_args() + self.prog_name = sys.argv[0] + + def print_command_help(self): + print '\nCommands:' + for key in self.Commands.keys(): + print ' %s\t\t%s' % (key, self.Commands[key]) + + def do_server(self): + if not self.options.pdb_name: + self.parser.error('No PDB name provided') + s = Server(self.options.pdb_name, self.options.domain_name) + + def do_client(self): + c = Client('localhost') + + def main(self): + if self.options.help_commands: + self.print_command_help() + sys.exit(0) + if len(self.args) == 0: + if not self.options.pdb_name: + self.parser.error('No PDB name provided') + server_command = '%s -p %s ' % (self.prog_name, self.options.pdb_name) + server_command += ' server' + client_command = '%s client' % self.prog_name + server = subprocess.Popen(server_command, shell=True) + print 'server pid: %s' % server.pid + time.sleep(5) + clients = [] + for i in range(0, self.options.num_processes): + client = subprocess.Popen(client_command, shell=True) + clients.append(client) + print 'waiting for clients to finish' + for client in clients: + client.wait() + os.kill(server.pid, signal.SIGTERM) + elif len(self.args) == 1: + self.command = self.args[0] + if hasattr(self, 'do_%s' % self.command): + method = getattr(self, 'do_%s' % self.command) + method() + else: + self.parser.error('command (%s) not recognized' % self.command) + else: + self.parser.error('unrecognized commands') + +if __name__ == "__main__": + delete = Delete() + delete.main() diff --git a/vendor/boto/boto/mapreduce/pdb_describe b/vendor/boto/boto/mapreduce/pdb_describe new file mode 100755 index 000000000000..d0fa86c63d1a --- /dev/null +++ b/vendor/boto/boto/mapreduce/pdb_describe @@ -0,0 +1,124 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated 
documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import sys +from optparse import OptionParser +from boto.mapreduce.partitiondb import PartitionDB, Partition, Version +from boto.exception import SDBPersistenceError +from boto.sdb.persist import get_manager, get_domain + +USAGE = """ + SYNOPSIS + %prog [options] + DESCRIPTION + List and describe your PartitionDBs. + Called with no options, all PartitionDB objects defined in your default + domain (as specified in the "default_domain" option in the "[Persist]" + section of your boto config file) will be listed. + When called with a particular PartitionDB name (using -p option) all + Version objects of that PartitionDB object will be listed. + When called with the -p option and a particular Version name specified + (using the -v option) all Partitions in that Version object will be listed. +""" +class Describe: + + def __init__(self): + self.parser = OptionParser(usage=USAGE) + self.parser.add_option('-d', '--domain-name', action='store', type='string', + help='name of the SimpleDB domain where PDB objects are stored') + self.parser.add_option('-n', '--num-entries', action='store', type='int', + help='maximum number of entries to print (default 100)') + self.parser.set_defaults(num_entries=100) + self.parser.add_option('-p', '--pdb-name', action='store', type='string', + help='name of the PDB to describe') + self.parser.add_option('-v', '--version-name', action='store', type='string', + help='name of the PDB Version to describe') + self.options, self.args = self.parser.parse_args() + self.prog_name = sys.argv[0] + + def describe_all(self): + print 'Using SimpleDB Domain: %s' % get_domain() + print 'PDBs:' + rs = PartitionDB.list() + i = 0 + for pdb in rs: + print '%s\t%s\t%s' % (pdb.id, pdb.name, pdb.bucket_name) + i += 1 + if i == self.options.num_entries: + break + + def describe_pdb(self, pdb_name): + print 'Using SimpleDB Domain: %s' % get_domain() + print 'PDB: %s' % pdb_name + print 'Versions:' + try: + pdb = PartitionDB.get(name=pdb_name) + i = 0 + for v in pdb.versions: + if v.date: + ds = v.date.isoformat() + else: + ds = 'unknown' + print '%s\t%s\t%s' % (v.id, v.name, ds) + i += 1 + if i == self.options.num_entries: + break + cv = pdb.current_version() + if cv: + print 'Current Version: %s' % cv.name + else: + print 'Current Version: None' + except SDBPersistenceError: + self.parser.error('pdb_name (%s) unknown' % pdb_name) + + def describe_version(self, pdb_name, version_name): + print 'Using SimpleDB Domain: %s' % get_domain() + print 'PDB: %s' % pdb_name + print 'Version: %s' % version_name + print 'Partitions:' + try: + pdb = 
PartitionDB.get(name=pdb_name) + for v in pdb.versions: + if v.name == version_name: + i = 0 + for p in v.partitions(): + print '%s\t%s' % (p.id, p.name) + i += 1 + if i == self.options.num_entries: + break + except SDBPersistenceError: + self.parser.error('pdb_name (%s) unknown' % pdb_name) + + def main(self): + self.options, self.args = self.parser.parse_args() + self.manager = get_manager(self.options.domain_name) + + if self.options.pdb_name: + if self.options.version_name: + self.describe_version(self.options.pdb_name, self.options.version_name) + else: + self.describe_pdb(self.options.pdb_name) + else: + self.describe_all() + +if __name__ == "__main__": + describe = Describe() + describe.main() diff --git a/vendor/boto/boto/mapreduce/pdb_revert b/vendor/boto/boto/mapreduce/pdb_revert new file mode 100755 index 000000000000..daffeef1edee --- /dev/null +++ b/vendor/boto/boto/mapreduce/pdb_revert @@ -0,0 +1,135 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import queuetools, os, signal, sys +import subprocess +import time +from optparse import OptionParser +from boto.mapreduce.partitiondb import PartitionDB, Partition, Version +from lqs import LQSServer, PersistHandler +from boto.exception import SDBPersistenceError +from boto.sdb.persist import get_manager + +USAGE = """ + SYNOPSIS + %prog [options] [command] + DESCRIPTION + Revert to the previous Version in a PartitionDB. 
+""" +class Client: + + def __init__(self, queue_name): + self.q = queuetools.get_queue(queue_name) + self.q.connect() + self.manager = get_manager() + self.process() + + def process(self): + m = self.q.get() + while m['item']: + print 'Deleting: %s' % m['item'] + p = Partition(id=m['item'], manager=self.manager) + p.delete() + self.q.delete(m) + m = self.q.get() + print 'client processing complete' + +class Server: + + def __init__(self, pdb_name, domain_name=None): + self.pdb_name = pdb_name + self.manager = get_manager(domain_name) + self.pdb = PartitionDB.get(name=self.pdb_name) + self.serve() + + def serve(self): + v = self.pdb.revert() + args = {'v_id' : v.id} + rs = v.partitions() + s = LQSServer('', PersistHandler, rs, args) + s.serve_forever() + +class Revert: + + Commands = {'client' : 'Start a Revert client', + 'server' : 'Start a Revert server'} + + def __init__(self): + self.parser = OptionParser(usage=USAGE) + self.parser.add_option("--help-commands", action="store_true", dest="help_commands", + help="provides help on the available commands") + self.parser.add_option('-d', '--domain-name', action='store', type='string', + help='name of the SimpleDB domain where PDB objects are stored') + self.parser.add_option('-n', '--num-processes', action='store', type='int', dest='num_processes', + help='the number of client processes launched') + self.parser.set_defaults(num_processes=5) + self.parser.add_option('-p', '--pdb-name', action='store', type='string', + help='name of the PDB in which to store files (will create if necessary)') + self.options, self.args = self.parser.parse_args() + self.prog_name = sys.argv[0] + + def print_command_help(self): + print '\nCommands:' + for key in self.Commands.keys(): + print ' %s\t\t%s' % (key, self.Commands[key]) + + def do_server(self): + if not self.options.pdb_name: + self.parser.error('No PDB name provided') + s = Server(self.options.pdb_name, self.options.domain_name) + + def do_client(self): + c = Client('localhost') + + def main(self): + if self.options.help_commands: + self.print_command_help() + sys.exit(0) + if len(self.args) == 0: + if not self.options.pdb_name: + self.parser.error('No PDB name provided') + server_command = '%s -p %s ' % (self.prog_name, self.options.pdb_name) + server_command += ' server' + client_command = '%s client' % self.prog_name + server = subprocess.Popen(server_command, shell=True) + print 'server pid: %s' % server.pid + time.sleep(5) + clients = [] + for i in range(0, self.options.num_processes): + client = subprocess.Popen(client_command, shell=True) + clients.append(client) + print 'waiting for clients to finish' + for client in clients: + client.wait() + os.kill(server.pid, signal.SIGTERM) + elif len(self.args) == 1: + self.command = self.args[0] + if hasattr(self, 'do_%s' % self.command): + method = getattr(self, 'do_%s' % self.command) + method() + else: + self.parser.error('command (%s) not recognized' % self.command) + else: + self.parser.error('unrecognized commands') + +if __name__ == "__main__": + revert = Revert() + revert.main() diff --git a/vendor/boto/boto/mapreduce/pdb_upload b/vendor/boto/boto/mapreduce/pdb_upload new file mode 100755 index 000000000000..1ca2b6d39884 --- /dev/null +++ b/vendor/boto/boto/mapreduce/pdb_upload @@ -0,0 +1,172 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in 
the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import queuetools, os, signal, sys +import subprocess +import time +from optparse import OptionParser +from boto.mapreduce.partitiondb import PartitionDB, Partition, Version +from lqs import LQSServer, LQSHandler +from boto.exception import SDBPersistenceError +from boto.sdb.persist import get_manager + +USAGE = """ + SYNOPSIS + %prog [options] + DESCRIPTION + Upload partition files to a PartitionDB. + Called with no options, all PartitionDB objects defined in your default + domain (as specified in the "default_domain" option in the "[Persist]" + section of your boto config file) will be listed. + When called with a particular PartitionDB name (using -p option) all + Version objects of that PartitionDB object will be listed. + When called with the -p option and a particular Version name specified + (using the -v option) all Partitions in that Version object will be listed. 
+""" +class Client: + + def __init__(self, queue_name): + self.q = queuetools.get_queue(queue_name) + self.q.connect() + self.manager = get_manager() + self.process() + + def process(self): + m = self.q.get() + if m['item']: + v = Version(m['args']['v_id'], self.manager) + bucket_name = v.pdb.bucket_name + while m['item']: + print 'Uploading: %s' % m['item'] + p = v.add_partition(name=m['item']) + p.upload(os.path.join(m['args']['path'], m['item']), bucket_name) + self.q.delete(m) + m = self.q.get() + print 'client processing complete' + +class Server: + + def __init__(self, path, pdb_name, bucket_name=None, domain_name=None): + self.path = path + self.pdb_name = pdb_name + self.bucket_name = bucket_name + self.manager = get_manager(domain_name) + self.get_pdb() + self.serve() + + def get_pdb(self): + try: + self.pdb = PartitionDB.get(name=self.pdb_name) + except SDBPersistenceError: + self.pdb = PartitionDB(manager=self.manager, name=self.pdb_name, bucket_name=self.bucket_name) + self.pdb.save() + + def serve(self): + v = self.pdb.add_version() + args = {'path' : self.path, + 'v_id' : v.id} + l = os.listdir(self.path) + s = LQSServer('', LQSHandler, iter(l), args) + s.serve_forever() + +class Upload: + + Usage = "usage: %prog [options] command" + + Commands = {'client' : 'Start an Upload client', + 'server' : 'Start an Upload server'} + + def __init__(self): + self.parser = OptionParser(usage=self.Usage) + self.parser.add_option("--help-commands", action="store_true", dest="help_commands", + help="provides help on the available commands") + self.parser.add_option('-d', '--domain-name', action='store', type='string', + help='name of the SimpleDB domain where PDB objects are stored') + self.parser.add_option('-n', '--num-processes', action='store', type='int', dest='num_processes', + help='the number of client processes launched') + self.parser.set_defaults(num_processes=2) + self.parser.add_option('-i', '--input-path', action='store', type='string', + help='the path to directory to upload') + self.parser.add_option('-p', '--pdb-name', action='store', type='string', + help='name of the PDB in which to store files (will create if necessary)') + self.parser.add_option('-b', '--bucket-name', action='store', type='string', + help='name of S3 bucket (only needed if creating new PDB)') + self.options, self.args = self.parser.parse_args() + self.prog_name = sys.argv[0] + + def print_command_help(self): + print '\nCommands:' + for key in self.Commands.keys(): + print ' %s\t\t%s' % (key, self.Commands[key]) + + def do_server(self): + if not self.options.input_path: + self.parser.error('No path provided') + if not os.path.isdir(self.options.input_path): + self.parser.error('Invalid path (%s)' % self.options.input_path) + if not self.options.pdb_name: + self.parser.error('No PDB name provided') + s = Server(self.options.input_path, self.options.pdb_name, + self.options.bucket_name, self.options.domain_name) + + def do_client(self): + c = Client('localhost') + + def main(self): + if self.options.help_commands: + self.print_command_help() + sys.exit(0) + if len(self.args) == 0: + if not self.options.input_path: + self.parser.error('No path provided') + if not os.path.isdir(self.options.input_path): + self.parser.error('Invalid path (%s)' % self.options.input_path) + if not self.options.pdb_name: + self.parser.error('No PDB name provided') + server_command = '%s -p %s -i %s' % (self.prog_name, self.options.pdb_name, self.options.input_path) + if self.options.bucket_name: + server_command += ' -b %s' % 
self.options.bucket_name + server_command += ' server' + client_command = '%s client' % self.prog_name + server = subprocess.Popen(server_command, shell=True) + print 'server pid: %s' % server.pid + time.sleep(5) + clients = [] + for i in range(0, self.options.num_processes): + client = subprocess.Popen(client_command, shell=True) + clients.append(client) + print 'waiting for clients to finish' + for client in clients: + client.wait() + os.kill(server.pid, signal.SIGTERM) + elif len(self.args) == 1: + self.command = self.args[0] + if hasattr(self, 'do_%s' % self.command): + method = getattr(self, 'do_%s' % self.command) + method() + else: + self.parser.error('command (%s) not recognized' % self.command) + else: + self.parser.error('unrecognized commands') + +if __name__ == "__main__": + upload = Upload() + upload.main() diff --git a/vendor/boto/boto/mapreduce/queuetools.py b/vendor/boto/boto/mapreduce/queuetools.py new file mode 100644 index 000000000000..db1e495c6d04 --- /dev/null +++ b/vendor/boto/boto/mapreduce/queuetools.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +import socket +from lqs import LQSServer, LQSMessage +import boto +from boto.sqs.jsonmessage import JSONMessage + +class LQSClient: + + def __init__(self, host): + self.host = host + self.port = LQSServer.PORT + self.timeout = LQSServer.TIMEOUT + self.max_len = LQSServer.MAXSIZE + self.sock = None + + def connect(self): + self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.sock.settimeout(self.timeout) + self.sock.connect((self.host, self.port)) + + def decode(self, jsonstr): + return LQSMessage(jsonvalue=jsonstr) + + def get(self): + self.sock.send('next') + try: + jsonstr = self.sock.recv(self.max_len) + msg = LQSMessage(jsonvalue=jsonstr) + return msg + except: + print "recv from %s failed" % self.host + + def delete(self, msg): + self.sock.send('delete %s' % msg['id']) + try: + jsonstr = self.sock.recv(self.max_len) + msg = LQSMessage(jsonvalue=jsonstr) + return msg + except: + print "recv from %s failed" % self.host + + def close(self): + self.sock.close() + +class SQSClient: + + def __init__(self, queue_name): + self.queue_name = queue_name + + def connect(self): + self.queue = boto.lookup('sqs', self.queue_name) + self.queue.set_mesasge_class(JSONMessage) + + def get(self): + m = self.queue.read() + return m.get_body() + + def close(self): + pass + +def get_queue(name): + if name == 'localhost': + return LQSClient(name) + else: + return SQSClient(name) + diff --git a/vendor/boto/boto/mashups/__init__.py b/vendor/boto/boto/mashups/__init__.py new file mode 100644 index 000000000000..449bd162a8ea --- /dev/null +++ b/vendor/boto/boto/mashups/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
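All three pdb_* scripts above follow the same fan-out pattern: the server process enumerates the work (object ids or file names) over a local LQSServer, and N client subprocesses drain the queue until they receive an empty message. The queuetools module gives them a single get_queue() entry point, returning the UDP-based LQSClient for 'localhost' and an SQS-backed queue otherwise. A short sketch of the client side of that protocol, assuming an LQSServer (for example one started with lqs.test_simple()) is already running on the same host; timeout handling is omitted:

    from boto.mapreduce import queuetools

    def drain():
        q = queuetools.get_queue('localhost')   # returns an LQSClient
        q.connect()
        msg = q.get()                           # sends the 'next' command
        while not msg.is_empty():
            print 'processing item: %s' % msg['item']
            q.delete(msg)                       # acknowledges with 'delete <id>'
            msg = q.get()
        q.close()
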
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/vendor/boto/boto/mashups/interactive.py b/vendor/boto/boto/mashups/interactive.py new file mode 100644 index 000000000000..b80e661e5f02 --- /dev/null +++ b/vendor/boto/boto/mashups/interactive.py @@ -0,0 +1,97 @@ +# Copyright (C) 2003-2007 Robey Pointer +# +# This file is part of paramiko. +# +# Paramiko is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Paramiko; if not, write to the Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + + +import socket +import sys + +# windows does not have termios... +try: + import termios + import tty + has_termios = True +except ImportError: + has_termios = False + + +def interactive_shell(chan): + if has_termios: + posix_shell(chan) + else: + windows_shell(chan) + + +def posix_shell(chan): + import select + + oldtty = termios.tcgetattr(sys.stdin) + try: + tty.setraw(sys.stdin.fileno()) + tty.setcbreak(sys.stdin.fileno()) + chan.settimeout(0.0) + + while True: + r, w, e = select.select([chan, sys.stdin], [], []) + if chan in r: + try: + x = chan.recv(1024) + if len(x) == 0: + print '\r\n*** EOF\r\n', + break + sys.stdout.write(x) + sys.stdout.flush() + except socket.timeout: + pass + if sys.stdin in r: + x = sys.stdin.read(1) + if len(x) == 0: + break + chan.send(x) + + finally: + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty) + + +# thanks to Mike Looijmans for this code +def windows_shell(chan): + import threading + + sys.stdout.write("Line-buffered terminal emulation. 
Press F6 or ^Z to send EOF.\r\n\r\n") + + def writeall(sock): + while True: + data = sock.recv(256) + if not data: + sys.stdout.write('\r\n*** EOF ***\r\n\r\n') + sys.stdout.flush() + break + sys.stdout.write(data) + sys.stdout.flush() + + writer = threading.Thread(target=writeall, args=(chan,)) + writer.start() + + try: + while True: + d = sys.stdin.read(1) + if not d: + break + chan.send(d) + except EOFError: + # user hit ^Z or F6 + pass diff --git a/vendor/boto/boto/mashups/iobject.py b/vendor/boto/boto/mashups/iobject.py new file mode 100644 index 000000000000..a226b5ca6af3 --- /dev/null +++ b/vendor/boto/boto/mashups/iobject.py @@ -0,0 +1,115 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
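interactive.py above is lifted from the paramiko demos: it uses a raw termios-based shell on POSIX systems and falls back to a line-buffered loop on Windows. A minimal sketch of using it directly with paramiko; the host name and key path are placeholders, and the Server.shell() method later in this patch wraps the same call behind get_ssh_client():

    import paramiko
    from boto.mashups.interactive import interactive_shell

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect('ec2-host.example.com', username='root',
                   key_filename='/path/to/key.pem')    # placeholders
    interactive_shell(client.invoke_shell())           # returns when the shell exits
    client.close()
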
+ +import os + +def int_val_fn(v): + try: + int(v) + return True + except: + return False + +class IObject(object): + + def choose_from_list(self, item_list, search_str='', + prompt='Enter Selection'): + if not item_list: + print 'No Choices Available' + return + choice = None + while not choice: + n = 1 + choices = [] + for item in item_list: + if isinstance(item, str): + print '[%d] %s' % (n, item) + choices.append(item) + n += 1 + else: + obj, id, desc = item + if desc: + if desc.find(search_str) >= 0: + print '[%d] %s - %s' % (n, id, desc) + choices.append(obj) + n += 1 + else: + if id.find(search_str) >= 0: + print '[%d] %s' % (n, id) + choices.append(obj) + n += 1 + if choices: + val = raw_input('%s[1-%d]: ' % (prompt, len(choices))) + if val.startswith('/'): + search_str = val[1:] + else: + try: + int_val = int(val) + if int_val == 0: + return None + choice = choices[int_val-1] + except ValueError: + print '%s is not a valid choice' % val + except IndexError: + print '%s is not within the range[1-%d]' % (val, + len(choices)) + else: + print "No objects matched your pattern" + search_str = '' + return choice + + def get_string(self, prompt, validation_fn=None): + okay = False + while not okay: + val = raw_input('%s: ' % prompt) + if validation_fn: + okay = validation_fn(val) + if not okay: + print 'Invalid value: %s' % val + else: + okay = True + return val + + def get_filename(self, prompt): + okay = False + val = '' + while not okay: + val = raw_input('%s: %s' % (prompt, val)) + val = os.path.expanduser(val) + if os.path.isfile(val): + okay = True + elif os.path.isdir(val): + path = val + val = self.choose_from_list(os.listdir(path)) + if val: + val = os.path.join(path, val) + okay = True + else: + val = '' + else: + print 'Invalid value: %s' % val + val = '' + return val + + def get_int(self, prompt): + s = self.get_string(prompt, int_val_fn) + return int(s) + diff --git a/vendor/boto/boto/mashups/order.py b/vendor/boto/boto/mashups/order.py new file mode 100644 index 000000000000..6efdc3ecabbe --- /dev/null +++ b/vendor/boto/boto/mashups/order.py @@ -0,0 +1,211 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
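IObject is the interactive prompting helper used throughout the mashups package. choose_from_list() accepts plain strings or (object, id, description) tuples, a reply starting with '/' filters the list instead of selecting, and entering 0 cancels. A purely illustrative sketch; the choices are arbitrary:

    from boto.mashups.iobject import IObject

    iobj = IObject()
    itype = iobj.choose_from_list(['m1.small', 'm1.large', 'c1.medium'],
                                  prompt='Choose Instance Type')
    count = iobj.get_int('How many instances')
    print 'launching %d x %s' % (count, itype)
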
+ +""" +High-level abstraction of an EC2 order for servers +""" + +import boto +import boto.ec2 +from boto.mashups.server import Server, ServerSet +from boto.mashups.iobject import IObject +from boto.pyami.config import Config +from boto.sdb.persist import get_domain, set_domain +import time, StringIO + +InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge'] + +class Item(IObject): + + def __init__(self): + self.region = None + self.name = None + self.instance_type = None + self.quantity = 0 + self.zone = None + self.ami = None + self.groups = [] + self.key = None + self.ec2 = None + self.config = None + + def set_userdata(self, key, value): + self.userdata[key] = value + + def get_userdata(self, key): + return self.userdata[key] + + def set_region(self, region=None): + if region: + self.region = region + else: + l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()] + self.region = self.choose_from_list(l, prompt='Choose Region') + + def set_name(self, name=None): + if name: + self.name = name + else: + self.name = self.get_string('Name') + + def set_instance_type(self, instance_type=None): + if instance_type: + self.instance_type = instance_type + else: + self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type') + + def set_quantity(self, n=0): + if n > 0: + self.quantity = n + else: + self.quantity = self.get_int('Quantity') + + def set_zone(self, zone=None): + if zone: + self.zone = zone + else: + l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()] + self.zone = self.choose_from_list(l, prompt='Choose Availability Zone') + + def set_ami(self, ami=None): + if ami: + self.ami = ami + else: + l = [(a, a.id, a.location) for a in self.ec2.get_all_images()] + self.ami = self.choose_from_list(l, prompt='Choose AMI') + + def add_group(self, group=None): + if group: + self.groups.append(group) + else: + l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()] + self.groups.append(self.choose_from_list(l, prompt='Choose Security Group')) + + def set_key(self, key=None): + if key: + self.key = key + else: + l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()] + self.key = self.choose_from_list(l, prompt='Choose Keypair') + + def update_config(self): + if not self.config.has_section('Credentials'): + self.config.add_section('Credentials') + self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id) + self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key) + if not self.config.has_section('Pyami'): + self.config.add_section('Pyami') + sdb_domain = get_domain() + if sdb_domain: + self.config.set('Pyami', 'server_sdb_domain', sdb_domain) + self.config.set('Pyami', 'server_sdb_name', self.name) + + def set_config(self, config_path=None): + if not config_path: + config_path = self.get_filename('Specify Config file') + self.config = Config(path=config_path) + + def get_userdata_string(self): + s = StringIO.StringIO() + self.config.write(s) + return s.getvalue() + + def enter(self, **params): + self.region = params.get('region', self.region) + if not self.region: + self.set_region() + self.ec2 = self.region.connect() + self.name = params.get('name', self.name) + if not self.name: + self.set_name() + self.instance_type = params.get('instance_type', self.instance_type) + if not self.instance_type: + self.set_instance_type() + self.zone = params.get('zone', self.zone) + if not self.zone: + self.set_zone() + self.quantity = params.get('quantity', self.quantity) + if not 
self.quantity: + self.set_quantity() + self.ami = params.get('ami', self.ami) + if not self.ami: + self.set_ami() + self.groups = params.get('groups', self.groups) + if not self.groups: + self.add_group() + self.key = params.get('key', self.key) + if not self.key: + self.set_key() + self.config = params.get('config', self.config) + if not self.config: + self.set_config() + self.update_config() + +class Order(IObject): + + def __init__(self): + self.items = [] + self.reservation = None + + def add_item(self, **params): + item = Item() + item.enter(**params) + self.items.append(item) + + def display(self): + print 'This Order consists of the following items' + print + print 'QTY\tNAME\tTYPE\nAMI\t\tGroups\t\t\tKeyPair' + for item in self.items: + print '%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type, + item.ami.id, item.groups, item.key.name) + + def place(self, block=True): + if get_domain() == None: + print 'SDB Persistence Domain not set' + domain_name = self.get_string('Specify SDB Domain') + set_domain(domain_name) + s = ServerSet() + for item in self.items: + r = item.ami.run(min_count=1, max_count=item.quantity, + key_name=item.key.name, user_data=item.get_userdata_string(), + security_groups=item.groups, instance_type=item.instance_type, + placement=item.zone.name) + if block: + states = [i.state for i in r.instances] + if states.count('running') != len(states): + print states + time.sleep(15) + states = [i.update() for i in r.instances] + for i in r.instances: + server = Server() + server.name = item.name + server.instance_id = i.id + server.reservation = r + server.save() + s.append(server) + if len(s) == 1: + return s[0] + else: + return s + + + diff --git a/vendor/boto/boto/mashups/server.py b/vendor/boto/boto/mashups/server.py new file mode 100644 index 000000000000..6cea106c058d --- /dev/null +++ b/vendor/boto/boto/mashups/server.py @@ -0,0 +1,395 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
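Order gathers one or more Item entries, each of which prompts interactively (via IObject) for any field not supplied, then launches them as EC2 reservations and persists a Server record per instance. A hedged sketch of the intended flow; the domain name, server name and quantity below are placeholders, and everything else is prompted for by Item.enter():

    from boto.mashups.order import Order
    from boto.sdb.persist import set_domain

    set_domain('my_pyami_domain')               # placeholder SDB domain
    order = Order()
    order.add_item(name='web-01', quantity=1)   # prompts for region, AMI, key, ...
    order.display()
    servers = order.place(block=True)           # waits until instances are running
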
+ +""" +High-level abstraction of an EC2 server +""" +import boto +import boto.utils +from boto.mashups.iobject import IObject +from boto.pyami.config import Config, BotoConfigPath +from boto.mashups.interactive import interactive_shell +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty +import os +import StringIO + + +class ServerSet(list): + + def __getattr__(self, name): + results = [] + is_callable = False + for server in self: + try: + val = getattr(server, name) + if callable(val): + is_callable = True + results.append(val) + except: + results.append(None) + if is_callable: + self.map_list = results + return self.map + return results + + def map(self, *args): + results = [] + for fn in self.map_list: + results.append(fn(*args)) + return results + +class Server(Model): + + @property + def ec2(self): + if self._ec2 is None: + self._ec2 = boto.connect_ec2() + return self._ec2 + + @classmethod + def Inventory(cls): + """ + Returns a list of Server instances, one for each Server object + persisted in the db + """ + l = ServerSet() + rs = cls.find() + for server in rs: + l.append(server) + return l + + @classmethod + def Register(cls, name, instance_id, description=''): + s = cls() + s.name = name + s.instance_id = instance_id + s.description = description + s.save() + return s + + def __init__(self, id=None, **kw): + Model.__init__(self, id, **kw) + self._reservation = None + self._instance = None + self._ssh_client = None + self._pkey = None + self._config = None + self._ec2 = None + + name = StringProperty(unique=True, verbose_name="Name") + instance_id = StringProperty(verbose_name="Instance ID") + config_uri = StringProperty() + ami_id = StringProperty(verbose_name="AMI ID") + zone = StringProperty(verbose_name="Availability Zone") + security_group = StringProperty(verbose_name="Security Group", default="default") + key_name = StringProperty(verbose_name="Key Name") + elastic_ip = StringProperty(verbose_name="Elastic IP") + instance_type = StringProperty(verbose_name="Instance Type") + description = StringProperty(verbose_name="Description") + log = StringProperty() + + def setReadOnly(self, value): + raise AttributeError + + def getInstance(self): + if not self._instance: + if self.instance_id: + try: + rs = self.ec2.get_all_instances([self.instance_id]) + except: + return None + if len(rs) > 0: + self._reservation = rs[0] + self._instance = self._reservation.instances[0] + return self._instance + + instance = property(getInstance, setReadOnly, None, 'The Instance for the server') + + def getAMI(self): + if self.instance: + return self.instance.image_id + + ami = property(getAMI, setReadOnly, None, 'The AMI for the server') + + def getStatus(self): + if self.instance: + self.instance.update() + return self.instance.state + + status = property(getStatus, setReadOnly, None, + 'The status of the server') + + def getHostname(self): + if self.instance: + return self.instance.public_dns_name + + hostname = property(getHostname, setReadOnly, None, + 'The public DNS name of the server') + + def getPrivateHostname(self): + if self.instance: + return self.instance.private_dns_name + + private_hostname = property(getPrivateHostname, setReadOnly, None, + 'The private DNS name of the server') + + def getLaunchTime(self): + if self.instance: + return self.instance.launch_time + + launch_time = property(getLaunchTime, setReadOnly, None, + 'The time the Server was started') + + def getConsoleOutput(self): + if self.instance: + return 
self.instance.get_console_output() + + console_output = property(getConsoleOutput, setReadOnly, None, + 'Retrieve the console output for server') + + def getGroups(self): + if self._reservation: + return self._reservation.groups + else: + return None + + groups = property(getGroups, setReadOnly, None, + 'The Security Groups controlling access to this server') + + def getConfig(self): + if not self._config: + remote_file = BotoConfigPath + local_file = '%s.ini' % self.instance.id + self.get_file(remote_file, local_file) + self._config = Config(local_file) + return self._config + + def setConfig(self, config): + local_file = '%s.ini' % self.instance.id + fp = open(local_file) + config.write(fp) + fp.close() + self.put_file(local_file, BotoConfigPath) + self._config = config + + config = property(getConfig, setConfig, None, + 'The instance data for this server') + + def set_config(self, config): + """ + Set SDB based config + """ + self._config = config + self._config.dump_to_sdb("botoConfigs", self.id) + + def load_config(self): + self._config = Config(do_load=False) + self._config.load_from_sdb("botoConfigs", self.id) + + def stop(self): + if self.instance: + self.instance.stop() + + def start(self): + self.stop() + ec2 = boto.connect_ec2() + ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0] + groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)]) + if not self._config: + self.load_config() + if not self._config.has_section("Credentials"): + self._config.add_section("Credentials") + self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id) + self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key) + + if not self._config.has_section("Pyami"): + self._config.add_section("Pyami") + + if self._manager.domain: + self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name) + self._config.set("Pyami", 'server_sdb_name', self.name) + + cfg = StringIO.StringIO() + self._config.write(cfg) + cfg = cfg.getvalue() + r = ami.run(min_count=1, + max_count=1, + key_name=self.key_name, + security_groups = groups, + instance_type = self.instance_type, + placement = self.zone, + user_data = cfg) + i = r.instances[0] + self.instance_id = i.id + self.put() + if self.elastic_ip: + ec2.associate_address(self.instance_id, self.elastic_ip) + + def reboot(self): + if self.instance: + self.instance.reboot() + + def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts', + uname='root'): + import paramiko + if not self.instance: + print 'No instance yet!' 
+ return + if not self._ssh_client: + if not key_file: + iobject = IObject() + key_file = iobject.get_filename('Path to OpenSSH Key file') + self._pkey = paramiko.RSAKey.from_private_key_file(key_file) + self._ssh_client = paramiko.SSHClient() + self._ssh_client.load_system_host_keys() + self._ssh_client.load_host_keys(os.path.expanduser(host_key_file)) + self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self._ssh_client.connect(self.instance.public_dns_name, + username=uname, pkey=self._pkey) + return self._ssh_client + + def get_file(self, remotepath, localpath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + sftp_client.get(remotepath, localpath) + + def put_file(self, localpath, remotepath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + sftp_client.put(localpath, remotepath) + + def listdir(self, remotepath): + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + return sftp_client.listdir(remotepath) + + def shell(self, key_file=None): + ssh_client = self.get_ssh_client(key_file) + channel = ssh_client.invoke_shell() + interactive_shell(channel) + + def bundle_image(self, prefix, key_file, cert_file, size): + print 'bundling image...' + print '\tcopying cert and pk over to /mnt directory on server' + ssh_client = self.get_ssh_client() + sftp_client = ssh_client.open_sftp() + path, name = os.path.split(key_file) + remote_key_file = '/mnt/%s' % name + self.put_file(key_file, remote_key_file) + path, name = os.path.split(cert_file) + remote_cert_file = '/mnt/%s' % name + self.put_file(cert_file, remote_cert_file) + print '\tdeleting %s' % BotoConfigPath + # delete the metadata.ini file if it exists + try: + sftp_client.remove(BotoConfigPath) + except: + pass + command = 'sudo ec2-bundle-vol ' + command += '-c %s -k %s ' % (remote_cert_file, remote_key_file) + command += '-u %s ' % self._reservation.owner_id + command += '-p %s ' % prefix + command += '-s %d ' % size + command += '-d /mnt ' + if self.instance.instance_type == 'm1.small' or self.instance_type == 'c1.medium': + command += '-r i386' + else: + command += '-r x86_64' + print '\t%s' % command + t = ssh_client.exec_command(command) + response = t[1].read() + print '\t%s' % response + print '\t%s' % t[2].read() + print '...complete!' + + def upload_bundle(self, bucket, prefix): + print 'uploading bundle...' + command = 'ec2-upload-bundle ' + command += '-m /mnt/%s.manifest.xml ' % prefix + command += '-b %s ' % bucket + command += '-a %s ' % self.ec2.aws_access_key_id + command += '-s %s ' % self.ec2.aws_secret_access_key + print '\t%s' % command + ssh_client = self.get_ssh_client() + t = ssh_client.exec_command(command) + response = t[1].read() + print '\t%s' % response + print '\t%s' % t[2].read() + print '...complete!' + + def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None): + iobject = IObject() + if not bucket: + bucket = iobject.get_string('Name of S3 bucket') + if not prefix: + prefix = iobject.get_string('Prefix for AMI file') + if not key_file: + key_file = iobject.get_filename('Path to RSA private key file') + if not cert_file: + cert_file = iobject.get_filename('Path to RSA public cert file') + if not size: + size = iobject.get_int('Size (in MB) of bundled image') + self.bundle_image(prefix, key_file, cert_file, size) + self.upload_bundle(bucket, prefix) + print 'registering image...' 
+ self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix)) + return self.image_id + + def attach_volume(self, volume, device="/dev/sdp"): + """ + Attach an EBS volume to this server + + :param volume: EBS Volume to attach + :type volume: boto.ec2.volume.Volume + + :param device: Device to attach to (default to /dev/sdp) + :type device: string + """ + if hasattr(volume, "id"): + volume_id = volume.id + else: + volume_id = volume + return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device) + + def detach_volume(self, volume): + """ + Detach an EBS volume from this server + + :param volume: EBS Volume to detach + :type volume: boto.ec2.volume.Volume + """ + if hasattr(volume, "id"): + volume_id = volume.id + else: + volume_id = volume + return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id) + + def install_package(self, package_name): + print 'installing %s...' % package_name + command = 'yum -y install %s' % package_name + print '\t%s' % command + ssh_client = self.get_ssh_client() + t = ssh_client.exec_command(command) + response = t[1].read() + print '\t%s' % response + print '\t%s' % t[2].read() + print '...complete!' diff --git a/vendor/boto/boto/mturk/__init__.py b/vendor/boto/boto/mturk/__init__.py new file mode 100644 index 000000000000..449bd162a8ea --- /dev/null +++ b/vendor/boto/boto/mturk/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/vendor/boto/boto/mturk/connection.py b/vendor/boto/boto/mturk/connection.py new file mode 100644 index 000000000000..f06455451862 --- /dev/null +++ b/vendor/boto/boto/mturk/connection.py @@ -0,0 +1,515 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import xml.sax +import datetime + +from boto import handler +from boto.mturk.price import Price +import boto.mturk.notification +from boto.connection import AWSQueryConnection +from boto.exception import EC2ResponseError +from boto.resultset import ResultSet + +class MTurkConnection(AWSQueryConnection): + + APIVersion = '2008-08-02' + SignatureVersion = '1' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=False, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host='mechanicalturk.amazonaws.com', debug=0, + https_connection_factory=None): + AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + host, debug, https_connection_factory) + + def get_account_balance(self): + """ + """ + params = {} + return self._process_request('GetAccountBalance', params, [('AvailableBalance', Price), + ('OnHoldBalance', Price)]) + + def register_hit_type(self, title, description, reward, duration, + keywords=None, approval_delay=None, qual_req=None): + """ + Register a new HIT Type + \ttitle, description are strings + \treward is a Price object + \tduration can be an integer or string + """ + params = {'Title' : title, + 'Description' : description, + 'AssignmentDurationInSeconds' : duration} + params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward')) + + if keywords: + params['Keywords'] = keywords + + if approval_delay is not None: + params['AutoApprovalDelayInSeconds']= approval_delay + + return self._process_request('RegisterHITType', params) + + def set_email_notification(self, hit_type, email, event_types=None): + """ + Performs a SetHITTypeNotification operation to set email notification for a specified HIT type + """ + return self._set_notification(hit_type, 'Email', email, event_types) + + def set_rest_notification(self, hit_type, url, event_types=None): + """ + Performs a SetHITTypeNotification operation to set REST notification for a specified HIT type + """ + return self._set_notification(hit_type, 'REST', url, event_types) + + def _set_notification(self, hit_type, transport, destination, event_types=None): + """ + Common SetHITTypeNotification operation to set notification for a specified HIT type + """ + assert type(hit_type) is str, "hit_type argument should be a string." + + params = {'HITTypeId': hit_type} + + # from the Developer Guide: + # The 'Active' parameter is optional. If omitted, the active status of the HIT type's + # notification specification is unchanged. All HIT types begin with their + # notification specifications in the "inactive" status. + notification_params = {'Destination': destination, + 'Transport': transport, + 'Version': boto.mturk.notification.NotificationMessage.NOTIFICATION_VERSION, + 'Active': True, + } + + # add specific event types if required + if event_types: + self.build_list_params(notification_params, event_types, 'EventType') + + # Set up dict of 'Notification.1.Transport' etc. 
values + notification_rest_params = {} + num = 1 + for key in notification_params: + notification_rest_params['Notification.%d.%s' % (num, key)] = notification_params[key] + + # Update main params dict + params.update(notification_rest_params) + + # Execute operation + return self._process_request('SetHITTypeNotification', params) + + def create_hit(self, hit_type=None, question=None, lifetime=60*60*24*7, max_assignments=1, + title=None, description=None, keywords=None, reward=None, + duration=60*60*24*7, approval_delay=None, annotation=None, qual_req=None, + questions=None, qualifications=None, response_groups=None): + """ + Creates a new HIT. + Returns a ResultSet + See: http://docs.amazonwebservices.com/AWSMechanicalTurkRequester/2006-10-31/ApiReference_CreateHITOperation.html + """ + + # handle single or multiple questions + if question is not None and questions is not None: + raise ValueError("Must specify either question (single Question instance) or questions (list), but not both") + if question is not None and questions is None: + questions = [question] + + + # Handle basic required arguments and set up params dict + params = {'Question': question.get_as_xml(), + 'LifetimeInSeconds' : lifetime, + 'MaxAssignments' : max_assignments, + } + + # if hit type specified then add it + # else add the additional required parameters + if hit_type: + params['HITTypeId'] = hit_type + else: + # Handle keywords + final_keywords = MTurkConnection.get_keywords_as_string(keywords) + + # Handle price argument + final_price = MTurkConnection.get_price_as_price(reward) + + additional_params = {'Title': title, + 'Description' : description, + 'Keywords': final_keywords, + 'AssignmentDurationInSeconds' : duration, + } + additional_params.update(final_price.get_as_params('Reward')) + + if approval_delay is not None: + additional_params['AutoApprovalDelayInSeconds'] = approval_delay + + # add these params to the others + params.update(additional_params) + + # add the annotation if specified + if annotation is not None: + params['RequesterAnnotation'] = annotation + + # Add the Qualifications if specified + if qualifications is not None: + params.update(qualifications.get_as_params()) + + # Handle optional response groups argument + if response_groups: + self.build_list_params(params, response_groups, 'ResponseGroup') + + # Submit + return self._process_request('CreateHIT', params, [('HIT', HIT),]) + + def get_reviewable_hits(self, hit_type=None, status='Reviewable', + sort_by='Expiration', sort_direction='Ascending', + page_size=10, page_number=1): + """ + Retrieve the HITs that have a status of Reviewable, or HITs that + have a status of Reviewing, and that belong to the Requester calling the operation. + """ + params = {'Status' : status, + 'SortProperty' : sort_by, + 'SortDirection' : sort_direction, + 'PageSize' : page_size, + 'PageNumber' : page_number} + + # Handle optional hit_type argument + if hit_type is not None: + params.update({'HITTypeId': hit_type}) + + return self._process_request('GetReviewableHITs', params, [('HIT', HIT),]) + + def search_hits(self, sort_by='CreationTime', sort_direction='Ascending', + page_size=10, page_number=1): + """ + Return all of a Requester's HITs, on behalf of the Requester. + The operation returns HITs of any status, except for HITs that have been disposed + with the DisposeHIT operation. + Note: + The SearchHITs operation does not accept any search parameters that filter the results. 
+ """ + params = {'SortProperty' : sort_by, + 'SortDirection' : sort_direction, + 'PageSize' : page_size, + 'PageNumber' : page_number} + + return self._process_request('SearchHITs', params, [('HIT', HIT),]) + + def get_assignments(self, hit_id, status=None, + sort_by='SubmitTime', sort_direction='Ascending', + page_size=10, page_number=1): + """ + Retrieves completed assignments for a HIT. + Use this operation to retrieve the results for a HIT. + + The returned ResultSet will have the following attributes: + + NumResults + The number of assignments on the page in the filtered results list, + equivalent to the number of assignments being returned by this call. + A non-negative integer + PageNumber + The number of the page in the filtered results list being returned. + A positive integer + TotalNumResults + The total number of HITs in the filtered results list based on this call. + A non-negative integer + + The ResultSet will contain zero or more Assignment objects + + """ + params = {'HITId' : hit_id, + 'SortProperty' : sort_by, + 'SortDirection' : sort_direction, + 'PageSize' : page_size, + 'PageNumber' : page_number} + + if status is not None: + params['AssignmentStatus'] = status + + return self._process_request('GetAssignmentsForHIT', params, [('Assignment', Assignment),]) + + def approve_assignment(self, assignment_id, feedback=None): + """ + """ + params = {'AssignmentId' : assignment_id,} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('ApproveAssignment', params) + + def reject_assignment(self, assignment_id, feedback=None): + """ + """ + params = {'AssignmentId' : assignment_id,} + if feedback: + params['RequesterFeedback'] = feedback + return self._process_request('RejectAssignment', params) + + def get_hit(self, hit_id): + """ + """ + params = {'HITId' : hit_id,} + return self._process_request('GetHIT', params, [('HIT', HIT),]) + + def set_reviewing(self, hit_id, revert=None): + """ + Update a HIT with a status of Reviewable to have a status of Reviewing, + or reverts a Reviewing HIT back to the Reviewable status. + + Only HITs with a status of Reviewable can be updated with a status of Reviewing. + Similarly, only Reviewing HITs can be reverted back to a status of Reviewable. + """ + params = {'HITId' : hit_id,} + if revert: + params['Revert'] = revert + return self._process_request('SetHITAsReviewing', params) + + def disable_hit(self, hit_id): + """ + Remove a HIT from the Mechanical Turk marketplace, approves all submitted assignments + that have not already been approved or rejected, and disposes of the HIT and all + assignment data. + + Assignments for the HIT that have already been submitted, but not yet approved or rejected, will be + automatically approved. Assignments in progress at the time of the call to DisableHIT will be + approved once the assignments are submitted. You will be charged for approval of these assignments. + DisableHIT completely disposes of the HIT and all submitted assignment data. Assignment results + data cannot be retrieved for a HIT that has been disposed. + + It is not possible to re-enable a HIT once it has been disabled. To make the work from a disabled HIT + available again, create a new HIT. + """ + params = {'HITId' : hit_id,} + return self._process_request('DisableHIT', params) + + def dispose_hit(self, hit_id): + """ + Dispose of a HIT that is no longer needed. + + Only HITs in the "reviewable" state, with all submitted assignments approved or rejected, + can be disposed. 
A Requester can call GetReviewableHITs to determine which HITs are + reviewable, then call GetAssignmentsForHIT to retrieve the assignments. + Disposing of a HIT removes the HIT from the results of a call to GetReviewableHITs. + """ + params = {'HITId' : hit_id,} + return self._process_request('DisposeHIT', params) + + def expire_hit(self, hit_id): + + """ + Expire a HIT that is no longer needed. + + The effect is identical to the HIT expiring on its own. The HIT no longer appears on the + Mechanical Turk web site, and no new Workers are allowed to accept the HIT. Workers who + have accepted the HIT prior to expiration are allowed to complete it or return it, or + allow the assignment duration to elapse (abandon the HIT). Once all remaining assignments + have been submitted, the expired HIT becomes "reviewable", and will be returned by a call + to GetReviewableHITs. + """ + params = {'HITId' : hit_id,} + return self._process_request('ForceExpireHIT', params) + + def extend_hit(self, hit_id, assignments_increment=None, expiration_increment=None): + """ + Increase the maximum number of assignments, or extend the expiration date, of an existing HIT. + + NOTE: If a HIT has a status of Reviewable and the HIT is extended to make it Available, the + HIT will not be returned by GetReviewableHITs, and its submitted assignments will not + be returned by GetAssignmentsForHIT, until the HIT is Reviewable again. + Assignment auto-approval will still happen on its original schedule, even if the HIT has + been extended. Be sure to retrieve and approve (or reject) submitted assignments before + extending the HIT, if so desired. + """ + # must provide assignment *or* expiration increment + if (assignments_increment is None and expiration_increment is None) or \ + (assignments_increment is not None and expiration_increment is not None): + raise ValueError("Must specify either assignments_increment or expiration_increment, but not both") + + params = {'HITId' : hit_id,} + if assignments_increment: + params['MaxAssignmentsIncrement'] = assignments_increment + if expiration_increment: + params['ExpirationIncrementInSeconds'] = expiration_increment + + return self._process_request('ExtendHIT', params) + + def get_help(self, about, help_type='Operation'): + """ + Return information about the Mechanical Turk Service operations and response group + NOTE - this is basically useless as it just returns the URL of the documentation + + help_type: either 'Operation' or 'ResponseGroup' + """ + params = {'About': about, 'HelpType': help_type,} + return self._process_request('Help', params) + + def grant_bonus(self, worker_id, assignment_id, bonus_price, reason): + """ + Issues a payment of money from your account to a Worker. + To be eligible for a bonus, the Worker must have submitted results for one of your + HITs, and have had those results approved or rejected. This payment happens separately + from the reward you pay to the Worker when you approve the Worker's assignment. + The Bonus must be passed in as an instance of the Price object. 
+ """ + params = bonus_price.get_as_params('BonusAmount', 1) + params['WorkerId'] = worker_id + params['AssignmentId'] = assignment_id + params['Reason'] = reason + + return self._process_request('GrantBonus', params) + + def _process_request(self, request_type, params, marker_elems=None): + """ + Helper to process the xml response from AWS + """ + response = self.make_request(request_type, params) + return self._process_response(response, marker_elems) + + def _process_response(self, response, marker_elems=None): + """ + Helper to process the xml response from AWS + """ + body = response.read() + #print body + if '' not in body: + rs = ResultSet(marker_elems) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + else: + raise EC2ResponseError(response.status, response.reason, body) + + @staticmethod + def get_keywords_as_string(keywords): + """ + Returns a comma+space-separated string of keywords from either a list or a string + """ + if type(keywords) is list: + final_keywords = ', '.join(keywords) + elif type(keywords) is str: + final_keywords = keywords + elif type(keywords) is unicode: + final_keywords = keywords.encode('utf-8') + elif keywords is None: + final_keywords = "" + else: + raise TypeError("keywords argument must be a string or a list of strings; got a %s" % type(keywords)) + return final_keywords + + @staticmethod + def get_price_as_price(reward): + """ + Returns a Price data structure from either a float or a Price + """ + if isinstance(reward, Price): + final_price = reward + else: + final_price = Price(reward) + return final_price + +class BaseAutoResultElement: + """ + Base class to automatically add attributes when parsing XML + """ + def __init__(self, connection): + self.connection = connection + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + setattr(self, name, value) + +class HIT(BaseAutoResultElement): + """ + Class to extract a HIT structure from a response (used in ResultSet) + + Will have attributes named as per the Developer Guide, + e.g. HITId, HITTypeId, CreationTime + """ + + # property helper to determine if HIT has expired + def _has_expired(self): + """ Has this HIT expired yet? """ + expired = False + if hasattr(self, 'Expiration'): + now = datetime.datetime.utcnow() + expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ') + expired = (now >= expiration) + else: + raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!") + return expired + + # are we there yet? + expired = property(_has_expired) + +class Assignment(BaseAutoResultElement): + """ + Class to extract an Assignment structure from a response (used in ResultSet) + + Will have attributes named as per the Developer Guide, + e.g. 
AssignmentId, WorkerId, HITId, Answer, etc + """ + + def __init__(self, connection): + BaseAutoResultElement.__init__(self, connection) + self.answers = [] + + def endElement(self, name, value, connection): + # the answer consists of embedded XML, so it needs to be parsed independantly + if name == 'Answer': + answer_rs = ResultSet([('Answer', QuestionFormAnswer),]) + h = handler.XmlHandler(answer_rs, connection) + value = self.connection.get_utf8_value(value) + xml.sax.parseString(value, h) + self.answers.append(answer_rs) + else: + BaseAutoResultElement.endElement(self, name, value, connection) + +class QuestionFormAnswer(BaseAutoResultElement): + """ + Class to extract Answers from inside the embedded XML QuestionFormAnswers element inside the + Answer element which is part of the Assignment structure + + A QuestionFormAnswers element contains an Answer element for each question in the HIT or + Qualification test for which the Worker provided an answer. Each Answer contains a + QuestionIdentifier element whose value corresponds to the QuestionIdentifier of a + Question in the QuestionForm. See the QuestionForm data structure for more information about + questions and answer specifications. + + If the question expects a free-text answer, the Answer element contains a FreeText element. This + element contains the Worker's answer + + *NOTE* - currently really only supports free-text answers + """ + + def __init__(self, connection): + BaseAutoResultElement.__init__(self, connection) + self.fields = [] + self.qid = None + + def endElement(self, name, value, connection): + if name == 'QuestionIdentifier': + self.qid = value + elif name == 'FreeText' and self.qid: + self.fields.append((self.qid,value)) + elif name == 'Answer': + self.qid = None diff --git a/vendor/boto/boto/mturk/notification.py b/vendor/boto/boto/mturk/notification.py new file mode 100644 index 000000000000..4904a998252c --- /dev/null +++ b/vendor/boto/boto/mturk/notification.py @@ -0,0 +1,95 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Provides NotificationMessage and Event classes, with utility methods, for +implementations of the Mechanical Turk Notification API. 
+""" + +import hmac +try: + from hashlib import sha1 as sha +except ImportError: + import sha +import base64 +import re + +class NotificationMessage: + + NOTIFICATION_WSDL = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurk/2006-05-05/AWSMechanicalTurkRequesterNotification.wsdl" + NOTIFICATION_VERSION = '2006-05-05' + + SERVICE_NAME = "AWSMechanicalTurkRequesterNotification" + OPERATION_NAME = "Notify" + + EVENT_PATTERN = r"Event\.(?P\d+)\.(?P\w+)" + EVENT_RE = re.compile(EVENT_PATTERN) + + def __init__(self, d): + """ + Constructor; expects parameter d to be a dict of string parameters from a REST transport notification message + """ + self.signature = d['Signature'] # vH6ZbE0NhkF/hfNyxz2OgmzXYKs= + self.timestamp = d['Timestamp'] # 2006-05-23T23:22:30Z + self.version = d['Version'] # 2006-05-05 + assert d['method'] == NotificationMessage.OPERATION_NAME, "Method should be '%s'" % NotificationMessage.OPERATION_NAME + + # Build Events + self.events = [] + events_dict = {} + if 'Event' in d: + # TurboGears surprised me by 'doing the right thing' and making { 'Event': { '1': { 'EventType': ... } } } etc. + events_dict = d['Event'] + else: + for k in d: + v = d[k] + if k.startswith('Event.'): + ed = NotificationMessage.EVENT_RE.search(k).groupdict() + n = int(ed['n']) + param = str(ed['param']) + if n not in events_dict: + events_dict[n] = {} + events_dict[n][param] = v + for n in events_dict: + self.events.append(Event(events_dict[n])) + + def verify(self, secret_key): + """ + Verifies the authenticity of a notification message. + """ + verification_input = NotificationMessage.SERVICE_NAME + NotificationMessage.OPERATION_NAME + self.timestamp + h = hmac.new(key=secret_key, digestmod=sha) + h.update(verification_input) + signature_calc = base64.b64encode(h.digest()) + return self.signature == signature_calc + +class Event: + def __init__(self, d): + self.event_type = d['EventType'] + self.event_time_str = d['EventTime'] + self.hit_type = d['HITTypeId'] + self.hit_id = d['HITId'] + self.assignment_id = d['AssignmentId'] + + #TODO: build self.event_time datetime from string self.event_time_str + + def __repr__(self): + return "" % (self.event_type, self.hit_id) diff --git a/vendor/boto/boto/mturk/price.py b/vendor/boto/boto/mturk/price.py new file mode 100644 index 000000000000..3c88a9654990 --- /dev/null +++ b/vendor/boto/boto/mturk/price.py @@ -0,0 +1,48 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class Price: + + def __init__(self, amount=0.0, currency_code='USD'): + self.amount = amount + self.currency_code = currency_code + self.formatted_price = '' + + def __repr__(self): + if self.formatted_price: + return self.formatted_price + else: + return str(self.amount) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Amount': + self.amount = float(value) + elif name == 'CurrencyCode': + self.currency_code = value + elif name == 'FormattedPrice': + self.formatted_price = value + + def get_as_params(self, label, ord=1): + return {'%s.%d.Amount'%(label, ord) : str(self.amount), + '%s.%d.CurrencyCode'%(label, ord) : self.currency_code} diff --git a/vendor/boto/boto/mturk/qualification.py b/vendor/boto/boto/mturk/qualification.py new file mode 100644 index 000000000000..ed02087f0185 --- /dev/null +++ b/vendor/boto/boto/mturk/qualification.py @@ -0,0 +1,118 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Qualifications: + + def __init__(self, requirements = []): + self.requirements = requirements + + def add(self, req): + self.requirements.append(req) + + def get_as_params(self): + params = {} + assert(len(self.requirements) <= 10) + for n, req in enumerate(self.requirements): + reqparams = req.get_as_params() + for rp in reqparams: + params['QualificationRequirement.%s.%s' % ((n+1),rp) ] = reqparams[rp] + return params + + +class Requirement(object): + """ + Representation of a single requirement + """ + + def __init__(self, qualification_type_id, comparator, integer_value, required_to_preview=False): + self.qualification_type_id = qualification_type_id + self.comparator = comparator + self.integer_value = integer_value + self.required_to_preview = required_to_preview + + def get_as_params(self): + params = { + "QualificationTypeId": self.qualification_type_id, + "Comparator": self.comparator, + "IntegerValue": self.integer_value, + } + if self.required_to_preview: + params['RequiredToPreview'] = "true" + return params + +class PercentAssignmentsSubmittedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted, over all assignments the Worker has accepted. The value is an integer between 0 and 100. 
+ """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="00000000000000000000", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsAbandonedRequirement(Requirement): + """ + The percentage of assignments the Worker has abandoned (allowed the deadline to elapse), over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="00000000000000000070", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsReturnedRequirement(Requirement): + """ + The percentage of assignments the Worker has returned, over all assignments the Worker has accepted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="000000000000000000E0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsApprovedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted that were subsequently approved by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="000000000000000000L0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class PercentAssignmentsRejectedRequirement(Requirement): + """ + The percentage of assignments the Worker has submitted that were subsequently rejected by the Requester, over all assignments the Worker has submitted. The value is an integer between 0 and 100. + """ + + def __init__(self, comparator, integer_value, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="000000000000000000S0", comparator=comparator, integer_value=integer_value, required_to_preview=required_to_preview) + +class LocaleRequirement(Requirement): + """ + A Qualification requirement based on the Worker's location. The Worker's location is specified by the Worker to Mechanical Turk when the Worker creates his account. 
+ """ + + def __init__(self, comparator, locale, required_to_preview=False): + Requirement.__init__(self, qualification_type_id="00000000000000000071", comparator=comparator, integer_value=None, required_to_preview=required_to_preview) + self.locale = locale + + def get_as_params(self): + params = { + "QualificationTypeId": self.qualification_type_id, + "Comparator": self.comparator, + 'LocaleValue.Country': self.locale, + } + if self.required_to_preview: + params['RequiredToPreview'] = "true" + return params diff --git a/vendor/boto/boto/mturk/question.py b/vendor/boto/boto/mturk/question.py new file mode 100644 index 000000000000..d4d9734f9ae6 --- /dev/null +++ b/vendor/boto/boto/mturk/question.py @@ -0,0 +1,336 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class Question(object): + + QUESTION_XML_TEMPLATE = """%s%s%s%s%s""" + DISPLAY_NAME_XML_TEMPLATE = """%s""" + + def __init__(self, identifier, content, answer_spec, is_required=False, display_name=None): #amount=0.0, currency_code='USD'): + self.identifier = identifier + self.content = content + self.answer_spec = answer_spec + self.is_required = is_required + self.display_name = display_name + + def get_as_params(self, label='Question', identifier=None): + + if identifier is None: + raise ValueError("identifier (QuestionIdentifier) is required per MTurk spec.") + + return { label : self.get_as_xml() } + + def get_as_xml(self): + # add the display name if required + display_name_xml = '' + if self.display_name: + display_name_xml = self.DISPLAY_NAME_XML_TEMPLATE %(self.display_name) + + ret = Question.QUESTION_XML_TEMPLATE % (self.identifier, + display_name_xml, + str(self.is_required).lower(), + self.content.get_as_xml(), + self.answer_spec.get_as_xml()) + + return ret + +class ExternalQuestion(object): + + EXTERNAL_QUESTIONFORM_SCHEMA_LOCATION = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd" + EXTERNAL_QUESTION_XML_TEMPLATE = """%s%s""" + + def __init__(self, external_url, frame_height): + self.external_url = external_url + self.frame_height = frame_height + + def get_as_params(self, label='ExternalQuestion'): + return { label : self.get_as_xml() } + + def get_as_xml(self): + ret = ExternalQuestion.EXTERNAL_QUESTION_XML_TEMPLATE % (ExternalQuestion.EXTERNAL_QUESTIONFORM_SCHEMA_LOCATION, + self.external_url, + self.frame_height) + return ret + +class OrderedContent(object): + def __init__(self): + self.items = [] + + def append(self, field, value): + "Expects field type and value" + self.items.append((field, value)) + + def get_binary_xml(self, field, value): + return """ + + + %s + %s + + %s + %s +""" % (value['type'], + value['subtype'], + value['dataurl'], + value['alttext']) + + def get_application_xml(self, field, value): + raise NotImplementedError("Application question content is not yet supported.") + + def get_as_xml(self): + default_handler = lambda f,v: '<%s>%s' % (f,v,f) + bulleted_list_handler = lambda _,list: '%s' % ''.join([('%s' % item) for item in list]) + formatted_content_handler = lambda _,content: "" % content + application_handler = self.get_application_xml + binary_handler = self.get_binary_xml + + children = '' + for (field,value) in self.items: + handler = default_handler + if field == 'List': + handler = bulleted_list_handler + elif field == 'Application': + handler = application_handler + elif field == 'Binary': + handler = binary_handler + elif field == 'FormattedContent': + handler = formatted_content_handler + children = children + handler(field, value) + + return children + +class Overview(object): + OVERVIEW_XML_TEMPLATE = """%s""" + + def __init__(self): + self.ordered_content = OrderedContent() + + def append(self, field, value): + self.ordered_content.append(field,value) + + def get_as_params(self, label='Overview'): + return { label : self.get_as_xml() } + + def get_as_xml(self): + ret = Overview.OVERVIEW_XML_TEMPLATE % (self.ordered_content.get_as_xml()) + + return ret + + +class QuestionForm(object): + + QUESTIONFORM_SCHEMA_LOCATION = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd" + QUESTIONFORM_XML_TEMPLATE = """%s""" + + def __init__(self): + self.items = [] + + def append(self, item): + "Expects field type and value" + self.items.append(item) + + 
def get_as_xml(self): + xml = '' + for item in self.items: + xml = xml + item.get_as_xml() + return QuestionForm.QUESTIONFORM_XML_TEMPLATE % (QuestionForm.QUESTIONFORM_SCHEMA_LOCATION, xml) + +class QuestionContent(object): + QUESTIONCONTENT_XML_TEMPLATE = """%s""" + + def __init__(self): + self.ordered_content = OrderedContent() + + def append(self, field, value): + self.ordered_content.append(field,value) + + def get_as_xml(self): + ret = QuestionContent.QUESTIONCONTENT_XML_TEMPLATE % (self.ordered_content.get_as_xml()) + + return ret + + +class AnswerSpecification(object): + + ANSWERSPECIFICATION_XML_TEMPLATE = """%s""" + + def __init__(self, spec): + self.spec = spec + def get_as_xml(self): + values = () # TODO + return AnswerSpecification.ANSWERSPECIFICATION_XML_TEMPLATE % self.spec.get_as_xml() + +class FreeTextAnswer(object): + + FREETEXTANSWER_XML_TEMPLATE = """%s%s""" # (constraints, default) + FREETEXTANSWER_CONSTRAINTS_XML_TEMPLATE = """%s%s%s""" # (is_numeric_xml, length_xml, regex_xml) + FREETEXTANSWER_LENGTH_XML_TEMPLATE = """""" # (min_length_attr, max_length_attr) + FREETEXTANSWER_ISNUMERIC_XML_TEMPLATE = """""" # (min_value_attr, max_value_attr) + FREETEXTANSWER_DEFAULTTEXT_XML_TEMPLATE = """%s""" # (default) + + def __init__(self, default=None, min_length=None, max_length=None, is_numeric=False, min_value=None, max_value=None, format_regex=None): + self.default = default + self.min_length = min_length + self.max_length = max_length + self.is_numeric = is_numeric + self.min_value = min_value + self.max_value = max_value + self.format_regex = format_regex + + def get_as_xml(self): + is_numeric_xml = "" + if self.is_numeric: + min_value_attr = "" + max_value_attr = "" + if self.min_value: + min_value_attr = """minValue="%d" """ % self.min_value + if self.max_value: + max_value_attr = """maxValue="%d" """ % self.max_value + is_numeric_xml = FreeTextAnswer.FREETEXTANSWER_ISNUMERIC_XML_TEMPLATE % (min_value_attr, max_value_attr) + + length_xml = "" + if self.min_length or self.max_length: + min_length_attr = "" + max_length_attr = "" + if self.min_length: + min_length_attr = """minLength="%d" """ + if self.max_length: + max_length_attr = """maxLength="%d" """ + length_xml = FreeTextAnswer.FREETEXTANSWER_LENGTH_XML_TEMPLATE % (min_length_attr, max_length_attr) + + regex_xml = "" + if self.format_regex: + format_regex_attribs = '''regex="%s"''' %self.format_regex['regex'] + + error_text = self.format_regex.get('error_text', None) + if error_text: + format_regex_attribs += ' errorText="%s"' %error_text + + flags = self.format_regex.get('flags', None) + if flags: + format_regex_attribs += ' flags="%s"' %flags + + regex_xml = """""" %format_regex_attribs + + constraints_xml = "" + if is_numeric_xml or length_xml or regex_xml: + constraints_xml = FreeTextAnswer.FREETEXTANSWER_CONSTRAINTS_XML_TEMPLATE % (is_numeric_xml, length_xml, regex_xml) + + default_xml = "" + if self.default is not None: + default_xml = FreeTextAnswer.FREETEXTANSWER_DEFAULTTEXT_XML_TEMPLATE % self.default + + return FreeTextAnswer.FREETEXTANSWER_XML_TEMPLATE % (constraints_xml, default_xml) + +class FileUploadAnswer(object): + FILEUPLOADANSWER_XML_TEMLPATE = """%d%d""" # (min, max) + DEFAULT_MIN_SIZE = 1024 # 1K (completely arbitrary!) + DEFAULT_MAX_SIZE = 5 * 1024 * 1024 # 5MB (completely arbitrary!) 
+ + def __init__(self, min=None, max=None): + self.min = min + self.max = max + if self.min is None: + self.min = FileUploadAnswer.DEFAULT_MIN_SIZE + if self.max is None: + self.max = FileUploadAnswer.DEFAULT_MAX_SIZE + + def get_as_xml(self): + return FileUploadAnswer.FILEUPLOADANSWER_XML_TEMLPATE % (self.min, self.max) + +class SelectionAnswer(object): + """ + A class to generate SelectionAnswer XML data structures. + Does not yet implement Binary selection options. + """ + SELECTIONANSWER_XML_TEMPLATE = """%s%s%s""" # % (count_xml, style_xml, selections_xml) + SELECTION_XML_TEMPLATE = """%s%s""" # (identifier, value_xml) + SELECTION_VALUE_XML_TEMPLATE = """<%s>%s""" # (type, value, type) + STYLE_XML_TEMPLATE = """%s""" # (style) + MIN_SELECTION_COUNT_XML_TEMPLATE = """%s""" # count + MAX_SELECTION_COUNT_XML_TEMPLATE = """%s""" # count + ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser'] + OTHER_SELECTION_ELEMENT_NAME = 'OtherSelection' + + def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False): + + if style is not None: + if style in SelectionAnswer.ACCEPTED_STYLES: + self.style_suggestion = style + else: + raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES))) + else: + self.style_suggestion = None + + if selections is None: + raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of (content, identifier) tuples") + else: + self.selections = selections + + self.min_selections = min + self.max_selections = max + + assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections + #assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections + + self.type = type + + self.other = other + + def get_as_xml(self): + if self.type == 'text': + TYPE_TAG = "Text" + elif self.type == 'binary': + TYPE_TAG = "Binary" + else: + raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type)) + + # build list of elements + selections_xml = "" + for tpl in self.selections: + value_xml = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (TYPE_TAG, tpl[0], TYPE_TAG) + selection_xml = SelectionAnswer.SELECTION_XML_TEMPLATE % (tpl[1], value_xml) + selections_xml += selection_xml + + if self.other: + # add OtherSelection element as xml if available + if hasattr(self.other, 'get_as_xml'): + assert type(self.other) == FreeTextAnswer, 'OtherSelection can only be a FreeTextAnswer' + selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection') + else: + selections_xml += "" + + if self.style_suggestion is not None: + style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion + else: + style_xml = "" + + if self.style_suggestion != 'radiobutton': + count_xml = SelectionAnswer.MIN_SELECTION_COUNT_XML_TEMPLATE %self.min_selections + count_xml += SelectionAnswer.MAX_SELECTION_COUNT_XML_TEMPLATE %self.max_selections + else: + count_xml = "" + + ret = SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (count_xml, style_xml, selections_xml) + + # return XML + return ret + diff --git a/vendor/boto/boto/mturk/test/all_tests.py b/vendor/boto/boto/mturk/test/all_tests.py new file mode 100644 index 000000000000..a8f291a6a967 --- /dev/null +++ b/vendor/boto/boto/mturk/test/all_tests.py @@ -0,0 +1,8 @@ +import doctest + +# doctest.testfile("create_hit.doctest") +# doctest.testfile("create_hit_binary.doctest") 
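+# (editorial note) only the free-text regex doctest below is enabled; the
+# commented-out files exercise HIT creation, search and review and, like it,
+# run against the requester sandbox and need live AWS credentials.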
+doctest.testfile("create_free_text_question_regex.doctest") +# doctest.testfile("create_hit_from_hit_type.doctest") +# doctest.testfile("search_hits.doctest") +# doctest.testfile("reviewable_hits.doctest") diff --git a/vendor/boto/boto/mturk/test/cleanup_tests.py b/vendor/boto/boto/mturk/test/cleanup_tests.py new file mode 100644 index 000000000000..7bdff90c74c1 --- /dev/null +++ b/vendor/boto/boto/mturk/test/cleanup_tests.py @@ -0,0 +1,67 @@ +from boto.mturk.connection import MTurkConnection + +def cleanup(): + """Remove any boto test related HIT's""" + + conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + current_page = 1 + page_size = 10 + total_disabled = 0 + ignored = [] + + while True: + # reset the total for this loop + disabled_count = 0 + + # search all the hits in the sandbox + search_rs = conn.search_hits(page_size=page_size, page_number=current_page) + + # success? + if search_rs.status: + for hit in search_rs: + # delete any with Boto in the description + print 'hit id:%s Status:%s, desc:%s' %(hit.HITId, hit.HITStatus, hit.Description) + if hit.Description.find('Boto') != -1: + if hit.HITStatus != 'Reviewable': + print 'Disabling hit id:%s %s' %(hit.HITId, hit.Description) + disable_rs = conn.disable_hit(hit.HITId) + if disable_rs.status: + disabled_count += 1 + # update the running total + total_disabled += 1 + else: + print 'Error when disabling, code:%s, message:%s' %(disable_rs.Code, disable_rs.Message) + else: + print 'Disposing hit id:%s %s' %(hit.HITId, hit.Description) + dispose_rs = conn.dispose_hit(hit.HITId) + if dispose_rs.status: + disabled_count += 1 + # update the running total + total_disabled += 1 + else: + print 'Error when disposing, code:%s, message:%s' %(dispose_rs.Code, dispose_rs.Message) + + else: + if hit.HITId not in ignored: + print 'ignored:%s' %hit.HITId + ignored.append(hit.HITId) + + # any more results? + if int(search_rs.TotalNumResults) > current_page*page_size: + # if we have disabled any HITs on this page + # then we don't need to go to a new page + # otherwise we do + if not disabled_count: + current_page += 1 + else: + # no, we're done + break + else: + print 'Error performing search, code:%s, message:%s' %(search_rs.Code, search_rs.Message) + break + + total_ignored = len(ignored) + print 'Processed: %d HITs, disabled/disposed: %d, ignored: %d' %(total_ignored + total_disabled, total_disabled, total_ignored) + +if __name__ == '__main__': + cleanup() diff --git a/vendor/boto/boto/mturk/test/create_free_text_question_regex.doctest b/vendor/boto/boto/mturk/test/create_free_text_question_regex.doctest new file mode 100644 index 000000000000..a10b7ed1b894 --- /dev/null +++ b/vendor/boto/boto/mturk/test/create_free_text_question_regex.doctest @@ -0,0 +1,92 @@ +>>> import uuid +>>> import datetime +>>> from boto.mturk.connection import MTurkConnection +>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer + +>>> conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + +# create content for a question +>>> qn_content = QuestionContent(title='Boto no hit type question content', +... text='What is a boto no hit type?') + +# create a free text answer that is not quite so free! +>>> ft_answer = FreeTextAnswer(format_regex=dict(regex="^[12][0-9]{3}-[01]?\d-[0-3]?\d$", +... error_text="You must enter a date with the format yyyy-mm-dd.", +... flags="i"), +... 
default="This is not a valid format") + +# create the question specification +>>> qn = Question(identifier=str(uuid.uuid4()), +... content=qn_content, +... answer_spec=AnswerSpecification(ft_answer)) + +# now, create the actual HIT for the question without using a HIT type +# NOTE - the response_groups are specified to get back additional information for testing +>>> keywords=['boto', 'test', 'doctest'] +>>> create_hit_rs = conn.create_hit(question=qn, +... lifetime=60*65, +... max_assignments=2, +... title='Boto create_hit title', +... description='Boto create_hit description', +... keywords=keywords, +... reward=0.23, +... duration=60*6, +... approval_delay=60*60, +... annotation='An annotation from boto create_hit test', +... response_groups=['Minimal', +... 'HITDetail', +... 'HITQuestion', +... 'HITAssignmentSummary',]) + +# this is a valid request +>>> create_hit_rs.status +True + +# for the requested hit type id +# the HIT Type Id is a unicode string +>>> hit_type_id = create_hit_rs.HITTypeId +>>> hit_type_id # doctest: +ELLIPSIS +u'...' + +>>> create_hit_rs.MaxAssignments +u'2' + +>>> create_hit_rs.AutoApprovalDelayInSeconds +u'3600' + +# expiration should be very close to now + the lifetime in seconds +>>> expected_datetime = datetime.datetime.utcnow() + datetime.timedelta(seconds=3900) +>>> expiration_datetime = datetime.datetime.strptime(create_hit_rs.Expiration, '%Y-%m-%dT%H:%M:%SZ') +>>> delta = expected_datetime - expiration_datetime +>>> delta.seconds < 5 +True + +# duration is as specified for the HIT type +>>> create_hit_rs.AssignmentDurationInSeconds +u'360' + +# the reward has been set correctly (allow for float error here) +>>> int(create_hit_rs[0].amount * 100) +23 + +>>> create_hit_rs[0].formatted_price +u'$0.23' + +# only US currency supported at present +>>> create_hit_rs[0].currency_code +u'USD' + +# title is the HIT type title +>>> create_hit_rs.Title +u'Boto create_hit title' + +# title is the HIT type description +>>> create_hit_rs.Description +u'Boto create_hit description' + +# annotation is correct +>>> create_hit_rs.RequesterAnnotation +u'An annotation from boto create_hit test' + +>>> create_hit_rs.HITReviewStatus +u'NotReviewed' diff --git a/vendor/boto/boto/mturk/test/create_hit.doctest b/vendor/boto/boto/mturk/test/create_hit.doctest new file mode 100644 index 000000000000..22209d670290 --- /dev/null +++ b/vendor/boto/boto/mturk/test/create_hit.doctest @@ -0,0 +1,86 @@ +>>> import uuid +>>> import datetime +>>> from boto.mturk.connection import MTurkConnection +>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer + +>>> conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + +# create content for a question +>>> qn_content = QuestionContent(title='Boto no hit type question content', +... text='What is a boto no hit type?') + +# create the question specification +>>> qn = Question(identifier=str(uuid.uuid4()), +... content=qn_content, +... answer_spec=AnswerSpecification(FreeTextAnswer())) + +# now, create the actual HIT for the question without using a HIT type +# NOTE - the response_groups are specified to get back additional information for testing +>>> keywords=['boto', 'test', 'doctest'] +>>> create_hit_rs = conn.create_hit(question=qn, +... lifetime=60*65, +... max_assignments=2, +... title='Boto create_hit title', +... description='Boto create_hit description', +... keywords=keywords, +... reward=0.23, +... duration=60*6, +... approval_delay=60*60, +... 
annotation='An annotation from boto create_hit test', +... response_groups=['Minimal', +... 'HITDetail', +... 'HITQuestion', +... 'HITAssignmentSummary',]) + +# this is a valid request +>>> create_hit_rs.status +True + +# for the requested hit type id +# the HIT Type Id is a unicode string +>>> hit_type_id = create_hit_rs.HITTypeId +>>> hit_type_id # doctest: +ELLIPSIS +u'...' + +>>> create_hit_rs.MaxAssignments +u'2' + +>>> create_hit_rs.AutoApprovalDelayInSeconds +u'3600' + +# expiration should be very close to now + the lifetime in seconds +>>> expected_datetime = datetime.datetime.utcnow() + datetime.timedelta(seconds=3900) +>>> expiration_datetime = datetime.datetime.strptime(create_hit_rs.Expiration, '%Y-%m-%dT%H:%M:%SZ') +>>> delta = expected_datetime - expiration_datetime +>>> delta.seconds < 5 +True + +# duration is as specified for the HIT type +>>> create_hit_rs.AssignmentDurationInSeconds +u'360' + +# the reward has been set correctly (allow for float error here) +>>> int(create_hit_rs[0].amount * 100) +23 + +>>> create_hit_rs[0].formatted_price +u'$0.23' + +# only US currency supported at present +>>> create_hit_rs[0].currency_code +u'USD' + +# title is the HIT type title +>>> create_hit_rs.Title +u'Boto create_hit title' + +# title is the HIT type description +>>> create_hit_rs.Description +u'Boto create_hit description' + +# annotation is correct +>>> create_hit_rs.RequesterAnnotation +u'An annotation from boto create_hit test' + +>>> create_hit_rs.HITReviewStatus +u'NotReviewed' diff --git a/vendor/boto/boto/mturk/test/create_hit_binary.doctest b/vendor/boto/boto/mturk/test/create_hit_binary.doctest new file mode 100644 index 000000000000..309608320c08 --- /dev/null +++ b/vendor/boto/boto/mturk/test/create_hit_binary.doctest @@ -0,0 +1,87 @@ +>>> import uuid +>>> import datetime +>>> from boto.mturk.connection import MTurkConnection +>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer + +>>> conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + +# create content for a question +>>> qn_content = QuestionContent(title='Boto no hit type question content', +... text='What is a boto no hit type?', +... binary='http://www.example.com/test1.jpg') + +# create the question specification +>>> qn = Question(identifier=str(uuid.uuid4()), +... content=qn_content, +... answer_spec=AnswerSpecification(FreeTextAnswer())) + +# now, create the actual HIT for the question without using a HIT type +# NOTE - the response_groups are specified to get back additional information for testing +>>> keywords=['boto', 'test', 'doctest'] +>>> create_hit_rs = conn.create_hit(question=qn, +... lifetime=60*65, +... max_assignments=2, +... title='Boto create_hit title', +... description='Boto create_hit description', +... keywords=keywords, +... reward=0.23, +... duration=60*6, +... approval_delay=60*60, +... annotation='An annotation from boto create_hit test', +... response_groups=['Minimal', +... 'HITDetail', +... 'HITQuestion', +... 'HITAssignmentSummary',]) + +# this is a valid request +>>> create_hit_rs.status +True + +# for the requested hit type id +# the HIT Type Id is a unicode string +>>> hit_type_id = create_hit_rs.HITTypeId +>>> hit_type_id # doctest: +ELLIPSIS +u'...' 
+ +>>> create_hit_rs.MaxAssignments +u'2' + +>>> create_hit_rs.AutoApprovalDelayInSeconds +u'3600' + +# expiration should be very close to now + the lifetime in seconds +>>> expected_datetime = datetime.datetime.utcnow() + datetime.timedelta(seconds=3900) +>>> expiration_datetime = datetime.datetime.strptime(create_hit_rs.Expiration, '%Y-%m-%dT%H:%M:%SZ') +>>> delta = expected_datetime - expiration_datetime +>>> delta.seconds < 5 +True + +# duration is as specified for the HIT type +>>> create_hit_rs.AssignmentDurationInSeconds +u'360' + +# the reward has been set correctly (allow for float error here) +>>> int(create_hit_rs[0].amount * 100) +23 + +>>> create_hit_rs[0].formatted_price +u'$0.23' + +# only US currency supported at present +>>> create_hit_rs[0].currency_code +u'USD' + +# title is the HIT type title +>>> create_hit_rs.Title +u'Boto create_hit title' + +# title is the HIT type description +>>> create_hit_rs.Description +u'Boto create_hit description' + +# annotation is correct +>>> create_hit_rs.RequesterAnnotation +u'An annotation from boto create_hit test' + +>>> create_hit_rs.HITReviewStatus +u'NotReviewed' diff --git a/vendor/boto/boto/mturk/test/create_hit_external.py b/vendor/boto/boto/mturk/test/create_hit_external.py new file mode 100644 index 000000000000..e7425d665dc6 --- /dev/null +++ b/vendor/boto/boto/mturk/test/create_hit_external.py @@ -0,0 +1,14 @@ +import uuid +import datetime +from boto.mturk.connection import MTurkConnection +from boto.mturk.question import ExternalQuestion + +def test(): + q = ExternalQuestion(external_url="http://websort.net/s/F3481C", frame_height=800) + conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + keywords=['boto', 'test', 'doctest'] + create_hit_rs = conn.create_hit(question=q, lifetime=60*65,max_assignments=2,title="Boto External Question Test", keywords=keywords,reward = 0.05, duration=60*6,approval_delay=60*60, annotation='An annotation from boto external question test', response_groups=['Minimal','HITDetail','HITQuestion','HITAssignmentSummary',]) + assert(create_hit_rs.status == True) + +if __name__ == "__main__": + test() diff --git a/vendor/boto/boto/mturk/test/create_hit_from_hit_type.doctest b/vendor/boto/boto/mturk/test/create_hit_from_hit_type.doctest new file mode 100644 index 000000000000..144a677f6dcb --- /dev/null +++ b/vendor/boto/boto/mturk/test/create_hit_from_hit_type.doctest @@ -0,0 +1,97 @@ +>>> import uuid +>>> import datetime +>>> from boto.mturk.connection import MTurkConnection +>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer +>>> +>>> conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') +>>> keywords=['boto', 'test', 'doctest'] +>>> hit_type_rs = conn.register_hit_type('Boto Test HIT type', +... 'HIT Type for testing Boto', +... 0.12, +... 60*6, +... keywords=keywords, +... approval_delay=60*60) + +# this was a valid request +>>> hit_type_rs.status +True + +# the HIT Type Id is a unicode string +>>> hit_type_id = hit_type_rs.HITTypeId +>>> hit_type_id # doctest: +ELLIPSIS +u'...' + +# create content for a question +>>> qn_content = QuestionContent(title='Boto question content create_hit_from_hit_type', +... text='What is a boto create_hit_from_hit_type?') + +# create the question specification +>>> qn = Question(identifier=str(uuid.uuid4()), +... content=qn_content, +... 
answer_spec=AnswerSpecification(FreeTextAnswer())) + +# now, create the actual HIT for the question using the HIT type +# NOTE - the response_groups are specified to get back additional information for testing +>>> create_hit_rs = conn.create_hit(hit_type=hit_type_rs.HITTypeId, +... question=qn, +... lifetime=60*65, +... max_assignments=2, +... annotation='An annotation from boto create_hit_from_hit_type test', +... response_groups=['Minimal', +... 'HITDetail', +... 'HITQuestion', +... 'HITAssignmentSummary',]) + +# this is a valid request +>>> create_hit_rs.status +True + +# for the requested hit type id +>>> create_hit_rs.HITTypeId == hit_type_id +True + +# with the correct number of maximum assignments +>>> create_hit_rs.MaxAssignments +u'2' + +# and the approval delay +>>> create_hit_rs.AutoApprovalDelayInSeconds +u'3600' + +# expiration should be very close to now + the lifetime in seconds +>>> expected_datetime = datetime.datetime.utcnow() + datetime.timedelta(seconds=3900) +>>> expiration_datetime = datetime.datetime.strptime(create_hit_rs.Expiration, '%Y-%m-%dT%H:%M:%SZ') +>>> delta = expected_datetime - expiration_datetime +>>> delta.seconds < 5 +True + +# duration is as specified for the HIT type +>>> create_hit_rs.AssignmentDurationInSeconds +u'360' + +# the reward has been set correctly +>>> create_hit_rs[0].amount +0.12 + +>>> create_hit_rs[0].formatted_price +u'$0.12' + +# only US currency supported at present +>>> create_hit_rs[0].currency_code +u'USD' + +# title is the HIT type title +>>> create_hit_rs.Title +u'Boto Test HIT type' + +# title is the HIT type description +>>> create_hit_rs.Description +u'HIT Type for testing Boto' + +# annotation is correct +>>> create_hit_rs.RequesterAnnotation +u'An annotation from boto create_hit_from_hit_type test' + +# not reviewed yet +>>> create_hit_rs.HITReviewStatus +u'NotReviewed' diff --git a/vendor/boto/boto/mturk/test/create_hit_with_qualifications.py b/vendor/boto/boto/mturk/test/create_hit_with_qualifications.py new file mode 100644 index 000000000000..f2149eebe20d --- /dev/null +++ b/vendor/boto/boto/mturk/test/create_hit_with_qualifications.py @@ -0,0 +1,18 @@ +import uuid +import datetime +from boto.mturk.connection import MTurkConnection +from boto.mturk.question import ExternalQuestion +from boto.mturk.qualification import Qualifications, PercentAssignmentsApprovedRequirement + +def test(): + q = ExternalQuestion(external_url="http://websort.net/s/F3481C", frame_height=800) + conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + keywords=['boto', 'test', 'doctest'] + qualifications = Qualifications() + qualifications.add(PercentAssignmentsApprovedRequirement(comparator="GreaterThan", integer_value="95")) + create_hit_rs = conn.create_hit(question=q, lifetime=60*65,max_assignments=2,title="Boto External Question Test", keywords=keywords,reward = 0.05, duration=60*6,approval_delay=60*60, annotation='An annotation from boto external question test', qualifications=qualifications) + assert(create_hit_rs.status == True) + print create_hit_rs.HITTypeId + +if __name__ == "__main__": + test() diff --git a/vendor/boto/boto/mturk/test/reviewable_hits.doctest b/vendor/boto/boto/mturk/test/reviewable_hits.doctest new file mode 100644 index 000000000000..030590109810 --- /dev/null +++ b/vendor/boto/boto/mturk/test/reviewable_hits.doctest @@ -0,0 +1,71 @@ +>>> from boto.mturk.connection import MTurkConnection +>>> conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + +# should have some reviewable HIT's 
returned, especially if returning all HIT type's +# NOTE: but only if your account has existing HIT's in the reviewable state +>>> reviewable_rs = conn.get_reviewable_hits() + +# this is a valid request +>>> reviewable_rs.status +True + +>>> len(reviewable_rs) > 1 +True + +# should contain at least one HIT object +>>> reviewable_rs # doctest: +ELLIPSIS +[>> hit_id = reviewable_rs[0].HITId + +# check that we can retrieve the assignments for a HIT +>>> assignments_rs = conn.get_assignments(hit_id) + +# this is a valid request +>>> assignments_rs.status +True + +>>> assignments_rs.NumResults >= 1 +True + +>>> len(assignments_rs) == int(assignments_rs.NumResults) +True + +>>> assignments_rs.PageNumber +u'1' + +>>> assignments_rs.TotalNumResults >= 1 +True + +# should contain at least one Assignment object +>>> assignments_rs # doctest: +ELLIPSIS +[>> assignment = assignments_rs[0] + +>>> assignment.HITId == hit_id +True + +# should have a valid status +>>> assignment.AssignmentStatus in ['Submitted', 'Approved', 'Rejected'] +True + +# should have returned at least one answer +>>> len(assignment.answers) > 0 +True + +# should contain at least one set of QuestionFormAnswer objects +>>> assignment.answers # doctest: +ELLIPSIS +[[>> answer = assignment.answers[0][0] + +# answer should be a FreeTextAnswer +>>> answer.FreeText # doctest: +ELLIPSIS +u'...' + +# question identifier should be a unicode string +>>> answer.QuestionIdentifier # doctest: +ELLIPSIS +u'...' + diff --git a/vendor/boto/boto/mturk/test/search_hits.doctest b/vendor/boto/boto/mturk/test/search_hits.doctest new file mode 100644 index 000000000000..a2547ea14bfc --- /dev/null +++ b/vendor/boto/boto/mturk/test/search_hits.doctest @@ -0,0 +1,16 @@ +>>> from boto.mturk.connection import MTurkConnection +>>> conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com') + +# should have some HIT's returned by a search (but only if your account has existing HIT's) +>>> search_rs = conn.search_hits() + +# this is a valid request +>>> search_rs.status +True + +>>> len(search_rs) > 1 +True + +>>> search_rs # doctest: +ELLIPSIS +[= 0: + method, version = update.split(':') + version = '-r%s' % version + else: + version = '-rHEAD' + location = boto.config.get('Boto', 'boto_location', '/usr/local/boto') + self.run('svn update %s %s' % (version, location)) + else: + # first remove the symlink needed when running from subversion + self.run('rm /usr/local/lib/python2.5/site-packages/boto') + self.run('easy_install %s' % update) + + def fetch_s3_file(self, s3_file): + try: + if s3_file.startswith('s3:'): + bucket_name, key_name = s3_file[len('s3:'):].split('/') + c = boto.connect_s3() + bucket = c.get_bucket(bucket_name) + key = bucket.get_key(key_name) + boto.log.info('Fetching %s/%s' % (bucket.name, key.name)) + path = os.path.join(self.working_dir, key.name) + key.get_contents_to_filename(path) + except: + boto.log.exception('Problem Retrieving file: %s' % s3_file) + path = None + return path + + def load_packages(self): + package_str = boto.config.get('Pyami', 'packages') + if package_str: + packages = package_str.split(',') + for package in packages: + package = package.strip() + if package.startswith('s3:'): + package = self.fetch_s3_file(package) + if package: + # if the "package" is really a .py file, it doesn't have to + # be installed, just being in the working dir is enough + if not package.endswith('.py'): + self.run('easy_install -Z %s' % package, exit_on_error=False) + + def main(self): + self.create_working_dir() + 
self.load_boto() + self.load_packages() + self.notify('Bootstrap Completed for %s' % boto.config.get_instance('instance-id')) + +if __name__ == "__main__": + # because bootstrap starts before any logging configuration can be loaded from + # the boto config files, we will manually enable logging to /var/log/boto.log + boto.set_file_logger('bootstrap', '/var/log/boto.log') + bs = Bootstrap() + bs.main() diff --git a/vendor/boto/boto/pyami/config.py b/vendor/boto/boto/pyami/config.py new file mode 100644 index 000000000000..ea0c3a1a5e07 --- /dev/null +++ b/vendor/boto/boto/pyami/config.py @@ -0,0 +1,203 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import StringIO, os, re +import ConfigParser +import boto + +BotoConfigPath = '/etc/boto.cfg' +BotoConfigLocations = [BotoConfigPath] +if 'HOME' in os.environ: + UserConfigPath = os.path.expanduser('~/.boto') + BotoConfigLocations.append(UserConfigPath) +else: + UserConfigPath = None +if 'BOTO_CONFIG' in os.environ: + BotoConfigLocations.append(os.path.expanduser(os.environ['BOTO_CONFIG'])) + +class Config(ConfigParser.SafeConfigParser): + + def __init__(self, path=None, fp=None, do_load=True): + ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami', + 'debug' : '0'}) + if do_load: + if path: + self.load_from_path(path) + elif fp: + self.readfp(fp) + else: + self.read(BotoConfigLocations) + if "AWS_CREDENTIAL_FILE" in os.environ: + self.load_credential_file(os.path.expanduser(os.environ['AWS_CREDENTIAL_FILE'])) + + def load_credential_file(self, path): + """Load a credential file as is setup like the Java utilities""" + c_data = StringIO.StringIO() + c_data.write("[Credentials]\n") + for line in open(path, "r").readlines(): + c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key")) + c_data.seek(0) + self.readfp(c_data) + + def load_from_path(self, path): + file = open(path) + for line in file.readlines(): + match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line) + if match: + extended_file = match.group(1) + (dir, file) = os.path.split(path) + self.load_from_path(os.path.join(dir, extended_file)) + self.read(path) + + def save_option(self, path, section, option, value): + """ + Write the specified Section.Option to the config file specified by path. + Replace any previous value. If the path doesn't exist, create it. + Also add the option the the in-memory config. 
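A minimal usage sketch, using only the behaviour described above (the path and section name are illustrative placeholders, not part of boto):

>>> cfg = Config(do_load=False)
>>> cfg.save_option('/tmp/example-boto.cfg', 'Example', 'debug', '1')
>>> cfg.get('Example', 'debug')
'1'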
+ """ + config = ConfigParser.SafeConfigParser() + config.read(path) + if not config.has_section(section): + config.add_section(section) + config.set(section, option, value) + fp = open(path, 'w') + config.write(fp) + fp.close() + if not self.has_section(section): + self.add_section(section) + self.set(section, option, value) + + def save_user_option(self, section, option, value): + self.save_option(UserConfigPath, section, option, value) + + def save_system_option(self, section, option, value): + self.save_option(BotoConfigPath, section, option, value) + + def get_instance(self, name, default=None): + try: + val = self.get('Instance', name) + except: + val = default + return val + + def get_user(self, name, default=None): + try: + val = self.get('User', name) + except: + val = default + return val + + def getint_user(self, name, default=0): + try: + val = self.getint('User', name) + except: + val = default + return val + + def get_value(self, section, name, default=None): + return self.get(section, name, default) + + def get(self, section, name, default=None): + try: + val = ConfigParser.SafeConfigParser.get(self, section, name) + except: + val = default + return val + + def getint(self, section, name, default=0): + try: + val = ConfigParser.SafeConfigParser.getint(self, section, name) + except: + val = int(default) + return val + + def getfloat(self, section, name, default=0.0): + try: + val = ConfigParser.SafeConfigParser.getfloat(self, section, name) + except: + val = float(default) + return val + + def getbool(self, section, name, default=False): + if self.has_option(section, name): + val = self.get(section, name) + if val.lower() == 'true': + val = True + else: + val = False + else: + val = default + return val + + def setbool(self, section, name, value): + if value: + self.set(section, name, 'true') + else: + self.set(section, name, 'false') + + def dump(self): + s = StringIO.StringIO() + self.write(s) + print s.getvalue() + + def dump_safe(self, fp=None): + if not fp: + fp = StringIO.StringIO() + for section in self.sections(): + fp.write('[%s]\n' % section) + for option in self.options(section): + if option == 'aws_secret_access_key': + fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option) + else: + fp.write('%s = %s\n' % (option, self.get(section, option))) + + def dump_to_sdb(self, domain_name, item_name): + import simplejson + sdb = boto.connect_sdb() + domain = sdb.lookup(domain_name) + if not domain: + domain = sdb.create_domain(domain_name) + item = domain.new_item(item_name) + item.active = False + for section in self.sections(): + d = {} + for option in self.options(section): + d[option] = self.get(section, option) + item[section] = simplejson.dumps(d) + item.save() + + def load_from_sdb(self, domain_name, item_name): + import simplejson + sdb = boto.connect_sdb() + domain = sdb.lookup(domain_name) + item = domain.get_item(item_name) + for section in item.keys(): + if not self.has_section(section): + self.add_section(section) + d = simplejson.loads(item[section]) + for attr_name in d.keys(): + attr_value = d[attr_name] + if attr_value == None: + attr_value = 'None' + if isinstance(attr_value, bool): + self.setbool(section, attr_name, attr_value) + else: + self.set(section, attr_name, attr_value) diff --git a/vendor/boto/boto/pyami/copybot.cfg b/vendor/boto/boto/pyami/copybot.cfg new file mode 100644 index 000000000000..cbfdc5ad195d --- /dev/null +++ b/vendor/boto/boto/pyami/copybot.cfg @@ -0,0 +1,60 @@ +# +# Your AWS Credentials +# +[Credentials] +aws_access_key_id = 
+aws_secret_access_key = + +# +# If you want to use a separate set of credentials when writing +# to the destination bucket, put them here +#dest_aws_access_key_id = +#dest_aws_secret_access_key = + +# +# Fill out this section if you want emails from CopyBot +# when it starts and stops +# +[Notification] +#smtp_host = +#smtp_user = +#smtp_pass = +#smtp_from = +#smtp_to = + +# +# If you leave this section as is, it will automatically +# update boto from subversion upon start up. +# If you don't want that to happen, comment this out +# +[Boto] +boto_location = /usr/local/boto +boto_update = svn:HEAD + +# +# This tells the Pyami code in boto what scripts +# to run during startup +# +[Pyami] +scripts = boto.pyami.copybot.CopyBot + +# +# Source bucket and Destination Bucket, obviously. +# If the Destination bucket does not exist, it will +# attempt to create it. +# If exit_on_completion is false, the instance +# will keep running after the copy operation is +# complete which might be handy for debugging. +# If copy_acls is false, the ACL's will not be +# copied with the objects to the new bucket. +# If replace_dst is false, copybot will not +# will only store the source file in the dest if +# that file does not already exist. If it's true +# it will replace it even if it does exist. +# +[CopyBot] +src_bucket = +dst_bucket = +exit_on_completion = true +copy_acls = true +replace_dst = true diff --git a/vendor/boto/boto/pyami/copybot.py b/vendor/boto/boto/pyami/copybot.py new file mode 100644 index 000000000000..ed397cb761b6 --- /dev/null +++ b/vendor/boto/boto/pyami/copybot.py @@ -0,0 +1,97 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import boto +from boto.pyami.scriptbase import ScriptBase +import os, StringIO + +class CopyBot(ScriptBase): + + def __init__(self): + ScriptBase.__init__(self) + self.wdir = boto.config.get('Pyami', 'working_dir') + self.log_file = '%s.log' % self.instance_id + self.log_path = os.path.join(self.wdir, self.log_file) + boto.set_file_logger(self.name, self.log_path) + self.src_name = boto.config.get(self.name, 'src_bucket') + self.dst_name = boto.config.get(self.name, 'dst_bucket') + self.replace = boto.config.getbool(self.name, 'replace_dst', True) + s3 = boto.connect_s3() + self.src = s3.lookup(self.src_name) + if not self.src: + boto.log.error('Source bucket does not exist: %s' % self.src_name) + dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None) + if dest_access_key: + dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None) + s3 = boto.connect(dest_access_key, dest_secret_key) + self.dst = s3.lookup(self.dst_name) + if not self.dst: + self.dst = s3.create_bucket(self.dst_name) + + def copy_bucket_acl(self): + if boto.config.get(self.name, 'copy_acls', True): + acl = self.src.get_xml_acl() + self.dst.set_xml_acl(acl) + + def copy_key_acl(self, src, dst): + if boto.config.get(self.name, 'copy_acls', True): + acl = src.get_xml_acl() + dst.set_xml_acl(acl) + + def copy_keys(self): + boto.log.info('src=%s' % self.src.name) + boto.log.info('dst=%s' % self.dst.name) + try: + for key in self.src: + if not self.replace: + exists = self.dst.lookup(key.name) + if exists: + boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name)) + continue + boto.log.info('copying %d bytes from key=%s' % (key.size, key.name)) + prefix, base = os.path.split(key.name) + path = os.path.join(self.wdir, base) + key.get_contents_to_filename(path) + new_key = self.dst.new_key(key.name) + new_key.set_contents_from_filename(path) + self.copy_key_acl(key, new_key) + os.unlink(path) + except: + boto.log.exception('Error copying key: %s' % key.name) + + def copy_log(self): + key = self.dst.new_key(self.log_file) + key.set_contents_from_filename(self.log_path) + + def main(self): + fp = StringIO.StringIO() + boto.config.dump_safe(fp) + self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue()) + if self.src and self.dst: + self.copy_keys() + if self.dst: + self.copy_log() + self.notify('%s (%s) Stopping' % (self.name, self.instance_id), + 'Copy Operation Complete') + if boto.config.getbool(self.name, 'exit_on_completion', True): + ec2 = boto.connect_ec2() + ec2.terminate_instances([self.instance_id]) + diff --git a/vendor/boto/boto/pyami/helloworld.py b/vendor/boto/boto/pyami/helloworld.py new file mode 100644 index 000000000000..680873ce179e --- /dev/null +++ b/vendor/boto/boto/pyami/helloworld.py @@ -0,0 +1,28 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.pyami.scriptbase import ScriptBase + +class HelloWorld(ScriptBase): + + def main(self): + self.log('Hello World!!!') + diff --git a/vendor/boto/boto/pyami/installers/__init__.py b/vendor/boto/boto/pyami/installers/__init__.py new file mode 100644 index 000000000000..cc689264bce3 --- /dev/null +++ b/vendor/boto/boto/pyami/installers/__init__.py @@ -0,0 +1,64 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.pyami.scriptbase import ScriptBase + + +class Installer(ScriptBase): + """ + Abstract base class for installers + """ + + def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None): + """ + Add an entry to the system crontab. + """ + raise NotImplementedError + + def add_init_script(self, file): + """ + Add this file to the init.d directory + """ + + def add_env(self, key, value): + """ + Add an environemnt variable + """ + raise NotImplementedError + + def stop(self, service_name): + """ + Stop a service. + """ + raise NotImplementedError + + def start(self, service_name): + """ + Start a service. + """ + raise NotImplementedError + + def install(self): + """ + Do whatever is necessary to "install" the package. 
+ """ + raise NotImplementedError + diff --git a/vendor/boto/boto/pyami/installers/ubuntu/__init__.py b/vendor/boto/boto/pyami/installers/ubuntu/__init__.py new file mode 100644 index 000000000000..60ee658e34a9 --- /dev/null +++ b/vendor/boto/boto/pyami/installers/ubuntu/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + diff --git a/vendor/boto/boto/pyami/installers/ubuntu/apache.py b/vendor/boto/boto/pyami/installers/ubuntu/apache.py new file mode 100644 index 000000000000..febc2dfa25e1 --- /dev/null +++ b/vendor/boto/boto/pyami/installers/ubuntu/apache.py @@ -0,0 +1,43 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from boto.pyami.installers.ubuntu.installer import Installer + +class Apache(Installer): + """ + Install apache2, mod_python, and libapache2-svn + """ + + def install(self): + self.run("apt-get update") + self.run('apt-get -y install apache2', notify=True, exit_on_error=True) + self.run('apt-get -y install libapache2-mod-python', notify=True, exit_on_error=True) + self.run('a2enmod rewrite', notify=True, exit_on_error=True) + self.run('a2enmod ssl', notify=True, exit_on_error=True) + self.run('a2enmod proxy', notify=True, exit_on_error=True) + self.run('a2enmod proxy_ajp', notify=True, exit_on_error=True) + + # Hard reboot the apache2 server to enable these module + self.stop("apache2") + self.start("apache2") + + def main(self): + self.install() diff --git a/vendor/boto/boto/pyami/installers/ubuntu/ebs.py b/vendor/boto/boto/pyami/installers/ubuntu/ebs.py new file mode 100644 index 000000000000..5486add99bc0 --- /dev/null +++ b/vendor/boto/boto/pyami/installers/ubuntu/ebs.py @@ -0,0 +1,206 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +Automated installer to attach, format and mount an EBS volume. +This installer assumes that you want the volume formatted as +an XFS file system. To drive this installer, you need the +following section in the boto config passed to the new instance. +You also need to install dateutil by listing python-dateutil +in the list of packages to be installed in the Pyami seciont +of your boto config file. + +If there is already a device mounted at the specified mount point, +the installer assumes that it is the ephemeral drive and unmounts +it, remounts it as /tmp and chmods it to 777. 
+ +Config file section:: + + [EBS] + volume_id = + logical_volume_name = + device = + mount_point = + +""" +import boto +from boto.manage.volume import Volume +import os, time +from boto.pyami.installers.ubuntu.installer import Installer +from string import Template + +BackupScriptTemplate = """#!/usr/bin/env python +# Backup EBS volume +import boto +from boto.pyami.scriptbase import ScriptBase +import traceback + +class Backup(ScriptBase): + + def main(self): + try: + ec2 = boto.connect_ec2() + self.run("/usr/sbin/xfs_freeze -f ${mount_point}") + snapshot = ec2.create_snapshot('${volume_id}') + boto.log.info("Snapshot created: %s " % snapshot) + except Exception, e: + self.notify(subject="${instance_id} Backup Failed", body=traceback.format_exc()) + boto.log.info("Snapshot created: ${volume_id}") + except Exception, e: + self.notify(subject="${instance_id} Backup Failed", body=traceback.format_exc()) + finally: + self.run("/usr/sbin/xfs_freeze -u ${mount_point}") + +if __name__ == "__main__": + b = Backup() + b.main() +""" + +BackupCleanupScript= """#!/usr/bin/env python +import boto +from boto.manage.volume import Volume + +# Cleans Backups of EBS volumes + +for v in Volume.all(): + v.trim_snapshots(True) +""" + +class EBSInstaller(Installer): + """ + Set up the EBS stuff + """ + + def __init__(self, config_file=None): + Installer.__init__(self, config_file) + self.instance_id = boto.config.get('Instance', 'instance-id') + self.device = boto.config.get('EBS', 'device', '/dev/sdp') + self.volume_id = boto.config.get('EBS', 'volume_id') + self.logical_volume_name = boto.config.get('EBS', 'logical_volume_name') + self.mount_point = boto.config.get('EBS', 'mount_point', '/ebs') + + def attach(self): + ec2 = boto.connect_ec2() + if self.logical_volume_name: + # if a logical volume was specified, override the specified volume_id + # (if there was one) with the current AWS volume for the logical volume: + logical_volume = Volume.find(name = self.logical_volume_name).next() + self.volume_id = logical_volume._volume_id + volume = ec2.get_all_volumes([self.volume_id])[0] + # wait for the volume to be available. The volume may still be being created + # from a snapshot. + while volume.update() != 'available': + boto.log.info('Volume %s not yet available. Current status = %s.' 
% (volume.id, volume.status)) + time.sleep(5) + ec2.attach_volume(self.volume_id, self.instance_id, self.device) + # now wait for the volume device to appear + while not os.path.exists(self.device): + boto.log.info('%s still does not exist, waiting 10 seconds' % self.device) + time.sleep(10) + + def make_fs(self): + boto.log.info('make_fs...') + has_fs = self.run('fsck %s' % self.device) + if has_fs != 0: + self.run('mkfs -t xfs %s' % self.device) + + def create_backup_script(self): + t = Template(BackupScriptTemplate) + s = t.substitute(volume_id=self.volume_id, instance_id=self.instance_id, + mount_point=self.mount_point) + fp = open('/usr/local/bin/ebs_backup', 'w') + fp.write(s) + fp.close() + self.run('chmod +x /usr/local/bin/ebs_backup') + + def create_backup_cleanup_script(self): + fp = open('/usr/local/bin/ebs_backup_cleanup', 'w') + fp.write(BackupCleanupScript) + fp.close() + self.run('chmod +x /usr/local/bin/ebs_backup_cleanup') + + def handle_mount_point(self): + boto.log.info('handle_mount_point') + if not os.path.isdir(self.mount_point): + boto.log.info('making directory') + # mount directory doesn't exist so create it + self.run("mkdir %s" % self.mount_point) + else: + boto.log.info('directory exists already') + self.run('mount -l') + lines = self.last_command.output.split('\n') + for line in lines: + t = line.split() + if t and t[2] == self.mount_point: + # something is already mounted at the mount point + # unmount that and mount it as /tmp + if t[0] != self.device: + self.run('umount %s' % self.mount_point) + self.run('mount %s /tmp' % t[0]) + self.run('chmod 777 /tmp') + break + # Mount up our new EBS volume onto mount_point + self.run("mount %s %s" % (self.device, self.mount_point)) + self.run('xfs_growfs %s' % self.mount_point) + + def update_fstab(self): + f = open("/etc/fstab", "a") + f.write('%s\t%s\txfs\tdefaults 0 0\n' % (self.mount_point, self.device)) + f.close() + + def install(self): + # First, find and attach the volume + self.attach() + + # Install the xfs tools + self.run('apt-get -y install xfsprogs xfsdump') + + # Check to see if the filesystem was created or not + self.make_fs() + + # create the /ebs directory for mounting + self.handle_mount_point() + + # create the backup script + self.create_backup_script() + + # Set up the backup script + minute = boto.config.get('EBS', 'backup_cron_minute', '0') + hour = boto.config.get('EBS', 'backup_cron_hour', '4,16') + self.add_cron("ebs_backup", "/usr/local/bin/ebs_backup", minute=minute, hour=hour) + + # Set up the backup cleanup script + minute = boto.config.get('EBS', 'backup_cleanup_cron_minute') + hour = boto.config.get('EBS', 'backup_cleanup_cron_hour') + if (minute != None) and (hour != None): + self.create_backup_cleanup_script(); + self.add_cron("ebs_backup_cleanup", "/usr/local/bin/ebs_backup_cleanup", minute=minute, hour=hour) + + # Set up the fstab + self.update_fstab() + + def main(self): + if not os.path.exists(self.device): + self.install() + else: + boto.log.info("Device %s is already attached, skipping EBS Installer" % self.device) diff --git a/vendor/boto/boto/pyami/installers/ubuntu/installer.py b/vendor/boto/boto/pyami/installers/ubuntu/installer.py new file mode 100644 index 000000000000..370d63fd7b67 --- /dev/null +++ b/vendor/boto/boto/pyami/installers/ubuntu/installer.py @@ -0,0 +1,96 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files 
(the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import boto.pyami.installers +import os +import os.path +import stat +import boto +import random +from pwd import getpwnam + +class Installer(boto.pyami.installers.Installer): + """ + Base Installer class for Ubuntu-based AMI's + """ + def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None): + """ + Write a file to /etc/cron.d to schedule a command + env is a dict containing environment variables you want to set in the file + name will be used as the name of the file + """ + if minute == 'random': + minute = str(random.randrange(60)) + if hour == 'random': + hour = str(random.randrange(24)) + fp = open('/etc/cron.d/%s' % name, "w") + if env: + for key, value in env.items(): + fp.write('%s=%s\n' % (key, value)) + fp.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command)) + fp.close() + + def add_init_script(self, file, name): + """ + Add this file to the init.d directory + """ + f_path = os.path.join("/etc/init.d", name) + f = open(f_path, "w") + f.write(file) + f.close() + os.chmod(f_path, stat.S_IREAD| stat.S_IWRITE | stat.S_IEXEC) + self.run("/usr/sbin/update-rc.d %s defaults" % name) + + def add_env(self, key, value): + """ + Add an environemnt variable + For Ubuntu, the best place is /etc/environment. Values placed here do + not need to be exported. 
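As a rough illustration of how the helpers on this class are typically combined from a concrete installer, here is a sketch; the subclass name, script path and variable are hypothetical and not part of boto:

from boto.pyami.installers.ubuntu.installer import Installer

class ExampleInstaller(Installer):
    """Hypothetical installer showing add_cron/add_env usage."""

    def install(self):
        # run a nightly job at a random minute past 02:00, as root
        self.add_cron('example_backup', '/usr/local/bin/example_backup',
                      minute='random', hour='2')
        # make EXAMPLE_HOME visible system-wide via /etc/environment
        self.add_env('EXAMPLE_HOME', '/mnt/example')

    def main(self):
        self.install()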
+ """ + boto.log.info('Adding env variable: %s=%s' % (key, value)) + if not os.path.exists("/etc/environment.orig"): + self.run('cp /etc/environment /etc/environment.orig', notify=False, exit_on_error=False) + fp = open('/etc/environment', 'a') + fp.write('\n%s="%s"' % (key, value)) + fp.close() + os.environ[key] = value + + def stop(self, service_name): + self.run('/etc/init.d/%s stop' % service_name) + + def start(self, service_name): + self.run('/etc/init.d/%s start' % service_name) + + def create_user(self, user): + """ + Create a user on the local system + """ + self.run("useradd -m %s" % user) + usr = getpwnam(user) + return usr + + + def install(self): + """ + This is the only method you need to override + """ + raise NotImplementedError + diff --git a/vendor/boto/boto/pyami/installers/ubuntu/mysql.py b/vendor/boto/boto/pyami/installers/ubuntu/mysql.py new file mode 100644 index 000000000000..490e5dbb4ff0 --- /dev/null +++ b/vendor/boto/boto/pyami/installers/ubuntu/mysql.py @@ -0,0 +1,109 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +""" +This installer will install mysql-server on an Ubuntu machine. +In addition to the normal installation done by apt-get, it will +also configure the new MySQL server to store it's data files in +a different location. By default, this is /mnt but that can be +configured in the [MySQL] section of the boto config file passed +to the instance. +""" +from boto.pyami.installers.ubuntu.installer import Installer +import os +import boto +from boto.utils import ShellCommand +from ConfigParser import SafeConfigParser +import time + +ConfigSection = """ +[MySQL] +root_password = +data_dir = +""" + +class MySQL(Installer): + + def install(self): + self.run('apt-get update') + self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True) + +# def set_root_password(self, password=None): +# if not password: +# password = boto.config.get('MySQL', 'root_password') +# if password: +# self.run('mysqladmin -u root password %s' % password) +# return password + + def change_data_dir(self, password=None): + data_dir = boto.config.get('MySQL', 'data_dir', '/mnt') + fresh_install = False; + is_mysql_running_command = ShellCommand('mysqladmin ping') # exit status 0 if mysql is running + is_mysql_running_command.run() + if is_mysql_running_command.getStatus() == 0: + # mysql is running. This is the state apt-get will leave it in. 
If it isn't running, + # that means mysql was already installed on the AMI and there's no need to stop it, + # saving 40 seconds on instance startup. + time.sleep(10) #trying to stop mysql immediately after installing it fails + # We need to wait until mysql creates the root account before we kill it + # or bad things will happen + i = 0 + while self.run("echo 'quit' | mysql -u root") != 0 and i<5: + time.sleep(5) + i = i + 1 + self.run('/etc/init.d/mysql stop') + self.run("pkill -9 mysql") + + mysql_path = os.path.join(data_dir, 'mysql') + if not os.path.exists(mysql_path): + self.run('mkdir %s' % mysql_path) + fresh_install = True; + self.run('chown -R mysql:mysql %s' % mysql_path) + fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w') + fp.write('# created by pyami\n') + fp.write('# use the %s volume for data\n' % data_dir) + fp.write('[mysqld]\n') + fp.write('datadir = %s\n' % mysql_path) + fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log')) + fp.close() + if fresh_install: + self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path) + self.start('mysql') + else: + #get the password ubuntu expects to use: + config_parser = SafeConfigParser() + config_parser.read('/etc/mysql/debian.cnf') + password = config_parser.get('client', 'password') + # start the mysql deamon, then mysql with the required grant statement piped into it: + self.start('mysql') + time.sleep(10) #time for mysql to start + grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password + while self.run(grant_command) != 0: + time.sleep(5) + # leave mysqld running + + def main(self): + self.install() + # change_data_dir runs 'mysql -u root' which assumes there is no mysql password, i + # and changing that is too ugly to be worth it: + #self.set_root_password() + self.change_data_dir() + diff --git a/vendor/boto/boto/pyami/installers/ubuntu/trac.py b/vendor/boto/boto/pyami/installers/ubuntu/trac.py new file mode 100644 index 000000000000..ef83af7aac82 --- /dev/null +++ b/vendor/boto/boto/pyami/installers/ubuntu/trac.py @@ -0,0 +1,139 @@ +# Copyright (c) 2008 Chris Moyer http://coredumped.org +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +from boto.pyami.installers.ubuntu.installer import Installer +import boto +import os + +class Trac(Installer): + """ + Install Trac and DAV-SVN + Sets up a Vhost pointing to [Trac]->home + Using the config parameter [Trac]->hostname + Sets up a trac environment for every directory found under [Trac]->data_dir + + [Trac] + name = My Foo Server + hostname = trac.foo.com + home = /mnt/sites/trac + data_dir = /mnt/trac + svn_dir = /mnt/subversion + server_admin = root@foo.com + sdb_auth_domain = users + # Optional + SSLCertificateFile = /mnt/ssl/foo.crt + SSLCertificateKeyFile = /mnt/ssl/foo.key + SSLCertificateChainFile = /mnt/ssl/FooCA.crt + + """ + + def install(self): + self.run('apt-get -y install trac', notify=True, exit_on_error=True) + self.run('apt-get -y install libapache2-svn', notify=True, exit_on_error=True) + self.run("a2enmod ssl") + self.run("a2enmod mod_python") + self.run("a2enmod dav_svn") + self.run("a2enmod rewrite") + # Make sure that boto.log is writable by everyone so that subversion post-commit hooks can + # write to it. + self.run("touch /var/log/boto.log") + self.run("chmod a+w /var/log/boto.log") + + def setup_vhost(self): + domain = boto.config.get("Trac", "hostname").strip() + if domain: + domain_info = domain.split('.') + cnf = open("/etc/apache2/sites-available/%s" % domain_info[0], "w") + cnf.write("NameVirtualHost *:80\n") + if boto.config.get("Trac", "SSLCertificateFile"): + cnf.write("NameVirtualHost *:443\n\n") + cnf.write("\n") + cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip()) + cnf.write("\tServerName %s\n" % domain) + cnf.write("\tRewriteEngine On\n") + cnf.write("\tRewriteRule ^(.*)$ https://%s$1\n" % domain) + cnf.write("\n\n") + + cnf.write("\n") + else: + cnf.write("\n") + + cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip()) + cnf.write("\tServerName %s\n" % domain) + cnf.write("\tDocumentRoot %s\n" % boto.config.get("Trac", "home").strip()) + + cnf.write("\t\n" % boto.config.get("Trac", "home").strip()) + cnf.write("\t\tOptions FollowSymLinks Indexes MultiViews\n") + cnf.write("\t\tAllowOverride All\n") + cnf.write("\t\tOrder allow,deny\n") + cnf.write("\t\tallow from all\n") + cnf.write("\t\n") + + cnf.write("\t\n") + cnf.write("\t\tAuthType Basic\n") + cnf.write("\t\tAuthName \"%s\"\n" % boto.config.get("Trac", "name")) + cnf.write("\t\tRequire valid-user\n") + cnf.write("\t\tAuthUserFile /mnt/apache/passwd/passwords\n") + cnf.write("\t\n") + + data_dir = boto.config.get("Trac", "data_dir") + for env in os.listdir(data_dir): + if(env[0] != "."): + cnf.write("\t\n" % env) + cnf.write("\t\tSetHandler mod_python\n") + cnf.write("\t\tPythonInterpreter main_interpreter\n") + cnf.write("\t\tPythonHandler trac.web.modpython_frontend\n") + cnf.write("\t\tPythonOption TracEnv %s/%s\n" % (data_dir, env)) + cnf.write("\t\tPythonOption TracUriRoot /trac/%s\n" % env) + cnf.write("\t\n") + + svn_dir = boto.config.get("Trac", "svn_dir") + for env in os.listdir(svn_dir): + if(env[0] != "."): + cnf.write("\t\n" % env) + cnf.write("\t\tDAV svn\n") + cnf.write("\t\tSVNPath %s/%s\n" % (svn_dir, env)) + cnf.write("\t\n") + + cnf.write("\tErrorLog /var/log/apache2/error.log\n") + cnf.write("\tLogLevel warn\n") + cnf.write("\tCustomLog /var/log/apache2/access.log combined\n") + cnf.write("\tServerSignature On\n") + SSLCertificateFile = boto.config.get("Trac", "SSLCertificateFile") + if SSLCertificateFile: + cnf.write("\tSSLEngine On\n") + cnf.write("\tSSLCertificateFile %s\n" % SSLCertificateFile) + + 
SSLCertificateKeyFile = boto.config.get("Trac", "SSLCertificateKeyFile") + if SSLCertificateKeyFile: + cnf.write("\tSSLCertificateKeyFile %s\n" % SSLCertificateKeyFile) + + SSLCertificateChainFile = boto.config.get("Trac", "SSLCertificateChainFile") + if SSLCertificateChainFile: + cnf.write("\tSSLCertificateChainFile %s\n" % SSLCertificateChainFile) + cnf.write("\n") + cnf.close() + self.run("a2ensite %s" % domain_info[0]) + self.run("/etc/init.d/apache2 force-reload") + + def main(self): + self.install() + self.setup_vhost() diff --git a/vendor/boto/boto/pyami/launch_ami.py b/vendor/boto/boto/pyami/launch_ami.py new file mode 100755 index 000000000000..243d56d2eb4d --- /dev/null +++ b/vendor/boto/boto/pyami/launch_ami.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +import getopt +import sys +import imp +import time +import boto + +usage_string = """ +SYNOPSIS + launch_ami.py -a ami_id [-b script_bucket] [-s script_name] + [-m module] [-c class_name] [-r] + [-g group] [-k key_name] [-n num_instances] + [-w] [extra_data] + Where: + ami_id - the id of the AMI you wish to launch + module - The name of the Python module containing the class you + want to run when the instance is started. If you use this + option the Python module must already be stored on the + instance in a location that is on the Python path. + script_file - The name of a local Python module that you would like + to have copied to S3 and then run on the instance + when it is started. The specified module must be + import'able (i.e. in your local Python path). It + will then be copied to the specified bucket in S3 + (see the -b option). Once the new instance(s) + start up the script will be copied from S3 and then + run locally on the instance. + class_name - The name of the class to be instantiated within the + module or script file specified. + script_bucket - the name of the bucket in which the script will be + stored + group - the name of the security group the instance will run in + key_name - the name of the keypair to use when launching the AMI + num_instances - how many instances of the AMI to launch (default 1) + input_queue_name - Name of SQS to read input messages from + output_queue_name - Name of SQS to write output messages to + extra_data - additional name-value pairs that will be passed as + userdata to the newly launched instance. 
These should + be of the form "name=value" + The -r option reloads the Python module to S3 without launching + another instance. This can be useful during debugging to allow + you to test a new version of your script without shutting down + your instance and starting up another one. + The -w option tells the script to run synchronously, meaning to + wait until the instance is actually up and running. It then prints + the IP address and internal and external DNS names before exiting. +""" + +def usage(): + print usage_string + sys.exit() + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], 'a:b:c:g:hi:k:m:n:o:rs:w', + ['ami', 'bucket', 'class', 'group', 'help', + 'inputqueue', 'keypair', 'module', + 'numinstances', 'outputqueue', + 'reload', 'script_name', 'wait']) + except: + usage() + params = {'module_name' : None, + 'script_name' : None, + 'class_name' : None, + 'script_bucket' : None, + 'group' : 'default', + 'keypair' : None, + 'ami' : None, + 'num_instances' : 1, + 'input_queue_name' : None, + 'output_queue_name' : None} + reload = None + wait = None + for o, a in opts: + if o in ('-a', '--ami'): + params['ami'] = a + if o in ('-b', '--bucket'): + params['script_bucket'] = a + if o in ('-c', '--class'): + params['class_name'] = a + if o in ('-g', '--group'): + params['group'] = a + if o in ('-h', '--help'): + usage() + if o in ('-i', '--inputqueue'): + params['input_queue_name'] = a + if o in ('-k', '--keypair'): + params['keypair'] = a + if o in ('-m', '--module'): + params['module_name'] = a + if o in ('-n', '--num_instances'): + params['num_instances'] = int(a) + if o in ('-o', '--outputqueue'): + params['output_queue_name'] = a + if o in ('-r', '--reload'): + reload = True + if o in ('-s', '--script'): + params['script_name'] = a + if o in ('-w', '--wait'): + wait = True + + # check required fields + required = ['ami'] + for pname in required: + if not params.get(pname, None): + print '%s is required' % pname + usage() + if params['script_name']: + # first copy the desired module file to S3 bucket + if reload: + print 'Reloading module %s to S3' % params['script_name'] + else: + print 'Copying module %s to S3' % params['script_name'] + l = imp.find_module(params['script_name']) + c = boto.connect_s3() + bucket = c.get_bucket(params['script_bucket']) + key = bucket.new_key(params['script_name']+'.py') + key.set_contents_from_file(l[0]) + params['script_md5'] = key.md5 + # we have everything we need, now build userdata string + l = [] + for k, v in params.items(): + if v: + l.append('%s=%s' % (k, v)) + c = boto.connect_ec2() + l.append('aws_access_key_id=%s' % c.aws_access_key_id) + l.append('aws_secret_access_key=%s' % c.aws_secret_access_key) + for kv in args: + l.append(kv) + s = '|'.join(l) + if not reload: + rs = c.get_all_images([params['ami']]) + img = rs[0] + r = img.run(user_data=s, key_name=params['keypair'], + security_groups=[params['group']], + max_count=params.get('num_instances', 1)) + print 'AMI: %s - %s (Started)' % (params['ami'], img.location) + print 'Reservation %s contains the following instances:' % r.id + for i in r.instances: + print '\t%s' % i.id + if wait: + running = False + while not running: + time.sleep(30) + [i.update() for i in r.instances] + status = [i.state for i in r.instances] + print status + if status.count('running') == len(r.instances): + running = True + for i in r.instances: + print 'Instance: %s' % i.ami_launch_index + print 'Public DNS Name: %s' % i.public_dns_name + print 'Private DNS Name: %s' % i.private_dns_name + +if 
__name__ == "__main__": + main() + diff --git a/vendor/boto/boto/pyami/scriptbase.py b/vendor/boto/boto/pyami/scriptbase.py new file mode 100644 index 000000000000..ef8bd28f1ea5 --- /dev/null +++ b/vendor/boto/boto/pyami/scriptbase.py @@ -0,0 +1,44 @@ +import os +import sys +from boto.utils import ShellCommand, get_ts +import boto +import boto.utils + +class ScriptBase: + + def __init__(self, config_file=None): + self.instance_id = boto.config.get('Instance', 'instance-id', 'default') + self.name = self.__class__.__name__ + self.ts = get_ts() + if config_file: + boto.config.read(config_file) + + def notify(self, subject, body=''): + boto.utils.notify(subject, body) + + def mkdir(self, path): + if not os.path.isdir(path): + try: + os.mkdir(path) + except: + boto.log.error('Error creating directory: %s' % path) + + def umount(self, path): + if os.path.ismount(path): + self.run('umount %s' % path) + + def run(self, command, notify=True, exit_on_error=False): + self.last_command = ShellCommand(command) + if self.last_command.status != 0: + boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output)) + if notify: + self.notify('Error encountered', \ + 'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \ + (command, self.last_command.output)) + if exit_on_error: + sys.exit(-1) + return self.last_command.status + + def main(self): + pass + diff --git a/vendor/boto/boto/pyami/startup.py b/vendor/boto/boto/pyami/startup.py new file mode 100644 index 000000000000..8443bff12252 --- /dev/null +++ b/vendor/boto/boto/pyami/startup.py @@ -0,0 +1,59 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import sys +import boto +from boto.utils import find_class +from boto import config +from boto.pyami.scriptbase import ScriptBase + + +class Startup(ScriptBase): + + def run_scripts(self): + scripts = config.get('Pyami', 'scripts') + if scripts: + for script in scripts.split(','): + script = script.strip(" ") + try: + pos = script.rfind('.') + if pos > 0: + mod_name = script[0:pos] + cls_name = script[pos+1:] + cls = find_class(mod_name, cls_name) + boto.log.info('Running Script: %s' % script) + s = cls() + s.main() + else: + boto.log.warning('Trouble parsing script: %s' % script) + except Exception: + boto.log.exception('Problem Running Script: %s' % script) + + def main(self): + self.run_scripts() + self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id')) + +if __name__ == "__main__": + if not config.has_section('loggers'): + boto.set_file_logger('startup', '/var/log/boto.log') + sys.path.append(config.get('Pyami', 'working_dir')) + su = Startup() + su.main() diff --git a/vendor/boto/boto/rds/__init__.py b/vendor/boto/boto/rds/__init__.py new file mode 100644 index 000000000000..2283e2ce4af5 --- /dev/null +++ b/vendor/boto/boto/rds/__init__.py @@ -0,0 +1,810 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto.utils +import urllib +from boto.connection import AWSQueryConnection +from boto.rds.dbinstance import DBInstance +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.rds.parametergroup import ParameterGroup +from boto.rds.dbsnapshot import DBSnapshot +from boto.rds.event import Event + +#boto.set_stream_logger('rds') + +class RDSConnection(AWSQueryConnection): + + DefaultHost = 'rds.amazonaws.com' + APIVersion = '2009-10-16' + SignatureVersion = '2' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, host=DefaultHost, debug=0, + https_connection_factory=None, path='/'): + AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, + proxy_pass, self.DefaultHost, debug, + https_connection_factory, path) + + # DB Instance methods + + def get_all_dbinstances(self, instance_id=None, max_records=None, + marker=None): + """ + Retrieve all the DBInstances in your account. + + :type instance_id: str + :param instance_id: DB Instance identifier. If supplied, only information + this instance will be returned. 
Otherwise, info + about all DB Instances will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbinstance.DBInstance` + """ + params = {} + if instance_id: + params['DBInstanceIdentifier'] = instance_id + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBInstances', params, [('DBInstance', DBInstance)]) + + def create_dbinstance(self, id, allocated_storage, instance_class, + master_username, master_password, port=3306, + engine='MySQL5.1', db_name=None, param_group=None, + security_groups=None, availability_zone=None, + preferred_maintenance_window=None, + backup_retention_period=None, + preferred_backup_window=None): + """ + Create a new DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + Must contain 1-63 alphanumeric characters. + First character must be a letter. + May not end with a hyphen or contain two consecutive hyphens + + :type allocated_storage: int + :param allocated_storage: Initially allocated storage size, in GBs. + Valid values are [5-1024] + + :type instance_class: str + :param instance_class: The compute and memory capacity of the DBInstance. + Valid values are: + db.m1.small | db.m1.large | db.m1.xlarge | + db.m2.2xlarge | db.m2.4xlarge + + :type engine: str + :param engine: Name of database engine. Must be MySQL5.1 for now. + + :type master_username: str + :param master_username: Name of master user for the DBInstance. + Must be 1-15 alphanumeric characters, first must be + a letter. + + :type master_password: str + :param master_password: Password of master user for the DBInstance. + Must be 4-16 alphanumeric characters. + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. Defaults to 3306. + + :type db_name: str + :param db_name: Name of a database to create when the DBInstance + is created. Default is to create no databases. + + :type param_group: str + :param param_group: Name of DBParameterGroup to associate with + this DBInstance. If no groups are specified + no parameter groups will be used. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to authorize on + this DBInstance. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in UTC) during + which maintenance can occur. + Default is Sun:05:00-Sun:09:00 + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which automated + backups are retained. Setting this to + zero disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during which + automated backups are created (if + enabled). Must be in h24:mi-hh24:mi + format (UTC). + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The new db instance. 
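A minimal usage sketch for the two calls above, assuming an RDS-enabled account; the credentials, instance identifier, and master password are placeholders rather than values from this patch.

from boto.rds import RDSConnection

# Placeholder credentials.
conn = RDSConnection('<aws access key>', '<aws secret key>')

# Launch a small MySQL instance with 5 GB of storage (hypothetical identifier).
db = conn.create_dbinstance('example-db', 5, 'db.m1.small', 'dbadmin', 's3cr3tpw')
print db.id, db.status

# Later, list this instance together with any others in the account.
instances = conn.get_all_dbinstances()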
+ """ + params = {'DBInstanceIdentifier' : id, + 'AllocatedStorage' : allocated_storage, + 'DBInstanceClass' : instance_class, + 'Engine' : engine, + 'MasterUsername' : master_username, + 'MasterUserPassword' : master_password} + if port: + params['Port'] = port + if db_name: + params['DBName'] = db_name + if param_group: + params['DBParameterGroup'] = param_group + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, DBSecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'DBSecurityGroups.member') + if availability_zone: + params['AvailabilityZone'] = availability_zone + if preferred_maintenance_window: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if backup_retention_period: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window: + params['PreferredBackupWindow'] = preferred_backup_window + + return self.get_object('CreateDBInstance', params, DBInstance) + + def modify_dbinstance(self, id, param_group=None, security_groups=None, + preferred_maintenance_window=None, + master_password=None, allocated_storage=None, + instance_class=None, + backup_retention_period=None, + preferred_backup_window=None, + apply_immediately=False): + """ + Modify an existing DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + + :type security_groups: list of str or list of DBSecurityGroup objects + :param security_groups: List of names of DBSecurityGroup to authorize on + this DBInstance. + + :type preferred_maintenance_window: str + :param preferred_maintenance_window: The weekly time range (in UTC) during + which maintenance can occur. + Default is Sun:05:00-Sun:09:00 + + :type master_password: str + :param master_password: Password of master user for the DBInstance. + Must be 4-15 alphanumeric characters. + + :type allocated_storage: int + :param allocated_storage: The new allocated storage size, in GBs. + Valid values are [5-1024] + + :type instance_class: str + :param instance_class: The compute and memory capacity of the DBInstance. + Changes will be applied at next maintenance + window unless apply_immediately is True. + Valid values are: + db.m1.small | db.m1.large | db.m1.xlarge | + db.m2.2xlarge | db.m2.4xlarge + + :type apply_immediately: bool + :param apply_immediately: If true, the modifications will be applied + as soon as possible rather than waiting for + the next preferred maintenance window. + + :type backup_retention_period: int + :param backup_retention_period: The number of days for which automated + backups are retained. Setting this to + zero disables automated backups. + + :type preferred_backup_window: str + :param preferred_backup_window: The daily time range during which + automated backups are created (if + enabled). Must be in h24:mi-hh24:mi + format (UTC). + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The modified db instance. 
+ """ + params = {'DBInstanceIdentifier' : id} + if param_group: + params['DBParameterGroupName'] = param_group + if security_groups: + l = [] + for group in security_groups: + if isinstance(group, DBSecurityGroup): + l.append(group.name) + else: + l.append(group) + self.build_list_params(params, l, 'DBSecurityGroups.member') + if preferred_maintenance_window: + params['PreferredMaintenanceWindow'] = preferred_maintenance_window + if master_password: + params['MasterUserPassword'] = master_password + if allocated_storage: + params['AllocatedStorage'] = allocated_storage + if instance_class: + params['DBInstanceClass'] = instance_class + if backup_retention_period: + params['BackupRetentionPeriod'] = backup_retention_period + if preferred_backup_window: + params['PreferredBackupWindow'] = preferred_backup_window + if apply_immediately: + params['ApplyImmediately'] = 'true' + + return self.get_object('ModifyDBInstance', params, DBInstance) + + def delete_dbinstance(self, id, skip_final_snapshot=False, + final_snapshot_id=''): + """ + Delete an existing DBInstance. + + :type id: str + :param id: Unique identifier for the new instance. + + :type skip_final_snapshot: bool + :param skip_final_snapshot: This parameter determines whether a final + db snapshot is created before the instance + is deleted. If True, no snapshot is created. + If False, a snapshot is created before + deleting the instance. + + :type final_snapshot_id: str + :param final_snapshot_id: If a final snapshot is requested, this + is the identifier used for that snapshot. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The deleted db instance. + """ + params = {'DBInstanceIdentifier' : id} + if skip_final_snapshot: + params['SkipFinalSnapshot'] = 'true' + else: + params['SkipFinalSnapshot'] = 'false' + params['FinalDBSnapshotIdentifier'] = final_snapshot_id + return self.get_object('DeleteDBInstance', params, DBInstance) + + # DBParameterGroup methods + + def get_all_dbparameter_groups(self, groupname=None, max_records=None, + marker=None): + """ + Get all parameter groups associated with your account in a region. + + :type groupname: str + :param groupname: The name of the DBParameter group to retrieve. + If not provided, all DBParameter groups will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.ec2.parametergroup.ParameterGroup` + """ + params = {} + if groupname: + params['DBParameterGroupName'] = groupname + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBParameterGroups', params, + [('DBParameterGroup', ParameterGroup)]) + + def get_all_dbparameters(self, groupname, source=None, + max_records=None, marker=None): + """ + Get all parameters associated with a ParameterGroup + + :type groupname: str + :param groupname: The name of the DBParameter group to retrieve. + + :type source: str + :param source: Specifies which parameters to return. + If not specified, all parameters will be returned. + Valid values are: user|system|engine-default + + :type max_records: int + :param max_records: The maximum number of records to be returned. 
+ If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: :class:`boto.ec2.parametergroup.ParameterGroup` + :return: The ParameterGroup + """ + params = {'DBParameterGroupName' : groupname} + if source: + params['Source'] = source + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + pg = self.get_object('DescribeDBParameters', params, ParameterGroup) + pg.name = groupname + return pg + + def create_parameter_group(self, name, engine='MySQL5.1', description=''): + """ + Create a new dbparameter group for your account. + + :type name: string + :param name: The name of the new dbparameter group + + :type engine: str + :param engine: Name of database engine. Must be MySQL5.1 for now. + + :type description: string + :param description: The description of the new security group + + :rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup` + :return: The newly created DBSecurityGroup + """ + params = {'DBParameterGroupName': name, + 'Engine': engine, + 'Description' : description} + return self.get_object('CreateDBParameterGroup', params, ParameterGroup) + + def modify_parameter_group(self, name, parameters=None): + """ + Modify a parameter group for your account. + + :type name: string + :param name: The name of the new parameter group + + :type parameters: list of :class:`boto.rds.parametergroup.Parameter` + :param parameters: The new parameters + + :rtype: :class:`boto.rds.parametergroup.ParameterGroup` + :return: The newly created ParameterGroup + """ + params = {'DBParameterGroupName': name} + for i in range(0, len(parameters)): + parameter = parameters[i] + parameter.merge(params, i+1) + return self.get_list('ModifyDBParameterGroup', params, ParameterGroup) + + def reset_parameter_group(self, name, reset_all_params=False, parameters=None): + """ + Resets some or all of the parameters of a ParameterGroup to the + default value + + :type key_name: string + :param key_name: The name of the ParameterGroup to reset + + :type parameters: list of :class:`boto.rds.parametergroup.Parameter` + :param parameters: The parameters to reset. If not supplied, all parameters + will be reset. + """ + params = {'DBParameterGroupName':name} + if reset_all_params: + params['ResetAllParameters'] = 'true' + else: + params['ResetAllParameters'] = 'false' + for i in range(0, len(parameters)): + parameter = parameters[i] + parameter.merge(params, i+1) + return self.get_status('ResetDBParameterGroup', params) + + def delete_parameter_group(self, name): + """ + Delete a DBSecurityGroup from your account. + + :type key_name: string + :param key_name: The name of the DBSecurityGroup to delete + """ + params = {'DBParameterGroupName':name} + return self.get_status('DeleteDBParameterGroup', params) + + # DBSecurityGroup methods + + def get_all_dbsecurity_groups(self, groupname=None, max_records=None, + marker=None): + """ + Get all security groups associated with your account in a region. + + :type groupnames: list + :param groupnames: A list of the names of security groups to retrieve. + If not provided, all security groups will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. 
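A sketch of the parameter-group methods above. The group name and the max_connections parameter are placeholders, and whether a given parameter accepts a new value depends on its allowed_values as parsed by the Parameter class later in this patch.

from boto.rds import RDSConnection

conn = RDSConnection('<aws access key>', '<aws secret key>')  # placeholders

conn.create_parameter_group('example-params', description='example settings')

# Fetch the group's parameters (a dict-like ParameterGroup) and change one.
pg = conn.get_all_dbparameters('example-params')
param = pg['max_connections']        # hypothetical parameter name
param.value = 250
param.apply(immediate=True)          # issues ModifyDBParameterGroup underneath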
+ + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbsecuritygroup.DBSecurityGroup` + """ + params = {} + if groupname: + params['DBSecurityGroupName'] = groupname + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBSecurityGroups', params, + [('DBSecurityGroup', DBSecurityGroup)]) + + def create_dbsecurity_group(self, name, description=None): + """ + Create a new security group for your account. + This will create the security group within the region you + are currently connected to. + + :type name: string + :param name: The name of the new security group + + :type description: string + :param description: The description of the new security group + + :rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup` + :return: The newly created DBSecurityGroup + """ + params = {'DBSecurityGroupName':name} + if description: + params['DBSecurityGroupDescription'] = description + group = self.get_object('CreateDBSecurityGroup', params, DBSecurityGroup) + group.name = name + group.description = description + return group + + def delete_dbsecurity_group(self, name): + """ + Delete a DBSecurityGroup from your account. + + :type key_name: string + :param key_name: The name of the DBSecurityGroup to delete + """ + params = {'DBSecurityGroupName':name} + return self.get_status('DeleteDBSecurityGroup', params) + + def authorize_dbsecurity_group(self, group_name, cidr_ip=None, + ec2_security_group_name=None, + ec2_security_group_owner_id=None): + """ + Add a new rule to an existing security group. + You need to pass in either src_security_group_name and + src_security_group_owner_id OR a CIDR block but not both. + + :type group_name: string + :param group_name: The name of the security group you are adding + the rule to. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group you are + granting access to. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The ID of the owner of the EC2 security + group you are granting access to. + + :type cidr_ip: string + :param cidr_ip: The CIDR block you are providing access to. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'DBSecurityGroupName':group_name} + if ec2_security_group_name: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + if cidr_ip: + params['CIDRIP'] = urllib.quote(cidr_ip) + return self.get_object('AuthorizeDBSecurityGroupIngress', params, DBSecurityGroup) + + def revoke_security_group(self, group_name, ec2_security_group_name=None, + ec2_security_group_owner_id=None, cidr_ip=None): + """ + Remove an existing rule from an existing security group. + You need to pass in either ec2_security_group_name and + ec2_security_group_owner_id OR a CIDR block. + + :type group_name: string + :param group_name: The name of the security group you are removing + the rule from. + + :type ec2_security_group_name: string + :param ec2_security_group_name: The name of the EC2 security group you are + granting access to. + + :type ec2_security_group_owner_id: string + :param ec2_security_group_owner_id: The ID of the owner of the EC2 security + group you are granting access to. 
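A sketch of the security-group calls above; the group name and CIDR block are placeholders (192.0.2.0/24 is a documentation range).

from boto.rds import RDSConnection

conn = RDSConnection('<aws access key>', '<aws secret key>')  # placeholders

sg = conn.create_dbsecurity_group('example-sg', 'example access rules')
conn.authorize_dbsecurity_group('example-sg', cidr_ip='192.0.2.0/24')

# Confirm the rule is attached.
groups = conn.get_all_dbsecurity_groups('example-sg')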
+ + :type cidr_ip: string + :param cidr_ip: The CIDR block you are providing access to. + See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing + + :rtype: bool + :return: True if successful. + """ + params = {'DBSecurityGroupName':group_name} + if ec2_security_group_name: + params['EC2SecurityGroupName'] = ec2_security_group_name + if ec2_security_group_owner_id: + params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id + if cidr_ip: + params['CIDRIP'] = cidr_ip + return self.get_object('RevokeDBSecurityGroupIngress', params, DBSecurityGroup) + + # DBSnapshot methods + + def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None, + max_records=None, marker=None): + """ + Get information about DB Snapshots. + + :type snapshot_id: str + :param snapshot_id: The unique identifier of an RDS snapshot. + If not provided, all RDS snapshots will be returned. + + :type instance_id: str + :param instance_id: The identifier of a DBInstance. If provided, + only the DBSnapshots related to that instance will + be returned. + If not provided, all RDS snapshots will be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. + + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot` + """ + params = {} + if snapshot_id: + params['DBSnapshotIdentifier'] = snapshot_id + if instance_id: + params['DBInstanceIdentifier'] = instance_id + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeDBSnapshots', params, + [('DBSnapshot', DBSnapshot)]) + + def create_dbsnapshot(self, snapshot_id, dbinstance_id): + """ + Create a new DB snapshot. + + :type snapshot_id: string + :param snapshot_id: The identifier for the DBSnapshot + + :type dbinstance_id: string + :param dbinstance_id: The source identifier for the RDS instance from + which the snapshot is created. + + :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot` + :return: The newly created DBSnapshot + """ + params = {'DBSnapshotIdentifier' : snapshot_id, + 'DBInstanceIdentifier' : dbinstance_id} + return self.get_object('CreateDBSnapshot', params, DBSnapshot) + + def delete_dbsnapshot(self, identifier): + """ + Delete a DBSnapshot + + :type identifier: string + :param identifier: The identifier of the DBSnapshot to delete + """ + params = {'DBSnapshotIdentifier' : identifier} + return self.get_object('DeleteDBSnapshot', params, DBSnapshot) + + def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id, + instance_class, port=None, + availability_zone=None): + + """ + Create a new DBInstance from a DB snapshot. + + :type identifier: string + :param identifier: The identifier for the DBSnapshot + + :type instance_id: string + :param instance_id: The source identifier for the RDS instance from + which the snapshot is created. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the DBInstance. + Valid values are: + db.m1.small | db.m1.large | db.m1.xlarge | + db.m2.2xlarge | db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. 
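A sketch of the snapshot calls above; all identifiers are placeholders.

from boto.rds import RDSConnection

conn = RDSConnection('<aws access key>', '<aws secret key>')  # placeholders

# Snapshot an instance, list its snapshots, and prune an old one.
snap = conn.create_dbsnapshot('example-db-snap-1', 'example-db')
snaps = conn.get_all_dbsnapshots(instance_id='example-db')
conn.delete_dbsnapshot('example-db-snap-0')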
+ + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The newly created DBInstance + """ + params = {'DBSnapshotIdentifier' : identifier, + 'DBInstanceIdentifier' : instance_id, + 'DBInstanceClass' : instance_class} + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + return self.get_object('RestoreDBInstanceFromDBSnapshot', + params, DBInstance) + + def restore_dbinstance_from_point_in_time(self, source_instance_id, + target_instance_id, + use_latest=False, + restore_time=None, + dbinstance_class=None, + port=None, + availability_zone=None): + + """ + Create a new DBInstance from a point in time. + + :type source_instance_id: string + :param source_instance_id: The identifier for the source DBInstance. + + :type target_instance_id: string + :param target_instance_id: The identifier of the new DBInstance. + + :type use_latest: bool + :param use_latest: If True, the latest snapshot availabile will + be used. + + :type restore_time: datetime + :param restore_time: The date and time to restore from. Only + used if use_latest is False. + + :type instance_class: str + :param instance_class: The compute and memory capacity of the DBInstance. + Valid values are: + db.m1.small | db.m1.large | db.m1.xlarge | + db.m2.2xlarge | db.m2.4xlarge + + :type port: int + :param port: Port number on which database accepts connections. + Valid values [1115-65535]. Defaults to 3306. + + :type availability_zone: str + :param availability_zone: Name of the availability zone to place + DBInstance into. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The newly created DBInstance + """ + params = {'SourceDBInstanceIdentifier' : source_instance_id, + 'TargetDBInstanceIdentifier' : target_instance_id} + if use_latest: + params['UseLatestRestorableTime'] = 'true' + elif restore_time: + params['RestoreTime'] = restore_time.isoformat() + if dbinstance_class: + params['DBInstanceClass'] = dbinstance_class + if port: + params['Port'] = port + if availability_zone: + params['AvailabilityZone'] = availability_zone + return self.get_object('RestoreDBInstanceToPointInTime', + params, DBInstance) + + # Events + + def get_all_events(self, source_identifier=None, source_type=None, + start_time=None, end_time=None, + max_records=None, marker=None): + """ + Get information about events related to your DBInstances, + DBSecurityGroups and DBParameterGroups. + + :type source_identifier: str + :param source_identifier: If supplied, the events returned will be + limited to those that apply to the identified + source. The value of this parameter depends + on the value of source_type. If neither + parameter is specified, all events in the time + span will be returned. + + :type source_type: str + :param source_type: Specifies how the source_identifier should + be interpreted. Valid values are: + b-instance | db-security-group | + db-parameter-group | db-snapshot + + :type start_time: datetime + :param start_time: The beginning of the time interval for events. + If not supplied, all available events will + be returned. + + :type end_time: datetime + :param end_time: The ending of the time interval for events. + If not supplied, all available events will + be returned. + + :type max_records: int + :param max_records: The maximum number of records to be returned. + If more results are available, a MoreToken will + be returned in the response that can be used to + retrieve additional records. Default is 100. 
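A sketch of the two restore calls above; the identifiers and the restore timestamp are placeholders.

import datetime
from boto.rds import RDSConnection

conn = RDSConnection('<aws access key>', '<aws secret key>')  # placeholders

# New instance from an existing snapshot.
restored = conn.restore_dbinstance_from_dbsnapshot('example-db-snap-1',
                                                   'example-db-copy',
                                                   'db.m1.small')

# Or roll a source instance forward to a specific point in time.
when = datetime.datetime(2010, 5, 27, 12, 0, 0)
pit = conn.restore_dbinstance_from_point_in_time('example-db',
                                                 'example-db-pit',
                                                 restore_time=when)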
+ + :type marker: str + :param marker: The marker provided by a previous request. + + :rtype: list + :return: A list of class:`boto.rds.event.Event` + """ + params = {} + if source_identifier and source_type: + params['SourceIdentifier'] = source_identifier + params['SourceType'] = source_type + if start_time: + params['StartTime'] = start_time.isoformat() + if end_time: + params['EndTime'] = end_time.isoformat() + if max_records: + params['MaxRecords'] = max_records + if marker: + params['Marker'] = marker + return self.get_list('DescribeEvents', params, [('Event', Event)]) + + diff --git a/vendor/boto/boto/rds/dbinstance.py b/vendor/boto/boto/rds/dbinstance.py new file mode 100644 index 000000000000..23e1c984a439 --- /dev/null +++ b/vendor/boto/boto/rds/dbinstance.py @@ -0,0 +1,136 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
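RDSConnection.get_all_events above closes out boto/rds/__init__.py; a sketch of calling it follows. The identifier is a placeholder, and the docstring's "b-instance" appears to be a typo for the service's "db-instance" source type used here.

import datetime
from boto.rds import RDSConnection

conn = RDSConnection('<aws access key>', '<aws secret key>')  # placeholders

# Events for one instance over the last 24 hours.
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(hours=24)
events = conn.get_all_events(source_identifier='example-db',
                             source_type='db-instance',
                             start_time=start, end_time=end)
for event in events:
    print event.date, event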
+ +from boto.rds.dbsecuritygroup import DBSecurityGroup +from boto.rds.parametergroup import ParameterGroup + +class DBInstance(object): + """ + Represents a RDS DBInstance + """ + + def __init__(self, connection=None, id=None): + self.connection = connection + self.id = id + self.create_time = None + self.engine = None + self.status = None + self.allocated_storage = None + self.endpoint = None + self.instance_class = None + self.master_username = None + self.parameter_group = None + self.security_group = None + self.availability_zone = None + self.backup_retention_period = None + self.preferred_backup_window = None + self.preferred_maintenance_window = None + self.latest_restorable_time = None + self._in_endpoint = False + self._port = None + self._address = None + + def __repr__(self): + return 'DBInstance:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'Endpoint': + self._in_endpoint = True + elif name == 'DBParameterGroup': + self.parameter_group = ParameterGroup(self.connection) + return self.parameter_group + elif name == 'DBSecurityGroup': + self.security_group = DBSecurityGroup(self.connection) + return self.security_group + return None + + def endElement(self, name, value, connection): + if name == 'DBInstanceIdentifier': + self.id = value + elif name == 'DBInstanceStatus': + self.status = value + elif name == 'InstanceCreateTime': + self.create_time = value + elif name == 'Engine': + self.engine = value + elif name == 'DBInstanceStatus': + self.status = value + elif name == 'AllocatedStorage': + self.allocated_storage = int(value) + elif name == 'DBInstanceClass': + self.instance_class = value + elif name == 'MasterUsername': + self.master_username = value + elif name == 'Port': + if self._in_endpoint: + self._port = int(value) + elif name == 'Address': + if self._in_endpoint: + self._address = value + elif name == 'Endpoint': + self.endpoint = (self._address, self._port) + self._in_endpoint = False + elif name == 'AvailabilityZone': + self.availability_zone = value + elif name == 'BackupRetentionPeriod': + self.backup_retention_period = value + elif name == 'LatestRestorableTime': + self.latest_restorable_time = value + elif name == 'PreferredMaintenanceWindow': + self.preferred_maintenance_window = value + elif name == 'PreferredBackupWindow': + self.preferred_backup_window = value + else: + setattr(self, name, value) + + def snapshot(self, snapshot_id): + """ + Create a new DB snapshot of this DBInstance. + + :type identifier: string + :param identifier: The identifier for the DBSnapshot + + :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot` + :return: The newly created DBSnapshot + """ + return self.connection.create_dbsnapshot(snapshot_id, self.id) + + def stop(self, skip_final_snapshot, final_snapshot_id): + """ + Delete this DBInstance. + + :type skip_final_snapshot: bool + :param skip_final_snapshot: This parameter determines whether a final + db snapshot is created before the instance + is deleted. If True, no snapshot is created. + If False, a snapshot is created before + deleting the instance. + + :type final_snapshot_id: str + :param final_snapshot_id: If a final snapshot is requested, this + is the identifier used for that snapshot. + + :rtype: :class:`boto.rds.dbinstance.DBInstance` + :return: The deleted db instance. 
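The DBInstance object above is mostly a parsed record plus thin wrappers over the connection methods; a sketch with placeholder identifiers (stop(), whose body follows below, delegates to delete_dbinstance).

from boto.rds import RDSConnection

conn = RDSConnection('<aws access key>', '<aws secret key>')  # placeholders

db = conn.get_all_dbinstances('example-db')[0]
host, port = db.endpoint          # populated once the instance is available
db.snapshot('example-db-backup')
db.stop(skip_final_snapshot=False, final_snapshot_id='example-db-final')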
+ """ + return self.connection.delete_dbinstance(self.id, + skip_final_snapshot, + final_snapshot_id) diff --git a/vendor/boto/boto/rds/dbsecuritygroup.py b/vendor/boto/boto/rds/dbsecuritygroup.py new file mode 100644 index 000000000000..24cdad2da5ca --- /dev/null +++ b/vendor/boto/boto/rds/dbsecuritygroup.py @@ -0,0 +1,159 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an DBSecurityGroup +""" +from boto.ec2.securitygroup import SecurityGroup + +class DBSecurityGroup(object): + + def __init__(self, connection=None, owner_id=None, + name=None, description=None): + self.connection = connection + self.owner_id = owner_id + self.name = name + self.description = description + self.ec2_groups = [] + self.ip_ranges = [] + + def __repr__(self): + return 'DBSecurityGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'IPRange': + cidr = IPRange(self) + self.ip_ranges.append(cidr) + return cidr + elif name == 'EC2SecurityGroup': + ec2_grp = EC2SecurityGroup(self) + self.ec2_groups.append(ec2_grp) + return ec2_grp + else: + return None + + def endElement(self, name, value, connection): + if name == 'OwnerId': + self.owner_id = value + elif name == 'DBSecurityGroupName': + self.name = value + elif name == 'DBSecurityGroupDescription': + self.description = value + elif name == 'IPRanges': + pass + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_dbsecurity_group(self.name) + + def authorize(self, cidr_ip=None, ec2_group=None): + """ + Add a new rule to this DBSecurity group. + You need to pass in either a CIDR block to authorize or + and EC2 SecurityGroup. + + @type cidr_ip: string + @param cidr_ip: A valid CIDR IP range to authorize + + @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>` + + @rtype: bool + @return: True if successful. + """ + if isinstance(ec2_group, SecurityGroup): + group_name = ec2_group.name + group_owner_id = ec2_group.owner_id + else: + group_name = None + group_owner_id = None + return self.connection.authorize_dbsecurity_group(self.name, + cidr_ip, + group_name, + group_owner_id) + + def revoke(self, cidr_ip=None, ec2_group=None): + """ + Revoke access to a CIDR range or EC2 SecurityGroup + You need to pass in either a CIDR block to authorize or + and EC2 SecurityGroup. 
+ + @type cidr_ip: string + @param cidr_ip: A valid CIDR IP range to authorize + + @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>` + + @rtype: bool + @return: True if successful. + """ + if isinstance(ec2_group, SecurityGroup): + group_name = ec2_group.name + group_owner_id = ec2_group.owner_id + else: + group_name = None + group_owner_id = None + return self.connection.revoke_dbsecurity_group(self.name, + cidr_ip, + group_name, + group_owner_id) + +class IPRange(object): + + def __init__(self, parent=None): + self.parent = parent + self.cidr_ip = None + self.status = None + + def __repr__(self): + return 'IPRange:%s' % self.cidr_ip + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'CIDRIP': + self.cidr_ip = value + elif name == 'Status': + self.status = value + else: + setattr(self, name, value) + +class EC2SecurityGroup(object): + + def __init__(self, parent=None): + self.parent = parent + self.name = None + self.owner_id = None + + def __repr__(self): + return 'EC2SecurityGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'EC2SecurityGroupName': + self.name = value + elif name == 'EC2SecurityGroupOwnerId': + self.owner_id = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/rds/dbsnapshot.py b/vendor/boto/boto/rds/dbsnapshot.py new file mode 100644 index 000000000000..78d0230c21ec --- /dev/null +++ b/vendor/boto/boto/rds/dbsnapshot.py @@ -0,0 +1,74 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
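A sketch of the object-level helpers on DBSecurityGroup above. The group names and CIDR block are placeholders, and ec2_group stands for a boto.ec2.securitygroup.SecurityGroup fetched elsewhere. Note that revoke() as written calls connection.revoke_dbsecurity_group, while the RDSConnection class earlier in this patch defines revoke_security_group, so the CIDR revocation is shown through the connection directly.

from boto.rds import RDSConnection

conn = RDSConnection('<aws access key>', '<aws secret key>')  # placeholders

db_group = conn.get_all_dbsecurity_groups('example-sg')[0]

# Grant an EC2 security group access to the databases in this DB group
# (ec2_group is assumed to have been obtained via an EC2 connection).
db_group.authorize(ec2_group=ec2_group)

# Revoke a CIDR rule via the connection method defined earlier.
conn.revoke_security_group('example-sg', cidr_ip='192.0.2.0/24')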
+ +class DBSnapshot(object): + """ + Represents a RDS DB Snapshot + """ + + def __init__(self, connection=None, id=None): + self.connection = connection + self.id = id + self.engine = None + self.snapshot_create_time = None + self.instance_create_time = None + self.port = None + self.status = None + self.availability_zone = None + self.master_username = None + self.allocated_storage = None + self.instance_id = None + self.availability_zone = None + + def __repr__(self): + return 'DBSnapshot:%s' % self.id + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Engine': + self.engine = value + elif name == 'InstanceCreateTime': + self.instance_create_time = value + elif name == 'SnapshotCreateTime': + self.snapshot_create_time = value + elif name == 'DBInstanceIdentifier': + self.instance_id = value + elif name == 'DBSnapshotIdentifier': + self.id = value + elif name == 'Port': + self.port = int(value) + elif name == 'Status': + self.status = value + elif name == 'AvailabilityZone': + self.availability_zone = value + elif name == 'MasterUsername': + self.master_username = value + elif name == 'AllocatedStorage': + self.allocated_storage = int(value) + elif name == 'SnapshotTime': + self.time = value + else: + setattr(self, name, value) + + + diff --git a/vendor/boto/boto/rds/event.py b/vendor/boto/boto/rds/event.py new file mode 100644 index 000000000000..a91f8f08a50c --- /dev/null +++ b/vendor/boto/boto/rds/event.py @@ -0,0 +1,49 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class Event(object): + + def __init__(self, connection=None): + self.connection = connection + self.message = None + self.source_identifier = None + self.source_type = None + self.engine = None + self.date = None + + def __repr__(self): + return '"%s"' % self.message + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'SourceIdentifier': + self.source_identifier = value + elif name == 'SourceType': + self.source_type = value + elif name == 'Message': + self.message = value + elif name == 'Date': + self.date = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/rds/parametergroup.py b/vendor/boto/boto/rds/parametergroup.py new file mode 100644 index 000000000000..081e263575b9 --- /dev/null +++ b/vendor/boto/boto/rds/parametergroup.py @@ -0,0 +1,201 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +class ParameterGroup(dict): + + def __init__(self, connection=None): + dict.__init__(self) + self.connection = connection + self.name = None + self.description = None + self.engine = None + self._current_param = None + + def __repr__(self): + return 'ParameterGroup:%s' % self.name + + def startElement(self, name, attrs, connection): + if name == 'Parameter': + if self._current_param: + self[self._current_param.name] = self._current_param + self._current_param = Parameter(self) + return self._current_param + + def endElement(self, name, value, connection): + if name == 'DBParameterGroupName': + self.name = value + elif name == 'Description': + self.description = value + elif name == 'Engine': + self.engine = value + else: + setattr(self, name, value) + + def modifiable(self): + mod = [] + for key in self: + p = self[key] + if p.is_modifiable: + mod.append(p) + return mod + + def get_params(self): + pg = self.connection.get_all_dbparameters(self.name) + self.update(pg) + + def add_param(self, name, value, apply_method): + param = Parameter() + param.name = name + param.value = value + param.apply_method = apply_method + self.params.append(param) + +class Parameter(object): + """ + Represents a RDS Parameter + """ + + ValidTypes = {'integer' : int, + 'string' : str, + 'boolean' : bool} + ValidSources = ['user', 'system', 'engine-default'] + ValidApplyTypes = ['static', 'dynamic'] + ValidApplyMethods = ['immediate', 'pending-reboot'] + + def __init__(self, group=None, name=None): + self.group = group + self.name = name + self._value = None + self.type = str + self.source = None + self.is_modifiable = True + self.description = None + self.apply_method = None + self.allowed_values = None + + def __repr__(self): + return 'Parameter:%s' % self.name + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'ParameterName': + self.name = value + elif name == 'ParameterValue': + self._value = value + elif name == 'DataType': + if value in self.ValidTypes: + self.type = value + elif name == 'Source': + if value in self.ValidSources: + self.source = value + elif name == 'IsModifiable': + if value.lower() == 'true': + self.is_modifiable = True + else: + self.is_modifiable = False + elif name == 'Description': + self.description = value + elif name == 'ApplyType': + if value in self.ValidApplyTypes: + self.apply_type = value + elif name == 'AllowedValues': + self.allowed_values = value + else: + setattr(self, name, value) + + def merge(self, d, i): + prefix = 'Parameters.member.%d.' 
% i + if self.name: + d[prefix+'ParameterName'] = self.name + if self._value: + d[prefix+'ParameterValue'] = self._value + if self.apply_method: + d[prefix+'ApplyMethod'] = self.apply_method + + def _set_string_value(self, value): + if not isinstance(value, (str, unicode)): + raise ValueError, 'value must be of type str' + if self.allowed_values: + choices = self.allowed_values.split(',') + if value not in choices: + raise ValueError, 'value must be in %s' % self.allowed_values + self._value = value + + def _set_integer_value(self, value): + if isinstance(value, str) or isinstance(value, unicode): + value = int(value) + if isinstance(value, int) or isinstance(value, long): + if self.allowed_values: + min, max = self.allowed_values.split('-') + if value < int(min) or value > int(max): + raise ValueError, 'range is %s' % self.allowed_values + self._value = value + else: + raise ValueError, 'value must be integer' + + def _set_boolean_value(self, value): + if isinstance(value, bool): + self._value = value + elif isinstance(value, str) or isinstance(value, unicode): + if value.lower() == 'true': + self._value = True + else: + self._value = False + else: + raise ValueError, 'value must be boolean' + + def set_value(self, value): + if self.type == 'string': + self._set_string_value(value) + elif self.type == 'integer': + self._set_integer_value(value) + elif self.type == 'boolean': + self._set_boolean_value(value) + else: + raise TypeError, 'unknown type (%s)' % self.type + + def get_value(self): + if self._value == None: + return self._value + if self.type == 'string': + return self._value + elif self.type == 'integer': + if not isinstance(self._value, int) and not isinstance(self._value, long): + self._set_integer_value(self._value) + return self._value + elif self.type == 'boolean': + if not isinstance(self._value, bool): + self._set_boolean_value(self._value) + return self._value + else: + raise TypeError, 'unknown type (%s)' % self.type + + value = property(get_value, set_value, 'The value of the parameter') + + def apply(self, immediate=False): + if immediate: + self.apply_method = 'immediate' + else: + self.apply_method = 'pending-reboot' + self.group.connection.modify_parameter_group(self.group.name, [self]) + diff --git a/vendor/boto/boto/resultset.py b/vendor/boto/boto/resultset.py new file mode 100644 index 000000000000..cf6f1fdcf706 --- /dev/null +++ b/vendor/boto/boto/resultset.py @@ -0,0 +1,136 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class ResultSet(list): + """ + The ResultSet is used to pass results back from the Amazon services + to the client. It has an ugly but workable mechanism for parsing + the XML results from AWS. Because I don't really want any dependencies + on external libraries, I'm using the standard SAX parser that comes + with Python. The good news is that it's quite fast and efficient but + it makes some things rather difficult. + + You can pass in, as the marker_elem parameter, a list of tuples. + Each tuple contains a string as the first element which represents + the XML element that the resultset needs to be on the lookout for + and a Python class as the second element of the tuple. Each time the + specified element is found in the XML, a new instance of the class + will be created and popped onto the stack. + + """ + + def __init__(self, marker_elem=None): + list.__init__(self) + if isinstance(marker_elem, list): + self.markers = marker_elem + else: + self.markers = [] + self.marker = None + self.key_marker = None + self.version_id_marker = None + self.is_truncated = False + self.next_token = None + self.status = True + + def startElement(self, name, attrs, connection): + for t in self.markers: + if name == t[0]: + obj = t[1](connection) + self.append(obj) + return obj + return None + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + + def endElement(self, name, value, connection): + if name == 'IsTruncated': + self.is_truncated = self.to_boolean(value) + elif name == 'Marker': + self.marker = value + elif name == 'KeyMarker': + self.key_marker = value + elif name == 'VersionIdMarker': + self.version_id_marker = value + elif name == 'Prefix': + self.prefix = value + elif name == 'return': + self.status = self.to_boolean(value) + elif name == 'StatusCode': + self.status = self.to_boolean(value, 'Success') + elif name == 'ItemName': + self.append(value) + elif name == 'NextToken': + self.next_token = value + elif name == 'BoxUsage': + try: + connection.box_usage += float(value) + except: + pass + elif name == 'IsValid': + self.status = self.to_boolean(value, 'True') + else: + setattr(self, name, value) + +class BooleanResult(object): + + def __init__(self, marker_elem=None): + self.status = True + self.request_id = None + self.box_usage = None + + def __repr__(self): + if self.status: + return 'True' + else: + return 'False' + + def __nonzero__(self): + return self.status + + def startElement(self, name, attrs, connection): + return None + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + + def endElement(self, name, value, connection): + if name == 'return': + self.status = self.to_boolean(value) + elif name == 'StatusCode': + self.status = self.to_boolean(value, 'Success') + elif name == 'IsValid': + self.status = self.to_boolean(value, 'True') + elif name == 'RequestId': + self.request_id = value + elif name == 'requestId': + self.request_id = value + elif name == 'BoxUsage': + self.request_id = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/s3/__init__.py b/vendor/boto/boto/s3/__init__.py new file mode 100644 index 000000000000..be2de1d511d2 --- /dev/null +++ b/vendor/boto/boto/s3/__init__.py 
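The ResultSet docstring above describes the marker_elem mechanism; the sketch below shows how a result set is filled from raw XML, mirroring the way the bucket and connection code later in this patch wire handler.XmlHandler and xml.sax together. The XML snippet and element names are illustrative only.

import xml.sax
from boto import handler
from boto.resultset import ResultSet
from boto.rds.event import Event

body = """<DescribeEventsResult>
  <Events><Event><Message>instance rebooted</Message></Event></Events>
  <IsTruncated>false</IsTruncated>
</DescribeEventsResult>"""

# Each ('Event', Event) tuple names the element that starts a new object
# and the class to instantiate for it.
rs = ResultSet([('Event', Event)])
h = handler.XmlHandler(rs, None)
xml.sax.parseString(body, h)
print rs[0], rs.is_truncated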
@@ -0,0 +1,31 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto + +boto.check_extensions(__name__, __path__) + +from connection import S3Connection as Connection +from key import Key +from bucket import Bucket + +__all__ = ['Connection', 'Key', 'Bucket'] diff --git a/vendor/boto/boto/s3/acl.py b/vendor/boto/boto/s3/acl.py new file mode 100644 index 000000000000..59d3687bcbb5 --- /dev/null +++ b/vendor/boto/boto/s3/acl.py @@ -0,0 +1,162 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
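boto/s3/__init__.py above re-exports S3Connection as Connection; a sketch of the resulting import style follows. The credentials and bucket name are placeholders, and create_bucket and set_contents_from_string live in the s3 connection and key modules found elsewhere in this patch.

from boto.s3 import Connection, Key

conn = Connection('<aws access key>', '<aws secret key>')   # placeholders
bucket = conn.create_bucket('example-bucket')               # hypothetical name

k = Key(bucket)
k.key = 'hello.txt'
k.set_contents_from_string('hello world')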
+ +from boto.s3.user import User + + +CannedACLStrings = ['private', 'public-read', + 'public-read-write', 'authenticated-read'] + + +class Policy: + + def __init__(self, parent=None): + self.parent = parent + self.acl = None + + def __repr__(self): + grants = [] + for g in self.acl.grants: + if g.id == self.owner.id: + grants.append("%s (owner) = %s" % (g.display_name, g.permission)) + else: + if g.type == 'CanonicalUser': + u = g.display_name + elif g.type == 'Group': + u = g.uri + else: + u = g.email + grants.append("%s = %s" % (u, g.permission)) + return "" % ", ".join(grants) + + def startElement(self, name, attrs, connection): + if name == 'Owner': + self.owner = User(self) + return self.owner + elif name == 'AccessControlList': + self.acl = ACL(self) + return self.acl + else: + return None + + def endElement(self, name, value, connection): + if name == 'Owner': + pass + elif name == 'AccessControlList': + pass + else: + setattr(self, name, value) + + def to_xml(self): + s = '' + s += self.owner.to_xml() + s += self.acl.to_xml() + s += '' + return s + +class ACL: + + def __init__(self, policy=None): + self.policy = policy + self.grants = [] + + def add_grant(self, grant): + self.grants.append(grant) + + def add_email_grant(self, permission, email_address): + grant = Grant(permission=permission, type='AmazonCustomerByEmail', + email_address=email_address) + self.grants.append(grant) + + def add_user_grant(self, permission, user_id): + grant = Grant(permission=permission, type='CanonicalUser', id=user_id) + self.grants.append(grant) + + def startElement(self, name, attrs, connection): + if name == 'Grant': + self.grants.append(Grant(self)) + return self.grants[-1] + else: + return None + + def endElement(self, name, value, connection): + if name == 'Grant': + pass + else: + setattr(self, name, value) + + def to_xml(self): + s = '' + for grant in self.grants: + s += grant.to_xml() + s += '' + return s + +class Grant: + + NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"' + + def __init__(self, permission=None, type=None, id=None, + display_name=None, uri=None, email_address=None): + self.permission = permission + self.id = id + self.display_name = display_name + self.uri = uri + self.email_address = email_address + self.type = type + + def startElement(self, name, attrs, connection): + if name == 'Grantee': + self.type = attrs['xsi:type'] + return None + + def endElement(self, name, value, connection): + if name == 'ID': + self.id = value + elif name == 'DisplayName': + self.display_name = value + elif name == 'URI': + self.uri = value + elif name == 'EmailAddress': + self.email_address = value + elif name == 'Grantee': + pass + elif name == 'Permission': + self.permission = value + else: + setattr(self, name, value) + + def to_xml(self): + s = '' + s += '' % (self.NameSpace, self.type) + if self.type == 'CanonicalUser': + s += '%s' % self.id + s += '%s' % self.display_name + elif self.type == 'Group': + s += '%s' % self.uri + else: + s += '%s' % self.email_address + s += '' + s += '%s' % self.permission + s += '' + return s + + diff --git a/vendor/boto/boto/s3/bucket.py b/vendor/boto/boto/s3/bucket.py new file mode 100644 index 000000000000..42c32f8daf44 --- /dev/null +++ b/vendor/boto/boto/s3/bucket.py @@ -0,0 +1,721 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without 
restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +from boto import handler +from boto.resultset import ResultSet +from boto.s3.acl import Policy, CannedACLStrings, Grant +from boto.s3.key import Key +from boto.s3.prefix import Prefix +from boto.s3.deletemarker import DeleteMarker +from boto.exception import S3ResponseError, S3PermissionsError, S3CopyError +from boto.s3.bucketlistresultset import BucketListResultSet +from boto.s3.bucketlistresultset import VersionedBucketListResultSet +import boto.utils +import xml.sax +import urllib +import re + +S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL'] + +class Bucket: + + BucketLoggingBody = """ + + + %s + %s + + """ + + EmptyBucketLoggingBody = """ + + """ + + LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery' + + BucketPaymentBody = """ + + %s + """ + + VersioningBody = """ + + %s + %s + """ + + VersionRE = '([A-Za-z]+)' + MFADeleteRE = '([A-Za-z]+)' + + def __init__(self, connection=None, name=None, key_class=Key): + self.name = name + self.connection = connection + self.key_class = key_class + + def __repr__(self): + return '' % self.name + + def __iter__(self): + return iter(BucketListResultSet(self)) + + def __contains__(self, key_name): + return not (self.get_key(key_name) is None) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Name': + self.name = value + elif name == 'CreationDate': + self.creation_date = value + else: + setattr(self, name, value) + + def set_key_class(self, key_class): + """ + Set the Key class associated with this bucket. By default, this + would be the boto.s3.key.Key class but if you want to subclass that + for some reason this allows you to associate your new class with a + bucket so that when you call bucket.new_key() or when you get a listing + of keys in the bucket you will get an instances of your key class + rather than the default. + + :type key_class: class + :param key_class: A subclass of Key that can be more specific + """ + self.key_class = key_class + + def lookup(self, key_name, headers=None): + """ + Deprecated: Please use get_key method. + + :type key_name: string + :param key_name: The name of the key to retrieve + + :rtype: :class:`boto.s3.key.Key` + :returns: A Key object from this bucket. + """ + return self.get_key(key_name, headers=headers) + + def get_key(self, key_name, headers=None, version_id=None): + """ + Check to see if a particular key exists within the bucket. This + method uses a HEAD request to check for the existance of the key. 
+ Returns: An instance of a Key object or None + + :type key_name: string + :param key_name: The name of the key to retrieve + + :rtype: :class:`boto.s3.key.Key` + :returns: A Key object from this bucket. + """ + if version_id: + query_args = 'versionId=%s' % version_id + else: + query_args = None + response = self.connection.make_request('HEAD', self.name, key_name, + headers=headers, + query_args=query_args) + if response.status == 200: + response.read() + k = self.key_class(self) + k.metadata = boto.utils.get_aws_metadata(response.msg) + k.etag = response.getheader('etag') + k.content_type = response.getheader('content-type') + k.content_encoding = response.getheader('content-encoding') + k.last_modified = response.getheader('last-modified') + k.size = int(response.getheader('content-length')) + k.name = key_name + k.handle_version_headers(response) + return k + else: + if response.status == 404: + response.read() + return None + else: + raise S3ResponseError(response.status, response.reason, '') + + def list(self, prefix='', delimiter='', marker='', headers=None): + """ + List key objects within a bucket. This returns an instance of an + BucketListResultSet that automatically handles all of the result + paging, etc. from S3. You just need to keep iterating until + there are no more results. + Called with no arguments, this will return an iterator object across + all keys within the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle + through the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See: + http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ + for more details. + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return BucketListResultSet(self, prefix, delimiter, marker, headers) + + def list_versions(self, prefix='', delimiter='', key_marker='', + version_id_marker='', headers=None): + """ + List key objects within a bucket. This returns an instance of an + BucketListResultSet that automatically handles all of the result + paging, etc. from S3. You just need to keep iterating until + there are no more results. + Called with no arguments, this will return an iterator object across + all keys within the bucket. + + :type prefix: string + :param prefix: allows you to limit the listing to a particular + prefix. For example, if you call the method with + prefix='/foo/' then the iterator will only cycle + through the keys that begin with the string '/foo/'. + + :type delimiter: string + :param delimiter: can be used in conjunction with the prefix + to allow you to organize and browse your keys + hierarchically. See: + http://docs.amazonwebservices.com/AmazonS3/2006-03-01/ + for more details. 
+ + :type marker: string + :param marker: The "marker" of where you are in the result set + + :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet` + :return: an instance of a BucketListResultSet that handles paging, etc + """ + return VersionedBucketListResultSet(self, prefix, delimiter, key_marker, + version_id_marker, headers) + + def _get_all(self, element_map, initial_query_string='', + headers=None, **params): + l = [] + for k,v in params.items(): + k = k.replace('_', '-') + if k == 'maxkeys': + k = 'max-keys' + if isinstance(v, unicode): + v = v.encode('utf-8') + if v is not None and v != '': + l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v)))) + if len(l): + s = initial_query_string + '&' + '&'.join(l) + else: + s = initial_query_string + response = self.connection.make_request('GET', self.name, + headers=headers, query_args=s) + body = response.read() + boto.log.debug(body) + if response.status == 200: + rs = ResultSet(element_map) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + else: + raise S3ResponseError(response.status, response.reason, body) + + def get_all_keys(self, headers=None, **params): + """ + A lower-level method for listing contents of a bucket. + This closely models the actual S3 API and requires you to manually + handle the paging of results. For a higher-level method + that handles the details of paging for you, you can use the list method. + + :type max_keys: int + :param max_keys: The maximum number of keys to retrieve + + :type prefix: string + :param prefix: The prefix of the keys you want to retrieve + + :type marker: string + :param marker: The "marker" of where you are in the result set + + :type delimiter: string + :param delimiter: If this optional, Unicode string parameter + is included with your request, then keys that + contain the same string between the prefix and + the first occurrence of the delimiter will be + rolled up into a single result element in the + CommonPrefixes collection. These rolled-up keys + are not returned elsewhere in the response. + + :rtype: ResultSet + :return: The result from S3 listing the keys requested + + """ + return self._get_all([('Contents', self.key_class), + ('CommonPrefixes', Prefix)], + '', headers, **params) + + def get_all_versions(self, headers=None, **params): + """ + A lower-level, version-aware method for listing contents of a bucket. + This closely models the actual S3 API and requires you to manually + handle the paging of results. For a higher-level method + that handles the details of paging for you, you can use the list method. + + :type max_keys: int + :param max_keys: The maximum number of keys to retrieve + + :type prefix: string + :param prefix: The prefix of the keys you want to retrieve + + :type key_marker: string + :param key_marker: The "marker" of where you are in the result set + with respect to keys. + + :type version_id_marker: string + :param version_id_marker: The "marker" of where you are in the result + set with respect to version-id's. + + :type delimiter: string + :param delimiter: If this optional, Unicode string parameter + is included with your request, then keys that + contain the same string between the prefix and + the first occurrence of the delimiter will be + rolled up into a single result element in the + CommonPrefixes collection. These rolled-up keys + are not returned elsewhere in the response. 
+ + :rtype: ResultSet + :return: The result from S3 listing the keys requested + + """ + return self._get_all([('Version', self.key_class), + ('CommonPrefixes', Prefix), + ('DeleteMarker', DeleteMarker)], + 'versions', headers, **params) + + def new_key(self, key_name=None): + """ + Creates a new key + + :type key_name: string + :param key_name: The name of the key to create + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + return self.key_class(self, key_name) + + def generate_url(self, expires_in, method='GET', + headers=None, force_http=False): + return self.connection.generate_url(expires_in, method, self.name, + headers=headers, + force_http=force_http) + + def delete_key(self, key_name, headers=None, + version_id=None, mfa_token=None): + """ + Deletes a key from the bucket. If a version_id is provided, + only that version of the key will be deleted. + + :type key_name: string + :param key_name: The key name to delete + + :type version_id: string + :param version_id: The version ID (optional) + + :type mfa_token: tuple or list of strings + :param mfa_token: A tuple or list consisting of the serial number + from the MFA device and the current value of + the six-digit token associated with the device. + This value is required anytime you are + deleting versioned objects from a bucket + that has the MFADelete option on the bucket. + """ + if version_id: + query_args = 'versionId=%s' % version_id + else: + query_args = None + if mfa_token: + if not headers: + headers = {} + headers['x-amz-mfa'] = ' '.join(mfa_token) + response = self.connection.make_request('DELETE', self.name, key_name, + headers=headers, + query_args=query_args) + body = response.read() + if response.status != 204: + raise S3ResponseError(response.status, response.reason, body) + + def copy_key(self, new_key_name, src_bucket_name, + src_key_name, metadata=None, src_version_id=None): + """ + Create a new key in the bucket by copying another existing key. + + :type new_key_name: string + :param new_key_name: The name of the new key + + :type src_bucket_name: string + :param src_bucket_name: The name of the source bucket + + :type src_key_name: string + :param src_key_name: The name of the source key + + :type src_version_id: string + :param src_version_id: The version id for the key. This param + is optional. If not specified, the newest + version of the key will be copied. + + :type metadata: dict + :param metadata: Metadata to be associated with new key. + If metadata is supplied, it will replace the + metadata of the source key being copied. + If no metadata is supplied, the source key's + metadata will be copied to the new key. 
+ + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name)) + if src_version_id: + src += '?version_id=%s' % src_version_id + if metadata: + headers = {'x-amz-copy-source' : src, + 'x-amz-metadata-directive' : 'REPLACE'} + headers = boto.utils.merge_meta(headers, metadata) + else: + headers = {'x-amz-copy-source' : src, + 'x-amz-metadata-directive' : 'COPY'} + response = self.connection.make_request('PUT', self.name, new_key_name, + headers=headers) + body = response.read() + if response.status == 200: + key = self.new_key(new_key_name) + h = handler.XmlHandler(key, self) + xml.sax.parseString(body, h) + if hasattr(key, 'Error'): + raise S3CopyError(key.Code, key.Message, body) + key.handle_version_headers(response) + return key + else: + raise S3ResponseError(response.status, response.reason, body) + + def set_canned_acl(self, acl_str, key_name='', headers=None, + version_id=None): + assert acl_str in CannedACLStrings + + if headers: + headers['x-amz-acl'] = acl_str + else: + headers={'x-amz-acl': acl_str} + + query_args='acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('PUT', self.name, key_name, + headers=headers, query_args=query_args) + body = response.read() + if response.status != 200: + raise S3ResponseError(response.status, response.reason, body) + + def get_xml_acl(self, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise S3ResponseError(response.status, response.reason, body) + return body + + def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('PUT', self.name, key_name, + data=acl_str, + query_args=query_args, + headers=headers) + body = response.read() + if response.status != 200: + raise S3ResponseError(response.status, response.reason, body) + + def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None): + if isinstance(acl_or_str, Policy): + self.set_xml_acl(acl_or_str.to_xml(), key_name, + headers, version_id) + else: + self.set_canned_acl(acl_or_str, key_name, + headers, version_id) + + def get_acl(self, key_name='', headers=None, version_id=None): + query_args = 'acl' + if version_id: + query_args += '&versionId=%s' % version_id + response = self.connection.make_request('GET', self.name, key_name, + query_args=query_args, + headers=headers) + body = response.read() + if response.status == 200: + policy = Policy(self) + h = handler.XmlHandler(policy, self) + xml.sax.parseString(body, h) + return policy + else: + raise S3ResponseError(response.status, response.reason, body) + + def make_public(self, recursive=False, headers=None): + self.set_canned_acl('public-read', headers=headers) + if recursive: + for key in self: + self.set_canned_acl('public-read', key.name, headers=headers) + + def add_email_grant(self, permission, email_address, + recursive=False, headers=None): + """ + Convenience method that provides a quick way to add an email grant + to a bucket. 
This method retrieves the current ACL, creates a new + grant based on the parameters passed in, adds that grant to the ACL + and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). + + :type email_address: string + :param email_address: The email address associated with the AWS + account your are granting the permission to. + + :type recursive: boolean + :param recursive: A boolean value to controls whether the command + will apply the grant to all keys within the bucket + or not. The default value is False. By passing a + True value, the call will iterate through all keys + in the bucket and apply the same grant to each key. + CAUTION: If you have a lot of keys, this could take + a long time! + """ + if permission not in S3Permissions: + raise S3PermissionsError('Unknown Permission: %s' % permission) + policy = self.get_acl(headers=headers) + policy.acl.add_email_grant(permission, email_address) + self.set_acl(policy, headers=headers) + if recursive: + for key in self: + key.add_email_grant(permission, email_address, headers=headers) + + def add_user_grant(self, permission, user_id, recursive=False, headers=None): + """ + Convenience method that provides a quick way to add a canonical user grant to a bucket. + This method retrieves the current ACL, creates a new grant based on the parameters + passed in, adds that grant to the ACL and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL). + + :type user_id: string + :param user_id: The canonical user id associated with the AWS account your are granting + the permission to. + + :type recursive: boolean + :param recursive: A boolean value to controls whether the command + will apply the grant to all keys within the bucket + or not. The default value is False. By passing a + True value, the call will iterate through all keys + in the bucket and apply the same grant to each key. + CAUTION: If you have a lot of keys, this could take + a long time! + """ + if permission not in S3Permissions: + raise S3PermissionsError('Unknown Permission: %s' % permission) + policy = self.get_acl(headers=headers) + policy.acl.add_user_grant(permission, user_id) + self.set_acl(policy, headers=headers) + if recursive: + for key in self: + key.add_user_grant(permission, user_id, headers=headers) + + def list_grants(self, headers=None): + policy = self.get_acl(headers=headers) + return policy.acl.grants + + def get_location(self): + """ + Returns the LocationConstraint for the bucket. + + :rtype: str + :return: The LocationConstraint for the bucket or the empty string if + no constraint was specified when bucket was created. 
+ """ + response = self.connection.make_request('GET', self.name, + query_args='location') + body = response.read() + if response.status == 200: + rs = ResultSet(self) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs.LocationConstraint + else: + raise S3ResponseError(response.status, response.reason, body) + + def enable_logging(self, target_bucket, target_prefix='', headers=None): + if isinstance(target_bucket, Bucket): + target_bucket = target_bucket.name + body = self.BucketLoggingBody % (target_bucket, target_prefix) + response = self.connection.make_request('PUT', self.name, data=body, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise S3ResponseError(response.status, response.reason, body) + + def disable_logging(self, headers=None): + body = self.EmptyBucketLoggingBody + response = self.connection.make_request('PUT', self.name, data=body, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise S3ResponseError(response.status, response.reason, body) + + def get_logging_status(self, headers=None): + response = self.connection.make_request('GET', self.name, + query_args='logging', headers=headers) + body = response.read() + if response.status == 200: + return body + else: + raise S3ResponseError(response.status, response.reason, body) + + def set_as_logging_target(self, headers=None): + policy = self.get_acl(headers=headers) + g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup) + g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup) + policy.acl.add_grant(g1) + policy.acl.add_grant(g2) + self.set_acl(policy, headers=headers) + + def get_request_payment(self, headers=None): + response = self.connection.make_request('GET', self.name, + query_args='requestPayment', headers=headers) + body = response.read() + if response.status == 200: + return body + else: + raise S3ResponseError(response.status, response.reason, body) + + def set_request_payment(self, payer='BucketOwner', headers=None): + body = self.BucketPaymentBody % payer + response = self.connection.make_request('PUT', self.name, data=body, + query_args='requestPayment', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise S3ResponseError(response.status, response.reason, body) + + def configure_versioning(self, versioning, mfa_delete=False, + mfa_token=None, headers=None): + """ + Configure versioning for this bucket. + Note: This feature is currently in beta release and is available + only in the Northern California region. + + :type versioning: bool + :param versioning: A boolean indicating whether version is + enabled (True) or disabled (False). + + :type mfa_delete: bool + :param mfa_delete: A boolean indicating whether the Multi-Factor + Authentication Delete feature is enabled (True) + or disabled (False). If mfa_delete is enabled + then all Delete operations will require the + token from your MFA device to be passed in + the request. + + :type mfa_token: tuple or list of strings + :param mfa_token: A tuple or list consisting of the serial number + from the MFA device and the current value of + the six-digit token associated with the device. + This value is required when you are changing + the status of the MfaDelete property of + the bucket. 
+ """ + if versioning: + ver = 'Enabled' + else: + ver = 'Suspended' + if mfa_delete: + mfa = 'Enabled' + else: + mfa = 'Disabled' + body = self.VersioningBody % (ver, mfa) + if mfa_token: + if not headers: + headers = {} + headers['x-amz-mfa'] = ' '.join(mfa_token) + response = self.connection.make_request('PUT', self.name, data=body, + query_args='versioning', headers=headers) + body = response.read() + if response.status == 200: + return True + else: + raise S3ResponseError(response.status, response.reason, body) + + def get_versioning_status(self, headers=None): + """ + Returns the current status of versioning on the bucket. + + :rtype: dict + :returns: A dictionary containing a key named 'Versioning' + that can have a value of either Enabled, Disabled, + or Suspended. Also, if MFADelete has ever been enabled + on the bucket, the dictionary will contain a key + named 'MFADelete' which will have a value of either + Enabled or Suspended. + """ + response = self.connection.make_request('GET', self.name, + query_args='versioning', headers=headers) + body = response.read() + boto.log.debug(body) + if response.status == 200: + d = {} + ver = re.search(self.VersionRE, body) + if ver: + d['Versioning'] = ver.group(1) + mfa = re.search(self.MFADeleteRE, body) + if mfa: + d['MfaDelete'] = mfa.group(1) + return d + else: + raise S3ResponseError(response.status, response.reason, body) + + def delete(self, headers=None): + return self.connection.delete_bucket(self.name, headers=headers) diff --git a/vendor/boto/boto/s3/bucketlistresultset.py b/vendor/boto/boto/s3/bucketlistresultset.py new file mode 100644 index 000000000000..9fc79bdf3109 --- /dev/null +++ b/vendor/boto/boto/s3/bucketlistresultset.py @@ -0,0 +1,99 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None): + """ + A generator function for listing keys in a bucket. + """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_keys(prefix=prefix, marker=marker, + delimiter=delimiter, headers=headers) + for k in rs: + yield k + if k: + marker = k.name + more_results= rs.is_truncated + +class BucketListResultSet: + """ + A resultset for listing keys within a bucket. Uses the bucket_lister + generator function and implements the iterator interface. 
This + transparently handles the results paging from S3 so even if you have + many thousands of keys within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + + def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None): + self.bucket = bucket + self.prefix = prefix + self.delimiter = delimiter + self.marker = marker + self.headers = headers + + def __iter__(self): + return bucket_lister(self.bucket, prefix=self.prefix, + delimiter=self.delimiter, marker=self.marker, headers=self.headers) + +def versioned_bucket_lister(bucket, prefix='', delimiter='', + key_marker='', version_id_marker='', headers=None): + """ + A generator function for listing versions in a bucket. + """ + more_results = True + k = None + while more_results: + rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker, + version_id_marker=version_id_marker, + delimiter=delimiter, headers=headers) + for k in rs: + yield k + key_marker = rs.key_marker + version_id_marker = rs.version_id_marker + more_results= rs.is_truncated + +class VersionedBucketListResultSet: + """ + A resultset for listing versions within a bucket. Uses the bucket_lister + generator function and implements the iterator interface. This + transparently handles the results paging from S3 so even if you have + many thousands of keys within the bucket you can iterate over all + keys in a reasonably efficient manner. + """ + + def __init__(self, bucket=None, prefix='', delimiter='', key_marker='', + version_id_marker='', headers=None): + self.bucket = bucket + self.prefix = prefix + self.delimiter = delimiter + self.key_marker = key_marker + self.version_id_marker = version_id_marker + self.headers = headers + + def __iter__(self): + return versioned_bucket_lister(self.bucket, prefix=self.prefix, + delimiter=self.delimiter, + key_marker=self.key_marker, + version_id_marker=self.version_id_marker, + headers=self.headers) + + diff --git a/vendor/boto/boto/s3/connection.py b/vendor/boto/boto/s3/connection.py new file mode 100644 index 000000000000..614de0b8613b --- /dev/null +++ b/vendor/boto/boto/s3/connection.py @@ -0,0 +1,350 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
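
The Bucket and BucketListResultSet classes above pair the low-level get_all_keys/get_all_versions calls with iterators that hide result paging. A minimal usage sketch follows; the credentials and the bucket name 'example-bucket' are placeholders, and a reachable S3 endpoint is assumed.

# Usage sketch for the Bucket listing, ACL and versioning helpers defined above.
# Credentials and bucket name are placeholders, not part of this patch.
from boto.s3.connection import S3Connection

conn = S3Connection('ACCESS_KEY', 'SECRET_KEY')
bucket = conn.get_bucket('example-bucket')       # validated via get_all_keys(maxkeys=0)

# BucketListResultSet pages through truncated listings transparently.
for key in bucket.list(prefix='logs/'):
    print key.name, key.size

# The grant helpers fetch the current ACL, append a Grant and PUT the policy back.
bucket.add_email_grant('READ', 'user@example.com')

# Versioning is toggled by PUTting the VersioningBody template shown above.
bucket.configure_versioning(True)
print bucket.get_versioning_status()
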
+ +import xml.sax +import urllib, base64 +import time +import boto.utils +from boto.connection import AWSAuthConnection +from boto import handler +from boto.s3.bucket import Bucket +from boto.s3.key import Key +from boto.resultset import ResultSet +from boto.exception import S3ResponseError, S3CreateError, BotoClientError + +def assert_case_insensitive(f): + def wrapper(*args, **kwargs): + if len(args) == 3 and not (args[2].islower() or args[2].isalnum()): + raise BotoClientError("Bucket names cannot contain upper-case " \ + "characters when using either the sub-domain or virtual " \ + "hosting calling format.") + return f(*args, **kwargs) + return wrapper + +class _CallingFormat: + def build_url_base(self, protocol, server, bucket, key=''): + url_base = '%s://' % protocol + url_base += self.build_host(server, bucket) + url_base += self.build_path_base(bucket, key) + return url_base + + def build_host(self, server, bucket): + if bucket == '': + return server + else: + return self.get_bucket_server(server, bucket) + + def build_auth_path(self, bucket, key=''): + path = '' + if bucket != '': + path = '/' + bucket + return path + '/%s' % urllib.quote(key) + + def build_path_base(self, bucket, key=''): + return '/%s' % urllib.quote(key) + +class SubdomainCallingFormat(_CallingFormat): + @assert_case_insensitive + def get_bucket_server(self, server, bucket): + return '%s.%s' % (bucket, server) + +class VHostCallingFormat(_CallingFormat): + @assert_case_insensitive + def get_bucket_server(self, server, bucket): + return bucket + +class OrdinaryCallingFormat(_CallingFormat): + def get_bucket_server(self, server, bucket): + return server + + def build_path_base(self, bucket, key=''): + path_base = '/' + if bucket: + path_base += "%s/" % bucket + return path_base + urllib.quote(key) + +class Location: + DEFAULT = '' + EU = 'EU' + USWest = 'us-west-1' + +#boto.set_stream_logger('s3') + +class S3Connection(AWSAuthConnection): + + DefaultHost = 's3.amazonaws.com' + QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, + host=DefaultHost, debug=0, https_connection_factory=None, + calling_format=SubdomainCallingFormat(), path='/'): + self.calling_format = calling_format + AWSAuthConnection.__init__(self, host, + aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + debug=debug, https_connection_factory=https_connection_factory, + path=path) + + def __iter__(self): + return self.get_all_buckets() + + def __contains__(self, bucket_name): + return not (self.lookup(bucket_name) is None) + + def build_post_policy(self, expiration_time, conditions): + """ + Taken from the AWS book Python examples and modified for use with boto + """ + if type(expiration_time) != time.struct_time: + raise 'Policy document must include a valid expiration Time object' + + # Convert conditions object mappings to condition statements + + return '{"expiration": "%s",\n"conditions": [%s]}' % \ + (time.strftime(boto.utils.ISO8601, expiration_time), ",".join(conditions)) + + + def build_post_form_args(self, bucket_name, key, expires_in = 6000, + acl = None, success_action_redirect = None, max_content_length = None, + http_method = "http", fields=None, conditions=None): + """ + Taken from the AWS book Python examples and modified for use with boto + This only returns the arguments required for the post form, not 
the actual form + This does not return the file input field which also needs to be added + + :param bucket_name: Bucket to submit to + :type bucket_name: string + + :param key: Key name, optionally add ${filename} to the end to attach the submitted filename + :type key: string + + :param expires_in: Time (in seconds) before this expires, defaults to 6000 + :type expires_in: integer + + :param acl: ACL rule to use, if any + :type acl: :class:`boto.s3.acl.ACL` + + :param success_action_redirect: URL to redirect to on success + :type success_action_redirect: string + + :param max_content_length: Maximum size for this file + :type max_content_length: integer + + :type http_method: string + :param http_method: HTTP Method to use, "http" or "https" + + + :rtype: dict + :return: A dictionary containing field names/values as well as a url to POST to + + .. code-block:: python + + { + "action": action_url_to_post_to, + "fields": [ + { + "name": field_name, + "value": field_value + }, + { + "name": field_name2, + "value": field_value2 + } + ] + } + + """ + if fields == None: + fields = [] + if conditions == None: + conditions = [] + expiration = time.gmtime(int(time.time() + expires_in)) + + # Generate policy document + conditions.append('{"bucket": "%s"}' % bucket_name) + if key.endswith("${filename}"): + conditions.append('["starts-with", "$key", "%s"]' % key[:-len("${filename}")]) + else: + conditions.append('{"key": "%s"}' % key) + if acl: + conditions.append('{"acl": "%s"}' % acl) + fields.append({ "name": "acl", "value": acl}) + if success_action_redirect: + conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect) + fields.append({ "name": "success_action_redirect", "value": success_action_redirect}) + if max_content_length: + conditions.append('["content-length-range", 0, %i]' % max_content_length) + fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length}) + + policy = self.build_post_policy(expiration, conditions) + + # Add the base64-encoded policy document as the 'policy' field + policy_b64 = base64.b64encode(policy) + fields.append({"name": "policy", "value": policy_b64}) + + # Add the AWS access key as the 'AWSAccessKeyId' field + fields.append({"name": "AWSAccessKeyId", "value": self.aws_access_key_id}) + + # Add signature for encoded policy document as the 'AWSAccessKeyId' field + hmac_copy = self.hmac.copy() + hmac_copy.update(policy_b64) + signature = base64.encodestring(hmac_copy.digest()).strip() + fields.append({"name": "signature", "value": signature}) + fields.append({"name": "key", "value": key}) + + # HTTPS protocol will be used if the secure HTTP option is enabled. + url = '%s://%s.s3.amazonaws.com/' % (http_method, bucket_name) + + return {"action": url, "fields": fields} + + + def generate_url(self, expires_in, method, bucket='', key='', + headers=None, query_auth=True, force_http=False): + if not headers: + headers = {} + expires = int(time.time() + expires_in) + auth_path = self.calling_format.build_auth_path(bucket, key) + canonical_str = boto.utils.canonical_string(method, auth_path, + headers, expires) + hmac_copy = self.hmac.copy() + hmac_copy.update(canonical_str) + b64_hmac = base64.encodestring(hmac_copy.digest()).strip() + encoded_canonical = urllib.quote_plus(b64_hmac) + self.calling_format.build_path_base(bucket, key) + if query_auth: + query_part = '?' 
+ self.QueryString % (encoded_canonical, expires, + self.aws_access_key_id) + if 'x-amz-security-token' in headers: + query_part += '&x-amz-security-token=%s' % urllib.quote(headers['x-amz-security-token']); + else: + query_part = '' + if force_http: + protocol = 'http' + port = 80 + else: + protocol = self.protocol + port = self.port + return self.calling_format.build_url_base(protocol, self.server_name(port), + bucket, key) + query_part + + def get_all_buckets(self, headers=None): + response = self.make_request('GET') + body = response.read() + if response.status > 300: + raise S3ResponseError(response.status, response.reason, body, headers=headers) + rs = ResultSet([('Bucket', Bucket)]) + h = handler.XmlHandler(rs, self) + xml.sax.parseString(body, h) + return rs + + def get_canonical_user_id(self, headers=None): + """ + Convenience method that returns the "CanonicalUserID" of the user who's credentials + are associated with the connection. The only way to get this value is to do a GET + request on the service which returns all buckets associated with the account. As part + of that response, the canonical userid is returned. This method simply does all of + that and then returns just the user id. + + :rtype: string + :return: A string containing the canonical user id. + """ + rs = self.get_all_buckets(headers=headers) + return rs.ID + + def get_bucket(self, bucket_name, validate=True, headers=None): + bucket = Bucket(self, bucket_name) + if validate: + bucket.get_all_keys(headers, maxkeys=0) + return bucket + + def lookup(self, bucket_name, validate=True, headers=None): + try: + bucket = self.get_bucket(bucket_name, validate, headers=headers) + except: + bucket = None + return bucket + + def create_bucket(self, bucket_name, headers=None, + location=Location.DEFAULT, policy=None): + """ + Creates a new located bucket. By default it's in the USA. You can pass + Location.EU to create an European bucket. + + :type bucket_name: string + :param bucket_name: The name of the new bucket + + :type headers: dict + :param headers: Additional headers to pass along with the request to AWS. + + :type location: :class:`boto.s3.connection.Location` + :param location: The location of the new bucket + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the new key in S3. + + """ + # TODO: Not sure what Exception Type from boto.exception to use. 
+ if not bucket_name.islower(): + raise Exception("Bucket names must be lower case.") + + if policy: + if headers: + headers['x-amz-acl'] = policy + else: + headers = {'x-amz-acl' : policy} + if location == Location.DEFAULT: + data = '' + else: + data = '' + \ + location + '' + response = self.make_request('PUT', bucket_name, headers=headers, + data=data) + body = response.read() + if response.status == 409: + raise S3CreateError(response.status, response.reason, body) + if response.status == 200: + return Bucket(self, bucket_name) + else: + raise S3ResponseError(response.status, response.reason, body) + + def delete_bucket(self, bucket, headers=None): + response = self.make_request('DELETE', bucket, headers=headers) + body = response.read() + if response.status != 204: + raise S3ResponseError(response.status, response.reason, body) + + def make_request(self, method, bucket='', key='', headers=None, data='', + query_args=None, sender=None): + if isinstance(bucket, Bucket): + bucket = bucket.name + if isinstance(key, Key): + key = key.name + path = self.calling_format.build_path_base(bucket, key) + auth_path = self.calling_format.build_auth_path(bucket, key) + host = self.calling_format.build_host(self.server_name(), bucket) + if query_args: + path += '?' + query_args + auth_path += '?' + query_args + return AWSAuthConnection.make_request(self, method, path, headers, + data, host, auth_path, sender) + diff --git a/vendor/boto/boto/s3/deletemarker.py b/vendor/boto/boto/s3/deletemarker.py new file mode 100644 index 000000000000..3462d42eebb7 --- /dev/null +++ b/vendor/boto/boto/s3/deletemarker.py @@ -0,0 +1,56 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
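
S3Connection above layers the calling-format classes on top of AWSAuthConnection. A brief sketch of its most common entry points; credentials, bucket and key names are placeholders.

# Usage sketch for S3Connection as defined above; all names are placeholders.
from boto.s3.connection import S3Connection, Location, OrdinaryCallingFormat

conn = S3Connection('ACCESS_KEY', 'SECRET_KEY',
                    calling_format=OrdinaryCallingFormat())   # path-style requests

# create_bucket sends a CreateBucketConfiguration body for non-default locations.
bucket = conn.create_bucket('example-bucket', location=Location.EU)

# generate_url signs the canonical string with the connection's HMAC key and
# returns a pre-signed, time-limited URL (300 seconds here).
url = conn.generate_url(300, 'GET', bucket='example-bucket', key='reports/2010.csv')
print url

print [b.name for b in conn.get_all_buckets()]
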
+ +from boto.s3.user import User + +class DeleteMarker: + def __init__(self, bucket=None, name=None): + self.bucket = bucket + self.name = name + self.is_latest = False + self.last_modified = None + self.owner = None + + def startElement(self, name, attrs, connection): + if name == 'Owner': + self.owner = User(self) + return self.owner + else: + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value.encode('utf-8') + elif name == 'IsLatest': + if value == 'true': + self.is_lastest = True + else: + self.is_latest = False + elif name == 'LastModified': + self.last_modified = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + else: + setattr(self, name, value) + + diff --git a/vendor/boto/boto/s3/key.py b/vendor/boto/boto/s3/key.py new file mode 100644 index 000000000000..a0bf840d0bb5 --- /dev/null +++ b/vendor/boto/boto/s3/key.py @@ -0,0 +1,804 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
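
DeleteMarker instances appear alongside Key instances when iterating a version listing, since get_all_versions maps both element types. A short sketch of telling them apart; the bucket name is a placeholder and versioning is assumed to be enabled.

# Sketch: separating delete markers from real object versions in a listing.
from boto.s3.connection import S3Connection
from boto.s3.deletemarker import DeleteMarker

bucket = S3Connection('ACCESS_KEY', 'SECRET_KEY').get_bucket('example-bucket')

for item in bucket.list_versions(prefix='logs/'):
    if isinstance(item, DeleteMarker):
        print 'delete marker:', item.name, getattr(item, 'version_id', None)
    else:
        print 'version:', item.name, item.version_id, item.size
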
+ +import mimetypes +import os +import rfc822 +import StringIO +import base64 +import boto.utils +from boto.exception import S3ResponseError, S3DataError, BotoClientError +from boto.s3.user import User +from boto import UserAgent + +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + + +class Key(object): + + DefaultContentType = 'application/octet-stream' + + BufferSize = 8192 + + def __init__(self, bucket=None, name=None): + self.bucket = bucket + self.name = name + self.metadata = {} + self.content_type = self.DefaultContentType + self.content_encoding = None + self.filename = None + self.etag = None + self.last_modified = None + self.owner = None + self.storage_class = None + self.md5 = None + self.base64md5 = None + self.path = None + self.resp = None + self.mode = None + self.size = None + self.version_id = None + self.source_version_id = None + self.delete_marker = False + + def __repr__(self): + if self.bucket: + return '' % (self.bucket.name, self.name) + else: + return '' % self.name + + def __getattr__(self, name): + if name == 'key': + return self.name + else: + raise AttributeError + + def __setattr__(self, name, value): + if name == 'key': + self.__dict__['name'] = value + else: + self.__dict__[name] = value + + def __iter__(self): + return self + + def handle_version_headers(self, resp): + self.version_id = resp.getheader('x-amz-version-id', None) + self.source_version_id = resp.getheader('x-amz-copy-source-version-id', None) + if resp.getheader('x-amz-delete-marker', 'false') == 'true': + self.delete_marker = True + else: + self.delete_marker = False + + def open_read(self, headers=None, query_args=None): + """ + Open this key for reading + + :type headers: dict + :param headers: Headers to pass in the web request + + :type query_args: string + :param query_args: Arguments to pass in the query string (ie, 'torrent') + """ + if self.resp == None: + self.mode = 'r' + + self.resp = self.bucket.connection.make_request('GET', + self.bucket.name, + self.name, headers, + query_args=query_args) + if self.resp.status < 199 or self.resp.status > 299: + body = self.resp.read() + raise S3ResponseError(self.resp.status, self.resp.reason, body) + response_headers = self.resp.msg + self.metadata = boto.utils.get_aws_metadata(response_headers) + for name,value in response_headers.items(): + if name.lower() == 'content-length': + self.size = int(value) + elif name.lower() == 'etag': + self.etag = value + elif name.lower() == 'content-type': + self.content_type = value + elif name.lower() == 'content-encoding': + self.content_encoding = value + elif name.lower() == 'last-modified': + self.last_modified = value + self.handle_version_headers(self.resp) + + def open_write(self, headers=None): + """ + Open this key for writing. + Not yet implemented + + :type headers: dict + :param headers: Headers to pass in the write request + """ + raise BotoClientError('Not Implemented') + + def open(self, mode='r', headers=None, query_args=None): + if mode == 'r': + self.mode = 'r' + self.open_read(headers=headers, query_args=query_args) + elif mode == 'w': + self.mode = 'w' + self.open_write(headers=headers) + else: + raise BotoClientError('Invalid mode: %s' % mode) + + closed = False + def close(self): + if self.resp: + self.resp.read() + self.resp = None + self.mode = None + self.closed = True + + def next(self): + """ + By providing a next method, the key object supports use as an iterator. 
+ For example, you can now say: + + for bytes in key: + write bytes to a file or whatever + + All of the HTTP connection stuff is handled for you. + """ + self.open_read() + data = self.resp.read(self.BufferSize) + if not data: + self.close() + raise StopIteration + return data + + def read(self, size=0): + if size == 0: + size = self.BufferSize + self.open_read() + data = self.resp.read(size) + if not data: + self.close() + return data + + def copy(self, dst_bucket, dst_key, metadata=None): + """ + Copy this Key to another bucket. + + :type dst_bucket: string + :param dst_bucket: The name of the destination bucket + + :type dst_key: string + :param dst_key: The name of the destinatino key + + :type metadata: dict + :param metadata: Metadata to be associated with new key. + If metadata is supplied, it will replace the + metadata of the source key being copied. + If no metadata is supplied, the source key's + metadata will be copied to the new key. + + :rtype: :class:`boto.s3.key.Key` or subclass + :returns: An instance of the newly created key object + """ + dst_bucket = self.bucket.connection.lookup(dst_bucket) + return dst_bucket.copy_key(dst_key, self.bucket.name, self.name, metadata) + + def startElement(self, name, attrs, connection): + if name == 'Owner': + self.owner = User(self) + return self.owner + else: + return None + + def endElement(self, name, value, connection): + if name == 'Key': + self.name = value.encode('utf-8') + elif name == 'ETag': + self.etag = value + elif name == 'LastModified': + self.last_modified = value + elif name == 'Size': + self.size = int(value) + elif name == 'StorageClass': + self.storage_class = value + elif name == 'Owner': + pass + elif name == 'VersionId': + self.version_id = value + else: + setattr(self, name, value) + + def exists(self): + """ + Returns True if the key exists + + :rtype: bool + :return: Whether the key exists on S3 + """ + return bool(self.bucket.lookup(self.name)) + + def delete(self): + """ + Delete this key from S3 + """ + return self.bucket.delete_key(self.name) + + def get_metadata(self, name): + return self.metadata.get(name) + + def set_metadata(self, name, value): + self.metadata[name] = value + + def update_metadata(self, d): + self.metadata.update(d) + + # convenience methods for setting/getting ACL + def set_acl(self, acl_str, headers=None): + if self.bucket != None: + self.bucket.set_acl(acl_str, self.name, headers=headers) + + def get_acl(self, headers=None): + if self.bucket != None: + return self.bucket.get_acl(self.name, headers=headers) + + def get_xml_acl(self, headers=None): + if self.bucket != None: + return self.bucket.get_xml_acl(self.name, headers=headers) + + def set_xml_acl(self, acl_str, headers=None): + if self.bucket != None: + return self.bucket.set_xml_acl(acl_str, self.name, headers=headers) + + def set_canned_acl(self, acl_str, headers=None): + return self.bucket.set_canned_acl(acl_str, self.name, headers) + + def make_public(self, headers=None): + return self.bucket.set_canned_acl('public-read', self.name, headers) + + def generate_url(self, expires_in, method='GET', headers=None, + query_auth=True, force_http=False): + """ + Generate a URL to access this key. 
+ + :type expires_in: int + :param expires_in: How long the url is valid for, in seconds + + :type method: string + :param method: The method to use for retrieving the file (default is GET) + + :type headers: dict + :param headers: Any headers to pass along in the request + + :type query_auth: bool + :param query_auth: + + :rtype: string + :return: The URL to access the key + """ + return self.bucket.connection.generate_url(expires_in, method, + self.bucket.name, self.name, + headers, query_auth, force_http) + + def send_file(self, fp, headers=None, cb=None, num_cb=10): + """ + Upload a file to a key into a bucket on S3. + + :type fp: file + :param fp: The file pointer to upload + + :type headers: dict + :param headers: The headers to pass along with the PUT request + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted to S3 and the second representing + the total number of bytes that need to be transmitted. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + """ + def sender(http_conn, method, path, data, headers): + http_conn.putrequest(method, path) + for key in headers: + http_conn.putheader(key, headers[key]) + http_conn.endheaders() + fp.seek(0) + save_debug = self.bucket.connection.debug + self.bucket.connection.debug = 0 + if cb: + if num_cb > 2: + cb_count = self.size / self.BufferSize / (num_cb-2) + else: + cb_count = 0 + i = total_bytes = 0 + cb(total_bytes, self.size) + l = fp.read(self.BufferSize) + while len(l) > 0: + http_conn.send(l) + if cb: + total_bytes += len(l) + i += 1 + if i == cb_count: + cb(total_bytes, self.size) + i = 0 + l = fp.read(self.BufferSize) + if cb: + cb(total_bytes, self.size) + response = http_conn.getresponse() + body = response.read() + fp.seek(0) + self.bucket.connection.debug = save_debug + if response.status == 500 or response.status == 503 or \ + response.getheader('location'): + # we'll try again + return response + elif response.status >= 200 and response.status <= 299: + self.etag = response.getheader('etag') + if self.etag != '"%s"' % self.md5: + raise S3DataError('ETag from S3 did not match computed MD5') + return response + else: + raise S3ResponseError(response.status, response.reason, body) + + if not headers: + headers = {} + else: + headers = headers.copy() + headers['User-Agent'] = UserAgent + headers['Content-MD5'] = self.base64md5 + if headers.has_key('Content-Type'): + self.content_type = headers['Content-Type'] + elif self.path: + self.content_type = mimetypes.guess_type(self.path)[0] + if self.content_type == None: + self.content_type = self.DefaultContentType + headers['Content-Type'] = self.content_type + else: + headers['Content-Type'] = self.content_type + headers['Content-Length'] = str(self.size) + headers['Expect'] = '100-Continue' + headers = boto.utils.merge_meta(headers, self.metadata) + resp = self.bucket.connection.make_request('PUT', self.bucket.name, + self.name, headers, + sender=sender) + self.handle_version_headers(resp) + + def compute_md5(self, fp): + """ + :type fp: file + :param fp: File pointer to the file to MD5 hash. The file pointer will be + reset to the beginning of the file before the method returns. 
+ + :rtype: tuple + :return: A tuple containing the hex digest version of the MD5 hash + as the first element and the base64 encoded version of the + plain digest as the second element. + """ + m = md5() + fp.seek(0) + s = fp.read(self.BufferSize) + while s: + m.update(s) + s = fp.read(self.BufferSize) + hex_md5 = m.hexdigest() + base64md5 = base64.encodestring(m.digest()) + if base64md5[-1] == '\n': + base64md5 = base64md5[0:-1] + self.size = fp.tell() + fp.seek(0) + return (hex_md5, base64md5) + + def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, num_cb=10, + policy=None, md5=None): + """ + Store an object in S3 using the name of the Key object as the + key in S3 and the contents of the file pointed to by 'fp' as the + contents. + + :type fp: file + :param fp: the file whose contents to upload + + :type headers: dict + :param headers: additional HTTP headers that will be sent with the PUT request. + + :type replace: bool + :param replace: If this parameter is False, the method + will first check to see if an object exists in the + bucket with the same key. If it does, it won't + overwrite it. The default value is True which will + overwrite the object. + + :type cb: function + :param cb: a callback function that will be called to report + progress on the upload. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted to S3 and the second representing + the total number of bytes that need to be transmitted. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the new key in S3. + + :type md5: A tuple containing the hexdigest version of the MD5 checksum of the + file as the first element and the Base64-encoded version of the plain + checksum as the second element. This is the same format returned by + the compute_md5 method. + :param md5: If you need to compute the MD5 for any reason prior to upload, + it's silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be computed. + """ + if policy: + if headers: + headers['x-amz-acl'] = policy + else: + headers = {'x-amz-acl' : policy} + if hasattr(fp, 'name'): + self.path = fp.name + if self.bucket != None: + if not md5: + md5 = self.compute_md5(fp) + self.md5 = md5[0] + self.base64md5 = md5[1] + if self.name == None: + self.name = self.md5 + if not replace: + k = self.bucket.lookup(self.name) + if k: + return + self.send_file(fp, headers, cb, num_cb) + + def set_contents_from_filename(self, filename, headers=None, replace=True, cb=None, num_cb=10, + policy=None, md5=None): + """ + Store an object in S3 using the name of the Key object as the + key in S3 and the contents of the file named by 'filename'. + See set_contents_from_file method for details about the + parameters. + + :type filename: string + :param filename: The name of the file that you want to put onto S3 + + :type headers: dict + :param headers: Additional headers to pass along with the request to AWS. + + :type replace: bool + :param replace: If True, replaces the contents of the file if it already exists. 
+ + :type cb: function + :param cb: (optional) a callback function that will be called to report + progress on the download. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted from S3 and the second representing + the total number of bytes that need to be transmitted. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the new key in S3. + + :type md5: A tuple containing the hexdigest version of the MD5 checksum of the + file as the first element and the Base64-encoded version of the plain + checksum as the second element. This is the same format returned by + the compute_md5 method. + :param md5: If you need to compute the MD5 for any reason prior to upload, + it's silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be computed. + """ + fp = open(filename, 'rb') + self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy) + fp.close() + + def set_contents_from_string(self, s, headers=None, replace=True, cb=None, num_cb=10, + policy=None, md5=None): + """ + Store an object in S3 using the name of the Key object as the + key in S3 and the string 's' as the contents. + See set_contents_from_file method for details about the + parameters. + + :type headers: dict + :param headers: Additional headers to pass along with the request to AWS. + + :type replace: bool + :param replace: If True, replaces the contents of the file if it already exists. + + :type cb: function + :param cb: (optional) a callback function that will be called to report + progress on the download. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted from S3 and the second representing + the total number of bytes that need to be transmitted. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + :type policy: :class:`boto.s3.acl.CannedACLStrings` + :param policy: A canned ACL policy that will be applied to the new key in S3. + + :type md5: A tuple containing the hexdigest version of the MD5 checksum of the + file as the first element and the Base64-encoded version of the plain + checksum as the second element. This is the same format returned by + the compute_md5 method. + :param md5: If you need to compute the MD5 for any reason prior to upload, + it's silly to have to do it twice so this param, if present, will be + used as the MD5 values of the file. Otherwise, the checksum will be computed. 
+ """ + fp = StringIO.StringIO(s) + r = self.set_contents_from_file(fp, headers, replace, cb, num_cb, policy) + fp.close() + return r + + def get_file(self, fp, headers=None, cb=None, num_cb=10, + torrent=False, version_id=None): + """ + Retrieves a file from an S3 Key + + :type fp: file + :param fp: File pointer to put the data into + + :type headers: string + :param: headers to send when retrieving the files + + :type cb: function + :param cb: (optional) a callback function that will be called to report + progress on the download. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted from S3 and the second representing + the total number of bytes that need to be transmitted. + + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + :type torrent: bool + :param torrent: Flag for whether to get a torrent for the file + """ + if cb: + if num_cb > 2: + cb_count = self.size / self.BufferSize / (num_cb-2) + else: + cb_count = 0 + i = total_bytes = 0 + cb(total_bytes, self.size) + save_debug = self.bucket.connection.debug + if self.bucket.connection.debug == 1: + self.bucket.connection.debug = 0 + + query_args = '' + if torrent: + query_args = 'torrent' + elif version_id: + query_args = 'versionId=%s' % version_id + self.open('r', headers, query_args=query_args) + for bytes in self: + fp.write(bytes) + if cb: + total_bytes += len(bytes) + i += 1 + if i == cb_count: + cb(total_bytes, self.size) + i = 0 + if cb: + cb(total_bytes, self.size) + self.close() + self.bucket.connection.debug = save_debug + + def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10): + """ + Get a torrent file (see to get_file) + + :type fp: file + :param fp: The file pointer of where to put the torrent + + :type headers: dict + :param headers: Headers to be passed + + :type cb: function + :param cb: Callback function to call on retrieved data + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + """ + return self.get_file(fp, headers, cb, num_cb, torrent=True) + + def get_contents_to_file(self, fp, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None): + """ + Retrieve an object from S3 using the name of the Key object as the + key in S3. Write the contents of the object to the file pointed + to by 'fp'. + + :type fp: File -like object + :param fp: + + :type headers: dict + :param headers: additional HTTP headers that will be sent with the GET request. + + :type cb: function + :param cb: (optional) a callback function that will be called to report + progress on the download. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted from S3 and the second representing + the total number of bytes that need to be transmitted. + + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. 
+ + :type torrent: bool + :param torrent: If True, returns the contents of a torrent file as a string. + + """ + if self.bucket != None: + self.get_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id) + + def get_contents_to_filename(self, filename, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None): + """ + Retrieve an object from S3 using the name of the Key object as the + key in S3. Store contents of the object to a file named by 'filename'. + See get_contents_to_file method for details about the + parameters. + + :type filename: string + :param filename: The filename of where to put the file contents + + :type headers: dict + :param headers: Any additional headers to send in the request + + :type cb: function + :param cb: (optional) a callback function that will be called to report + progress on the download. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted from S3 and the second representing + the total number of bytes that need to be transmitted. + + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + :type torrent: bool + :param torrent: If True, returns the contents of a torrent file as a string. + + """ + fp = open(filename, 'wb') + self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id) + fp.close() + # if last_modified date was sent from s3, try to set file's timestamp + if self.last_modified != None: + try: + modified_tuple = rfc822.parsedate_tz(self.last_modified) + modified_stamp = int(rfc822.mktime_tz(modified_tuple)) + os.utime(fp.name, (modified_stamp, modified_stamp)) + except Exception: pass + + def get_contents_as_string(self, headers=None, + cb=None, num_cb=10, + torrent=False, + version_id=None): + """ + Retrieve an object from S3 using the name of the Key object as the + key in S3. Return the contents of the object as a string. + See get_contents_to_file method for details about the + parameters. + + :type headers: dict + :param headers: Any additional headers to send in the request + + :type cb: function + :param cb: (optional) a callback function that will be called to report + progress on the download. The callback should accept two integer + parameters, the first representing the number of bytes that have + been successfully transmitted from S3 and the second representing + the total number of bytes that need to be transmitted. + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + + :type cb: int + :param num_cb: (optional) If a callback is specified with the cb parameter + this parameter determines the granularity of the callback by defining + the maximum number of times the callback will be called during the file transfer. + + :type torrent: bool + :param torrent: If True, returns the contents of a torrent file as a string. 
+ + :rtype: string + :returns: The contents of the file as a string + """ + fp = StringIO.StringIO() + self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent, + version_id=version_id) + return fp.getvalue() + + def add_email_grant(self, permission, email_address): + """ + Convenience method that provides a quick way to add an email grant to a key. + This method retrieves the current ACL, creates a new grant based on the parameters + passed in, adds that grant to the ACL and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + READ|WRITE|READ_ACP|WRITE_ACP|FULL_CONTROL + See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingAuthAccess.html + for more details on permissions. + + :type email_address: string + :param email_address: The email address associated with the AWS account your are granting + the permission to. + """ + policy = self.get_acl() + policy.acl.add_email_grant(permission, email_address) + self.set_acl(policy) + + def add_user_grant(self, permission, user_id): + """ + Convenience method that provides a quick way to add a canonical user grant to a key. + This method retrieves the current ACL, creates a new grant based on the parameters + passed in, adds that grant to the ACL and then PUT's the new ACL back to S3. + + :type permission: string + :param permission: The permission being granted. Should be one of: + READ|WRITE|READ_ACP|WRITE_ACP|FULL_CONTROL + See http://docs.amazonwebservices.com/AmazonS3/2006-03-01/UsingAuthAccess.html + for more details on permissions. + + :type user_id: string + :param user_id: The canonical user id associated with the AWS account your are granting + the permission to. + """ + policy = self.get_acl() + policy.acl.add_user_grant(permission, user_id) + self.set_acl(policy) diff --git a/vendor/boto/boto/s3/prefix.py b/vendor/boto/boto/s3/prefix.py new file mode 100644 index 000000000000..fc0f26ab58ae --- /dev/null +++ b/vendor/boto/boto/s3/prefix.py @@ -0,0 +1,35 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
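The add_email_grant and add_user_grant helpers above each perform a full get_acl/set_acl round trip against S3. A minimal usage sketch, assuming credentials are available to boto through its config file or environment; 'my-bucket', 'report.txt' and the email address are placeholder values, not part of this patch:

from boto.s3.connection import S3Connection

conn = S3Connection()                               # credentials from boto config / environment
bucket = conn.get_bucket('my-bucket')               # placeholder bucket name
key = bucket.get_key('report.txt')                  # placeholder object name

data = key.get_contents_as_string()                 # download the object into memory
key.add_email_grant('READ', 'user@example.com')     # GETs the ACL, adds the grant, PUTs it back

Because each grant helper issues its own GET and PUT of the ACL, adding several grants at once is cheaper done through a single get_acl/set_acl pair.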
+ +class Prefix: + def __init__(self, bucket=None, name=None): + self.bucket = bucket + self.name = name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'Prefix': + self.name = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/s3/user.py b/vendor/boto/boto/s3/user.py new file mode 100644 index 000000000000..f45f03813052 --- /dev/null +++ b/vendor/boto/boto/s3/user.py @@ -0,0 +1,49 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class User: + def __init__(self, parent=None, id='', display_name=''): + if parent: + parent.owner = self + self.type = None + self.id = id + self.display_name = display_name + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'DisplayName': + self.display_name = value + elif name == 'ID': + self.id = value + else: + setattr(self, name, value) + + def to_xml(self, element_name='Owner'): + if self.type: + s = '<%s xsi:type="%s">' % (element_name, self.type) + else: + s = '<%s>' % element_name + s += '%s' % self.id + s += '%s' % self.display_name + s += '' % element_name + return s diff --git a/vendor/boto/boto/sdb/__init__.py b/vendor/boto/boto/sdb/__init__.py new file mode 100644 index 000000000000..df1f95b3e222 --- /dev/null +++ b/vendor/boto/boto/sdb/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from regioninfo import SDBRegionInfo + + +def regions(): + """ + Get all available regions for the SDB service. + + :rtype: list + :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` + """ + return [SDBRegionInfo(name='us-east-1', endpoint='sdb.amazonaws.com'), + SDBRegionInfo(name='eu-west-1', endpoint='sdb.eu-west-1.amazonaws.com'), + SDBRegionInfo(name='us-west-1', endpoint='sdb.us-west-1.amazonaws.com')] + +def connect_to_region(region_name): + for region in regions(): + if region.name == region_name: + return region.connect() + return None diff --git a/vendor/boto/boto/sdb/connection.py b/vendor/boto/boto/sdb/connection.py new file mode 100644 index 000000000000..0824f5cb4827 --- /dev/null +++ b/vendor/boto/boto/sdb/connection.py @@ -0,0 +1,441 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
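The regions() and connect_to_region() helpers above hard-code the three SimpleDB endpoints available at the time of this commit. A short sketch of how they might be used, again assuming credentials come from the boto config or environment:

import boto.sdb

for region in boto.sdb.regions():
    print region.name, region.endpoint              # e.g. us-east-1 sdb.amazonaws.com

conn = boto.sdb.connect_to_region('us-east-1')
if conn is None:
    raise ValueError('unknown SimpleDB region name')

Note that connect_to_region() returns None rather than raising for an unknown name, so callers should check the result.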
+ +import xml.sax +import threading +from boto import handler +from boto.connection import AWSQueryConnection +from boto.sdb.domain import Domain, DomainMetaData +from boto.sdb.item import Item +from boto.sdb.regioninfo import SDBRegionInfo +from boto.exception import SDBResponseError +from boto.resultset import ResultSet +import warnings + + +class ItemThread(threading.Thread): + + def __init__(self, name, domain_name, item_names): + threading.Thread.__init__(self, name=name) + print 'starting %s with %d items' % (name, len(item_names)) + self.domain_name = domain_name + self.conn = SDBConnection() + self.item_names = item_names + self.items = [] + + def run(self): + for item_name in self.item_names: + item = self.conn.get_attributes(self.domain_name, item_name) + self.items.append(item) + +#boto.set_stream_logger('sdb') + +class SDBConnection(AWSQueryConnection): + + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'sdb.amazonaws.com' + APIVersion = '2009-04-15' + SignatureVersion = '2' + ResponseError = SDBResponseError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', converter=None): + if not region: + region = SDBRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + self.region.endpoint, debug, https_connection_factory, path) + self.box_usage = 0.0 + self.converter = converter + self.item_cls = Item + + def set_item_cls(self, cls): + self.item_cls = cls + + def build_name_value_list(self, params, attributes, replace=False, + label='Attribute'): + keys = attributes.keys() + keys.sort() + i = 1 + for key in keys: + value = attributes[key] + if isinstance(value, list): + for v in value: + params['%s.%d.Name'%(label,i)] = key + if self.converter: + v = self.converter.encode(v) + params['%s.%d.Value'%(label,i)] = v + if replace: + params['%s.%d.Replace'%(label,i)] = 'true' + i += 1 + else: + params['%s.%d.Name'%(label,i)] = key + if self.converter: + value = self.converter.encode(value) + params['%s.%d.Value'%(label,i)] = value + if replace: + params['%s.%d.Replace'%(label,i)] = 'true' + i += 1 + + def build_expected_value(self, params, expected_value): + params['Expected.1.Name'] = expected_value[0] + if expected_value[1] == True: + params['Expected.1.Exists'] = 'true' + elif expected_value[1] == False: + params['Expected.1.Exists'] = 'false' + else: + params['Expected.1.Value'] = expected_value[1] + + + def build_batch_list(self, params, items, replace=False): + item_names = items.keys() + i = 0 + for item_name in item_names: + j = 0 + item = items[item_name] + attr_names = item.keys() + params['Item.%d.ItemName' % i] = item_name + for attr_name in attr_names: + value = item[attr_name] + if isinstance(value, list): + for v in value: + if self.converter: + v = self.converter.encode(v) + params['Item.%d.Attribute.%d.Name' % (i,j)] = attr_name + params['Item.%d.Attribute.%d.Value' % (i,j)] = v + if replace: + params['Item.%d.Attribute.%d.Replace' % (i,j)] = 'true' + j += 1 + else: + params['Item.%d.Attribute.%d.Name' % (i,j)] = attr_name + if self.converter: + value = self.converter.encode(value) + params['Item.%d.Attribute.%d.Value' % (i,j)] = value + if replace: + params['Item.%d.Attribute.%d.Replace' % (i,j)] = 'true' + 
j += 1 + i += 1 + + def build_name_list(self, params, attribute_names): + i = 1 + attribute_names.sort() + for name in attribute_names: + params['Attribute.%d.Name'%i] = name + i += 1 + + def get_usage(self): + """ + Returns the BoxUsage accumulated on this SDBConnection object. + + :rtype: float + :return: The accumulated BoxUsage of all requests made on the connection. + """ + return self.box_usage + + def print_usage(self): + """ + Print the BoxUsage and approximate costs of all requests made on this connection. + """ + print 'Total Usage: %f compute seconds' % self.box_usage + cost = self.box_usage * 0.14 + print 'Approximate Cost: $%f' % cost + + def get_domain(self, domain_name, validate=True): + domain = Domain(self, domain_name) + if validate: + self.select(domain, """select * from `%s` limit 1""" % domain_name) + return domain + + def lookup(self, domain_name, validate=True): + """ + Lookup an existing SimpleDB domain + + :type domain_name: string + :param domain_name: The name of the new domain + + :rtype: :class:`boto.sdb.domain.Domain` object or None + :return: The Domain object or None if the domain does not exist. + """ + try: + domain = self.get_domain(domain_name, validate) + except: + domain = None + return domain + + def get_all_domains(self, max_domains=None, next_token=None): + params = {} + if max_domains: + params['MaxNumberOfDomains'] = max_domains + if next_token: + params['NextToken'] = next_token + return self.get_list('ListDomains', params, [('DomainName', Domain)]) + + def create_domain(self, domain_name): + """ + Create a SimpleDB domain. + + :type domain_name: string + :param domain_name: The name of the new domain + + :rtype: :class:`boto.sdb.domain.Domain` object + :return: The newly created domain + """ + params = {'DomainName':domain_name} + d = self.get_object('CreateDomain', params, Domain) + d.name = domain_name + return d + + def get_domain_and_name(self, domain_or_name): + if (isinstance(domain_or_name, Domain)): + return (domain_or_name, domain_or_name.name) + else: + return (self.get_domain(domain_or_name), domain_or_name) + + def delete_domain(self, domain_or_name): + """ + Delete a SimpleDB domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :rtype: bool + :return: True if successful + + B{Note:} This will delete the domain and all items within the domain. + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName':domain_name} + return self.get_status('DeleteDomain', params) + + def domain_metadata(self, domain_or_name): + """ + Get the Metadata for a SimpleDB domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :rtype: :class:`boto.sdb.domain.DomainMetaData` object + :return: The newly created domain metadata object + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName':domain_name} + d = self.get_object('DomainMetadata', params, DomainMetaData) + d.domain = domain + return d + + def put_attributes(self, domain_or_name, item_name, attributes, + replace=True, expected_value=None): + """ + Store attributes for a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. 
+ :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being stored. + + :type attribute_names: dict or dict-like object + :param attribute_names: The name/value pairs to store as attributes + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. + The list can be of the form: + * ['name', 'value'] + In which case the call will first verify + that the attribute "name" of this item has + a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed + error will be returned. + The list can also be of the form: + * ['name', True|False] + which will simply check for the existence (True) + or non-existencve (False) of the attribute. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName' : domain_name, + 'ItemName' : item_name} + self.build_name_value_list(params, attributes, replace) + if expected_value: + self.build_expected_value(params, expected_value) + return self.get_status('PutAttributes', params) + + def batch_put_attributes(self, domain_or_name, items, replace=True): + """ + Store attributes for multiple items in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are themselves dictionaries + of attribute names/values, exactly the same as the + attribute_names parameter of the scalar put_attributes + call. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName' : domain_name} + self.build_batch_list(params, items, replace) + return self.get_status('BatchPutAttributes', params, verb='POST') + + def get_attributes(self, domain_or_name, item_name, attribute_names=None, + consistent_read=False, item=None): + """ + Retrieve attributes for a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being retrieved. + + :type attribute_names: string or list of strings + :param attribute_names: An attribute name or list of attribute names. This + parameter is optional. If not supplied, all attributes + will be retrieved for the item. + + :type consistent_read: bool + :param consistent_read: When set to true, ensures that the most recent + data is returned. 
+ + :rtype: :class:`boto.sdb.item.Item` + :return: An Item mapping type containing the requested attribute name/values + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName' : domain_name, + 'ItemName' : item_name} + if consistent_read: + params['ConsistentRead'] = 'true' + if attribute_names: + if not isinstance(attribute_names, list): + attribute_names = [attribute_names] + self.build_list_params(params, attribute_names, 'AttributeName') + response = self.make_request('GetAttributes', params) + body = response.read() + if response.status == 200: + if item == None: + item = self.item_cls(domain, item_name) + h = handler.XmlHandler(item, self) + xml.sax.parseString(body, h) + return item + else: + raise SDBResponseError(response.status, response.reason, body) + + def delete_attributes(self, domain_or_name, item_name, attr_names=None, + expected_value=None): + """ + Delete attributes from a given item in a domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type item_name: string + :param item_name: The name of the item whose attributes are being deleted. + + :type attributes: dict, list or :class:`boto.sdb.item.Item` + :param attributes: Either a list containing attribute names which will cause + all values associated with that attribute name to be deleted or + a dict or Item containing the attribute names and keys and list + of values to delete as the value. If no value is supplied, + all attribute name/values for the item will be deleted. + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. + The list can be of the form: + * ['name', 'value'] + In which case the call will first verify + that the attribute "name" of this item has + a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed + error will be returned. + The list can also be of the form: + * ['name', True|False] + which will simply check for the existence (True) + or non-existencve (False) of the attribute. + + :rtype: bool + :return: True if successful + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'DomainName':domain_name, + 'ItemName' : item_name} + if attr_names: + if isinstance(attr_names, list): + self.build_name_list(params, attr_names) + elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls): + self.build_name_value_list(params, attr_names) + if expected_value: + self.build_expected_value(params, expected_value) + return self.get_status('DeleteAttributes', params) + + def select(self, domain_or_name, query='', next_token=None, + consistent_read=False): + """ + Returns a set of Attributes for item names within domain_name that match the query. + The query must be expressed in using the SELECT style syntax rather than the + original SimpleDB query language. + Even though the select request does not require a domain object, a domain + object must be passed into this method so the Item objects returned can + point to the appropriate domain. + + :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object. + :param domain_or_name: Either the name of a domain or a Domain object + + :type query: string + :param query: The SimpleDB query to be performed. + + :type consistent_read: bool + :param consistent_read: When set to true, ensures that the most recent + data is returned. 
+ + :rtype: ResultSet + :return: An iterator containing the results. + """ + domain, domain_name = self.get_domain_and_name(domain_or_name) + params = {'SelectExpression' : query} + if consistent_read: + params['ConsistentRead'] = 'true' + if next_token: + params['NextToken'] = next_token + return self.get_list('Select', params, [('Item', self.item_cls)], + parent=domain) + diff --git a/vendor/boto/boto/sdb/db/__init__.py b/vendor/boto/boto/sdb/db/__init__.py new file mode 100644 index 000000000000..86044ed61b27 --- /dev/null +++ b/vendor/boto/boto/sdb/db/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + diff --git a/vendor/boto/boto/sdb/db/blob.py b/vendor/boto/boto/sdb/db/blob.py new file mode 100644 index 000000000000..8c0b66e14c3d --- /dev/null +++ b/vendor/boto/boto/sdb/db/blob.py @@ -0,0 +1,64 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
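Taken together, the SDBConnection methods above cover the usual domain lifecycle: create, put, get, select, delete. A minimal round trip, assuming credentials in the boto config or environment ('demo_domain' is a placeholder domain name):

import boto

sdb = boto.connect_sdb()                            # SDBConnection against the default region
domain = sdb.create_domain('demo_domain')

sdb.put_attributes('demo_domain', 'item1',
                   {'colour': 'blue', 'size': '10'})    # SimpleDB stores all values as strings
attrs = sdb.get_attributes('demo_domain', 'item1', consistent_read=True)

rs = sdb.select(domain, "select * from `demo_domain` where colour = 'blue'")
for item in rs:
    print item.name, dict(item)

sdb.delete_domain(domain)                           # removes the domain and every item in it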
+ + +class Blob(object): + """Blob object""" + def __init__(self, value=None, file=None, id=None): + self._file = file + self.id = id + self.value = value + + @property + def file(self): + from StringIO import StringIO + if self._file: + f = self._file + else: + f = StringIO(self.value) + return f + + def __str__(self): + if hasattr(self.file, "get_contents_as_string"): + return str(self.file.get_contents_as_string()) + else: + return str(self.file.getvalue()) + + def read(self): + return self.file.read() + + def readline(self): + return self.file.readline() + + def next(self): + return self.file.next() + + def __iter__(self): + return iter(self.file) + + @property + def size(self): + if self._file: + return self._file.size + elif self.value: + return len(self.value) + else: + return 0 diff --git a/vendor/boto/boto/sdb/db/key.py b/vendor/boto/boto/sdb/db/key.py new file mode 100644 index 000000000000..42a9d8dae79f --- /dev/null +++ b/vendor/boto/boto/sdb/db/key.py @@ -0,0 +1,59 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
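The Blob wrapper above accepts either an in-memory value or a file-like object (typically an S3 key, once the blob has been stored) and exposes both through the same read/str/iterate interface. A small sketch with an in-memory value:

from boto.sdb.db.blob import Blob

b = Blob(value='hello world')
print b.size              # 11, the length of the in-memory value
print str(b)              # 'hello world', via the StringIO fallback in the .file property
print b.read()            # the same data through the file-like interface

When no file object was supplied, the .file property builds a fresh StringIO around .value on every access, so repeated read() calls each start from the beginning of the data.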
+ +class Key(object): + + @classmethod + def from_path(cls, *args, **kwds): + raise NotImplementedError, "Paths are not currently supported" + + def __init__(self, encoded=None, obj=None): + self.name = None + if obj: + self.id = obj.id + self.kind = obj.kind() + else: + self.id = None + self.kind = None + + def app(self): + raise NotImplementedError, "Applications are not currently supported" + + def kind(self): + return self.kind + + def id(self): + return self.id + + def name(self): + raise NotImplementedError, "Key Names are not currently supported" + + def id_or_name(self): + return self.id + + def has_id_or_name(self): + return self.id != None + + def parent(self): + raise NotImplementedError, "Key parents are not currently supported" + + def __str__(self): + return self.id_or_name() diff --git a/vendor/boto/boto/sdb/db/manager/__init__.py b/vendor/boto/boto/sdb/db/manager/__init__.py new file mode 100644 index 000000000000..07777966007f --- /dev/null +++ b/vendor/boto/boto/sdb/db/manager/__init__.py @@ -0,0 +1,88 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto + +def get_manager(cls): + """ + Returns the appropriate Manager class for a given Model class. It does this by + looking in the boto config for a section like this:: + + [DB] + db_type = SimpleDB + db_user = + db_passwd = + db_name = my_domain + [DB_TestBasic] + db_type = SimpleDB + db_user = + db_passwd = + db_name = basic_domain + db_port = 1111 + + The values in the DB section are "generic values" that will be used if nothing more + specific is found. You can also create a section for a specific Model class that + gives the db info for that class. In the example above, TestBasic is a Model subclass. 
+ """ + db_user = boto.config.get('DB', 'db_user', None) + db_passwd = boto.config.get('DB', 'db_passwd', None) + db_type = boto.config.get('DB', 'db_type', 'SimpleDB') + db_name = boto.config.get('DB', 'db_name', None) + db_table = boto.config.get('DB', 'db_table', None) + db_host = boto.config.get('DB', 'db_host', "sdb.amazonaws.com") + db_port = boto.config.getint('DB', 'db_port', 443) + enable_ssl = boto.config.getbool('DB', 'enable_ssl', True) + sql_dir = boto.config.get('DB', 'sql_dir', None) + debug = boto.config.getint('DB', 'debug', 0) + # first see if there is a fully qualified section name in the Boto config file + module_name = cls.__module__.replace('.', '_') + db_section = 'DB_' + module_name + '_' + cls.__name__ + if not boto.config.has_section(db_section): + db_section = 'DB_' + cls.__name__ + if boto.config.has_section(db_section): + db_user = boto.config.get(db_section, 'db_user', db_user) + db_passwd = boto.config.get(db_section, 'db_passwd', db_passwd) + db_type = boto.config.get(db_section, 'db_type', db_type) + db_name = boto.config.get(db_section, 'db_name', db_name) + db_table = boto.config.get(db_section, 'db_table', db_table) + db_host = boto.config.get(db_section, 'db_host', db_host) + db_port = boto.config.getint(db_section, 'db_port', db_port) + enable_ssl = boto.config.getint(db_section, 'enable_ssl', enable_ssl) + debug = boto.config.getint(db_section, 'debug', debug) + elif hasattr(cls.__bases__[0], "_manager"): + return cls.__bases__[0]._manager + if db_type == 'SimpleDB': + from sdbmanager import SDBManager + return SDBManager(cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, sql_dir, enable_ssl) + elif db_type == 'PostgreSQL': + from pgmanager import PGManager + if db_table: + return PGManager(cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, sql_dir, enable_ssl) + else: + return None + elif db_type == 'XML': + from xmlmanager import XMLManager + return XMLManager(cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, sql_dir, enable_ssl) + else: + raise ValueError, 'Unknown db_type: %s' % db_type + diff --git a/vendor/boto/boto/sdb/db/manager/pgmanager.py b/vendor/boto/boto/sdb/db/manager/pgmanager.py new file mode 100644 index 000000000000..73a93f0ec5ca --- /dev/null +++ b/vendor/boto/boto/sdb/db/manager/pgmanager.py @@ -0,0 +1,389 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+from boto.sdb.db.key import Key +from boto.sdb.db.model import Model +import psycopg2 +import psycopg2.extensions +import uuid +import os +import string +from boto.exception import SDBPersistenceError + +psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) + +class PGConverter: + + def __init__(self, manager): + self.manager = manager + self.type_map = {Key : (self.encode_reference, self.decode_reference), + Model : (self.encode_reference, self.decode_reference)} + + def encode(self, type, value): + if type in self.type_map: + encode = self.type_map[type][0] + return encode(value) + return value + + def decode(self, type, value): + if type in self.type_map: + decode = self.type_map[type][1] + return decode(value) + return value + + def encode_prop(self, prop, value): + if isinstance(value, list): + if hasattr(prop, 'item_type'): + s = "{" + new_value = [] + for v in value: + item_type = getattr(prop, 'item_type') + if Model in item_type.mro(): + item_type = Model + new_value.append('%s' % self.encode(item_type, v)) + s += ','.join(new_value) + s += "}" + return s + else: + return value + return self.encode(prop.data_type, value) + + def decode_prop(self, prop, value): + if prop.data_type == list: + if value != None: + if not isinstance(value, list): + value = [value] + if hasattr(prop, 'item_type'): + item_type = getattr(prop, "item_type") + if Model in item_type.mro(): + if item_type != self.manager.cls: + return item_type._manager.decode_value(prop, value) + else: + item_type = Model + return [self.decode(item_type, v) for v in value] + return value + elif hasattr(prop, 'reference_class'): + ref_class = getattr(prop, 'reference_class') + if ref_class != self.manager.cls: + return ref_class._manager.decode_value(prop, value) + else: + return self.decode(prop.data_type, value) + elif hasattr(prop, 'calculated_type'): + calc_type = getattr(prop, 'calculated_type') + return self.decode(calc_type, value) + else: + return self.decode(prop.data_type, value) + + def encode_reference(self, value): + if isinstance(value, str) or isinstance(value, unicode): + return value + if value == None: + return '' + else: + return value.id + + def decode_reference(self, value): + if not value: + return None + try: + return self.manager.get_object_from_id(value) + except: + raise ValueError, 'Unable to convert %s to Object' % value + +class PGManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, sql_dir, enable_ssl): + self.cls = cls + self.db_name = db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.sql_dir = sql_dir + self.in_transaction = False + self.converter = PGConverter(self) + self._connect() + + def _build_connect_string(self): + cs = 'dbname=%s user=%s password=%s host=%s port=%d' + return cs % (self.db_name, self.db_user, self.db_passwd, + self.db_host, self.db_port) + + def _connect(self): + self.connection = psycopg2.connect(self._build_connect_string()) + self.connection.set_client_encoding('UTF8') + self.cursor = self.connection.cursor() + + def _object_lister(self, cursor): + try: + for row in cursor: + yield self._object_from_row(row, cursor.description) + except StopIteration: + cursor.close() + raise StopIteration + + def _dict_from_row(self, row, description): + d = {} + for i in range(0, len(row)): + d[description[i][0]] = row[i] + return d + + def _object_from_row(self, row, description=None): + if not description: + description = 
self.cursor.description + d = self._dict_from_row(row, description) + obj = self.cls(d['id']) + obj._manager = self + obj._auto_update = False + for prop in obj.properties(hidden=False): + if prop.data_type != Key: + v = self.decode_value(prop, d[prop.name]) + v = prop.make_value_from_datastore(v) + if hasattr(prop, 'calculated_type'): + prop._set_direct(obj, v) + elif not prop.empty(v): + setattr(obj, prop.name, v) + else: + setattr(obj, prop.name, prop.default_value()) + return obj + + def _build_insert_qs(self, obj, calculated): + fields = [] + values = [] + templs = [] + id_calculated = [p for p in calculated if p.name == 'id'] + for prop in obj.properties(hidden=False): + if prop not in calculated: + value = prop.get_value_for_datastore(obj) + if value != prop.default_value() or prop.required: + value = self.encode_value(prop, value) + values.append(value) + fields.append('"%s"' % prop.name) + templs.append('%s') + qs = 'INSERT INTO "%s" (' % self.db_table + if len(id_calculated) == 0: + qs += '"id",' + qs += ','.join(fields) + qs += ") VALUES (" + if len(id_calculated) == 0: + qs += "'%s'," % obj.id + qs += ','.join(templs) + qs += ')' + if calculated: + qs += ' RETURNING ' + calc_values = ['"%s"' % p.name for p in calculated] + qs += ','.join(calc_values) + qs += ';' + return qs, values + + def _build_update_qs(self, obj, calculated): + fields = [] + values = [] + for prop in obj.properties(hidden=False): + if prop not in calculated: + value = prop.get_value_for_datastore(obj) + if value != prop.default_value() or prop.required: + value = self.encode_value(prop, value) + values.append(value) + field = '"%s"=' % prop.name + field += '%s' + fields.append(field) + qs = 'UPDATE "%s" SET ' % self.db_table + qs += ','.join(fields) + qs += """ WHERE "id" = '%s'""" % obj.id + if calculated: + qs += ' RETURNING ' + calc_values = ['"%s"' % p.name for p in calculated] + qs += ','.join(calc_values) + qs += ';' + return qs, values + + def _get_sql(self, mapping=None): + print '_get_sql' + sql = None + if self.sql_dir: + path = os.path.join(self.sql_dir, self.cls.__name__ + '.sql') + print path + if os.path.isfile(path): + fp = open(path) + sql = fp.read() + fp.close() + t = string.Template(sql) + sql = t.safe_substitute(mapping) + return sql + + def start_transaction(self): + print 'start_transaction' + self.in_transaction = True + + def end_transaction(self): + print 'end_transaction' + self.in_transaction = False + self.commit() + + def commit(self): + if not self.in_transaction: + print '!!commit on %s' % self.db_table + try: + self.connection.commit() + + except psycopg2.ProgrammingError, err: + self.connection.rollback() + raise err + + def rollback(self): + print '!!rollback on %s' % self.db_table + self.connection.rollback() + + def delete_table(self): + self.cursor.execute('DROP TABLE "%s";' % self.db_table) + self.commit() + + def create_table(self, mapping=None): + self.cursor.execute(self._get_sql(mapping)) + self.commit() + + def encode_value(self, prop, value): + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def execute_sql(self, query): + self.cursor.execute(query, None) + self.commit() + + def query_sql(self, query, vars=None): + self.cursor.execute(query, vars) + return self.cursor.fetchall() + + def lookup(self, cls, name, value): + values = [] + qs = 'SELECT * FROM "%s" WHERE ' % self.db_table + found = False + for property in cls.properties(hidden=False): + if property.name == name: 
+ found = True + value = self.encode_value(property, value) + values.append(value) + qs += "%s=" % name + qs += "%s" + if not found: + raise SDBPersistenceError('%s is not a valid field' % name) + qs += ';' + print qs + self.cursor.execute(qs, values) + if self.cursor.rowcount == 1: + row = self.cursor.fetchone() + return self._object_from_row(row, self.cursor.description) + elif self.cursor.rowcount == 0: + raise KeyError, 'Object not found' + else: + raise LookupError, 'Multiple Objects Found' + + def query(self, cls, filters, limit=None, order_by=None): + parts = [] + qs = 'SELECT * FROM "%s"' % self.db_table + if filters: + qs += ' WHERE ' + properties = cls.properties(hidden=False) + for filter, value in filters: + name, op = filter.strip().split() + found = False + for property in properties: + if property.name == name: + found = True + value = self.encode_value(property, value) + parts.append(""""%s"%s'%s'""" % (name, op, value)) + if not found: + raise SDBPersistenceError('%s is not a valid field' % name) + qs += ','.join(parts) + qs += ';' + print qs + cursor = self.connection.cursor() + cursor.execute(qs) + return self._object_lister(cursor) + + def get_property(self, prop, obj, name): + qs = """SELECT "%s" FROM "%s" WHERE id='%s';""" % (name, self.db_table, obj.id) + print qs + self.cursor.execute(qs, None) + if self.cursor.rowcount == 1: + rs = self.cursor.fetchone() + for prop in obj.properties(hidden=False): + if prop.name == name: + v = self.decode_value(prop, rs[0]) + return v + raise AttributeError, '%s not found' % name + + def set_property(self, prop, obj, name, value): + pass + value = self.encode_value(prop, value) + qs = 'UPDATE "%s" SET ' % self.db_table + qs += "%s='%s'" % (name, self.encode_value(prop, value)) + qs += " WHERE id='%s'" % obj.id + qs += ';' + print qs + self.cursor.execute(qs) + self.commit() + + def get_object(self, cls, id): + qs = """SELECT * FROM "%s" WHERE id='%s';""" % (self.db_table, id) + self.cursor.execute(qs, None) + if self.cursor.rowcount == 1: + row = self.cursor.fetchone() + return self._object_from_row(row, self.cursor.description) + else: + raise SDBPersistenceError('%s object with id=%s does not exist' % (cls.__name__, id)) + + def get_object_from_id(self, id): + return self.get_object(self.cls, id) + + def _find_calculated_props(self, obj): + return [p for p in obj.properties() if hasattr(p, 'calculated_type')] + + def save_object(self, obj): + obj._auto_update = False + calculated = self._find_calculated_props(obj) + if not obj.id: + obj.id = str(uuid.uuid4()) + qs, values = self._build_insert_qs(obj, calculated) + else: + qs, values = self._build_update_qs(obj, calculated) + print qs + self.cursor.execute(qs, values) + if calculated: + calc_values = self.cursor.fetchone() + print calculated + print calc_values + for i in range(0, len(calculated)): + prop = calculated[i] + prop._set_direct(obj, calc_values[i]) + self.commit() + + def delete_object(self, obj): + qs = """DELETE FROM "%s" WHERE id='%s';""" % (self.db_table, obj.id) + print qs + self.cursor.execute(qs) + self.commit() + + diff --git a/vendor/boto/boto/sdb/db/manager/sdbmanager.py b/vendor/boto/boto/sdb/db/manager/sdbmanager.py new file mode 100644 index 000000000000..fec24273fa72 --- /dev/null +++ b/vendor/boto/boto/sdb/db/manager/sdbmanager.py @@ -0,0 +1,599 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy 
of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto +import re +from boto.utils import find_class +import uuid +from boto.sdb.db.key import Key +from boto.sdb.db.model import Model +from boto.sdb.db.blob import Blob +from boto.sdb.db.property import ListProperty, MapProperty +from datetime import datetime +from boto.exception import SDBPersistenceError + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' + + +class SDBConverter: + """ + Responsible for converting base Python types to format compatible with underlying + database. For SimpleDB, that means everything needs to be converted to a string + when stored in SimpleDB and from a string when retrieved. + + To convert a value, pass it to the encode or decode method. The encode method + will take a Python native value and convert to DB format. The decode method will + take a DB format value and convert it to Python native format. To find the appropriate + method to call, the generic encode/decode methods will look for the type-specific + method by searching for a method called "encode_" or "decode_". 
+ """ + def __init__(self, manager): + self.manager = manager + self.type_map = { bool : (self.encode_bool, self.decode_bool), + int : (self.encode_int, self.decode_int), + long : (self.encode_long, self.decode_long), + float : (self.encode_float, self.decode_float), + Model : (self.encode_reference, self.decode_reference), + Key : (self.encode_reference, self.decode_reference), + datetime : (self.encode_datetime, self.decode_datetime), + Blob: (self.encode_blob, self.decode_blob), + } + + def encode(self, item_type, value): + try: + if Model in item_type.mro(): + item_type = Model + except: + pass + if item_type in self.type_map: + encode = self.type_map[item_type][0] + return encode(value) + return value + + def decode(self, item_type, value): + if item_type in self.type_map: + decode = self.type_map[item_type][1] + return decode(value) + return value + + def encode_list(self, prop, value): + if value == None: + return None + if not isinstance(value, list): + # This is a little trick to avoid encoding when it's just a single value, + # since that most likely means it's from a query + item_type = getattr(prop, "item_type") + return self.encode(item_type, value) + # Just enumerate(value) won't work here because + # we need to add in some zero padding + # We support lists up to 1,000 attributes, since + # SDB technically only supports 1024 attributes anyway + values = {} + for k,v in enumerate(value): + values["%03d" % k] = v + return self.encode_map(prop, values) + + def encode_map(self, prop, value): + if value == None: + return None + if not isinstance(value, dict): + raise ValueError, 'Expected a dict value, got %s' % type(value) + new_value = [] + for key in value: + item_type = getattr(prop, "item_type") + if Model in item_type.mro(): + item_type = Model + encoded_value = self.encode(item_type, value[key]) + if encoded_value != None and encoded_value != "None": + new_value.append('%s:%s' % (key, encoded_value)) + return new_value + + def encode_prop(self, prop, value): + if isinstance(prop, ListProperty): + return self.encode_list(prop, value) + elif isinstance(prop, MapProperty): + return self.encode_map(prop, value) + else: + return self.encode(prop.data_type, value) + + def decode_list(self, prop, value): + if not isinstance(value, list): + value = [value] + if hasattr(prop, 'item_type'): + item_type = getattr(prop, "item_type") + dec_val = {} + for val in value: + k,v = self.decode_map_element(item_type, val) + try: + k = int(k) + except: + k = v + dec_val[k] = v + value = dec_val.values() + return value + + def decode_map(self, prop, value): + if not isinstance(value, list): + value = [value] + ret_value = {} + item_type = getattr(prop, "item_type") + for val in value: + k,v = self.decode_map_element(item_type, val) + ret_value[k] = v + return ret_value + + def decode_map_element(self, item_type, value): + """Decode a single element for a map""" + key = value + if ":" in value: + key, value = value.split(':',1) + if Model in item_type.mro(): + value = item_type(id=value) + else: + value = self.decode(item_type, value) + return (key, value) + + def decode_prop(self, prop, value): + if isinstance(prop, ListProperty): + return self.decode_list(prop, value) + elif isinstance(prop, MapProperty): + return self.decode_map(prop, value) + else: + return self.decode(prop.data_type, value) + + def encode_int(self, value): + value = int(value) + value += 2147483648 + return '%010d' % value + + def decode_int(self, value): + try: + value = int(value) + except: + boto.log.error("Error, %s is 
not an integer" % value) + value = 0 + value = int(value) + value -= 2147483648 + return int(value) + + def encode_long(self, value): + value = long(value) + value += 9223372036854775808 + return '%020d' % value + + def decode_long(self, value): + value = long(value) + value -= 9223372036854775808 + return value + + def encode_bool(self, value): + if value == True: + return 'true' + else: + return 'false' + + def decode_bool(self, value): + if value.lower() == 'true': + return True + else: + return False + + def encode_float(self, value): + """ + See http://tools.ietf.org/html/draft-wood-ldapext-float-00. + """ + s = '%e' % value + l = s.split('e') + mantissa = l[0].ljust(18, '0') + exponent = l[1] + if value == 0.0: + case = '3' + exponent = '000' + elif mantissa[0] != '-' and exponent[0] == '+': + case = '5' + exponent = exponent[1:].rjust(3, '0') + elif mantissa[0] != '-' and exponent[0] == '-': + case = '4' + exponent = 999 + int(exponent) + exponent = '%03d' % exponent + elif mantissa[0] == '-' and exponent[0] == '-': + case = '2' + mantissa = '%f' % (10 + float(mantissa)) + mantissa = mantissa.ljust(18, '0') + exponent = exponent[1:].rjust(3, '0') + else: + case = '1' + mantissa = '%f' % (10 + float(mantissa)) + mantissa = mantissa.ljust(18, '0') + exponent = 999 - int(exponent) + exponent = '%03d' % exponent + return '%s %s %s' % (case, exponent, mantissa) + + def decode_float(self, value): + case = value[0] + exponent = value[2:5] + mantissa = value[6:] + if case == '3': + return 0.0 + elif case == '5': + pass + elif case == '4': + exponent = '%03d' % (int(exponent) - 999) + elif case == '2': + mantissa = '%f' % (float(mantissa) - 10) + exponent = '-' + exponent + else: + mantissa = '%f' % (float(mantissa) - 10) + exponent = '%03d' % abs((int(exponent) - 999)) + return float(mantissa + 'e' + exponent) + + def encode_datetime(self, value): + if isinstance(value, str) or isinstance(value, unicode): + return value + return value.strftime(ISO8601) + + def decode_datetime(self, value): + try: + return datetime.strptime(value, ISO8601) + except: + return None + + def encode_reference(self, value): + if isinstance(value, str) or isinstance(value, unicode): + return value + if value == None: + return '' + else: + return value.id + + def decode_reference(self, value): + if not value or value == "None": + return None + return value + + def encode_blob(self, value): + if not value: + return None + + if not value.id: + bucket = self.manager.get_blob_bucket() + key = bucket.new_key(str(uuid.uuid4())) + value.id = "s3://%s/%s" % (key.bucket.name, key.name) + else: + match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value.id) + if match: + s3 = self.manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + key = bucket.get_key(match.group(2)) + else: + raise SDBPersistenceError("Invalid Blob ID: %s" % value.id) + + if value.value != None: + key.set_contents_from_string(value.value) + return value.id + + + def decode_blob(self, value): + if not value: + return None + match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value) + if match: + s3 = self.manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + key = bucket.get_key(match.group(2)) + else: + return None + if key: + return Blob(file=key, id="s3://%s/%s" % (key.bucket.name, key.name)) + else: + return None + +class SDBManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, ddl_dir, enable_ssl, consistent=None): + self.cls = cls + self.db_name = 
db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.ddl_dir = ddl_dir + self.enable_ssl = enable_ssl + self.s3 = None + self.bucket = None + self.converter = SDBConverter(self) + self._sdb = None + self._domain = None + if consistent == None and hasattr(cls, "__consistent"): + consistent = cls.__consistent__ + self.consistent = consistent + + @property + def sdb(self): + if self._sdb is None: + self._connect() + return self._sdb + + @property + def domain(self): + if self._domain is None: + self._connect() + return self._domain + + def _connect(self): + self._sdb = boto.connect_sdb(aws_access_key_id=self.db_user, + aws_secret_access_key=self.db_passwd, + is_secure=self.enable_ssl) + # This assumes that the domain has already been created + # It's much more efficient to do it this way rather than + # having this make a roundtrip each time to validate. + # The downside is that if the domain doesn't exist, it breaks + self._domain = self._sdb.lookup(self.db_name, validate=False) + if not self._domain: + self._domain = self._sdb.create_domain(self.db_name) + + def _object_lister(self, cls, query_lister): + for item in query_lister: + obj = self.get_object(cls, item.name, item) + if obj: + yield obj + + def encode_value(self, prop, value): + if value == None: + return None + if not prop: + return str(value) + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.db_user, self.db_passwd) + return self.s3 + + def get_blob_bucket(self, bucket_name=None): + s3 = self.get_s3_connection() + bucket_name = "%s-%s" % (s3.aws_access_key_id, self.domain.name) + bucket_name = bucket_name.lower() + try: + self.bucket = s3.get_bucket(bucket_name) + except: + self.bucket = s3.create_bucket(bucket_name) + return self.bucket + + def load_object(self, obj): + if not obj._loaded: + a = self.domain.get_attributes(obj.id,consistent_read=self.consistent) + if a.has_key('__type__'): + for prop in obj.properties(hidden=False): + if a.has_key(prop.name): + value = self.decode_value(prop, a[prop.name]) + value = prop.make_value_from_datastore(value) + try: + setattr(obj, prop.name, value) + except Exception, e: + boto.log.exception(e) + obj._loaded = True + + def get_object(self, cls, id, a=None): + obj = None + if not a: + a = self.domain.get_attributes(id,consistent_read=self.consistent) + if a.has_key('__type__'): + if not cls or a['__type__'] != cls.__name__: + cls = find_class(a['__module__'], a['__type__']) + if cls: + params = {} + for prop in cls.properties(hidden=False): + if a.has_key(prop.name): + value = self.decode_value(prop, a[prop.name]) + value = prop.make_value_from_datastore(value) + params[prop.name] = value + obj = cls(id, **params) + obj._loaded = True + else: + s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__']) + boto.log.info('sdbmanager: %s' % s) + return obj + + def get_object_from_id(self, id): + return self.get_object(None, id) + + def query(self, query): + query_str = "select * from `%s` %s" % (self.domain.name, self._build_filter_part(query.model_class, query.filters, query.sort_by)) + if query.limit: + query_str += " limit %s" % query.limit + rs = self.domain.select(query_str, max_items=query.limit, next_token = query.next_token) + query.rs = rs + return self._object_lister(query.model_class, rs) + + def 
count(self, cls, filters): + """ + Get the number of results that would + be returned in this query + """ + query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters)) + count = int(self.domain.select(query).next()["Count"]) + return count + + + def _build_filter(self, property, name, op, val): + if val == None: + if op in ('is','='): + return "`%s` is null" % name + elif op in ('is not', '!='): + return "`%s` is not null" % name + else: + val = "" + if property.__class__ == ListProperty: + if op in ("is", "="): + op = "like" + elif op in ("!=", "not"): + op = "not like" + if not(op == "like" and val.startswith("%")): + val = "%%:%s" % val + return "`%s` %s '%s'" % (name, op, val.replace("'", "''")) + + def _build_filter_part(self, cls, filters, order_by=None): + """ + Build the filter part + """ + import types + query_parts = [] + order_by_filtered = False + if order_by: + if order_by[0] == "-": + order_by_method = "desc"; + order_by = order_by[1:] + else: + order_by_method = "asc"; + + for filter in filters: + filter_parts = [] + filter_props = filter[0] + if type(filter_props) != list: + filter_props = [filter_props] + for filter_prop in filter_props: + (name, op) = filter_prop.strip().split(" ", 1) + value = filter[1] + property = cls.find_property(name) + if name == order_by: + order_by_filtered = True + if types.TypeType(value) == types.ListType: + filter_parts_sub = [] + for val in value: + val = self.encode_value(property, val) + if isinstance(val, list): + for v in val: + filter_parts_sub.append(self._build_filter(property, name, op, v)) + else: + filter_parts_sub.append(self._build_filter(property, name, op, val)) + filter_parts.append("(%s)" % (" or ".join(filter_parts_sub))) + else: + val = self.encode_value(property, value) + if isinstance(val, list): + for v in val: + filter_parts.append(self._build_filter(property, name, op, v)) + else: + filter_parts.append(self._build_filter(property, name, op, val)) + query_parts.append("(%s)" % (" or ".join(filter_parts))) + + + type_query = "(`__type__` = '%s'" % cls.__name__ + for subclass in self._get_all_decendents(cls).keys(): + type_query += " or `__type__` = '%s'" % subclass + type_query +=")" + query_parts.append(type_query) + + order_by_query = "" + if order_by: + if not order_by_filtered: + query_parts.append("`%s` like '%%'" % order_by) + order_by_query = " order by `%s` %s" % (order_by, order_by_method) + + if len(query_parts) > 0: + return "where %s %s" % (" and ".join(query_parts), order_by_query) + else: + return "" + + + def _get_all_decendents(self, cls): + """Get all decendents for a given class""" + decendents = {} + for sc in cls.__sub_classes__: + decendents[sc.__name__] = sc + decendents.update(self._get_all_decendents(sc)) + return decendents + + def query_gql(self, query_string, *args, **kwds): + raise NotImplementedError, "GQL queries not supported in SimpleDB" + + def save_object(self, obj): + if not obj.id: + obj.id = str(uuid.uuid4()) + + attrs = {'__type__' : obj.__class__.__name__, + '__module__' : obj.__class__.__module__, + '__lineage__' : obj.get_lineage()} + for property in obj.properties(hidden=False): + value = property.get_value_for_datastore(obj) + if value is not None: + value = self.encode_value(property, value) + if value == []: + value = None + attrs[property.name] = value + if property.unique: + try: + args = {property.name: value} + obj2 = obj.find(**args).next() + if obj2.id != obj.id: + raise SDBPersistenceError("Error: %s must be unique!" 
% property.name) + except(StopIteration): + pass + self.domain.put_attributes(obj.id, attrs, replace=True) + + def delete_object(self, obj): + self.domain.delete_attributes(obj.id) + + def set_property(self, prop, obj, name, value): + value = prop.get_value_for_datastore(obj) + value = self.encode_value(prop, value) + if prop.unique: + try: + args = {prop.name: value} + obj2 = obj.find(**args).next() + if obj2.id != obj.id: + raise SDBPersistenceError("Error: %s must be unique!" % prop.name) + except(StopIteration): + pass + self.domain.put_attributes(obj.id, {name : value}, replace=True) + + def get_property(self, prop, obj, name): + a = self.domain.get_attributes(obj.id,consistent_read=self.consistent) + + # try to get the attribute value from SDB + if name in a: + value = self.decode_value(prop, a[name]) + value = prop.make_value_from_datastore(value) + setattr(obj, prop.name, value) + return value + raise AttributeError, '%s not found' % name + + def set_key_value(self, obj, name, value): + self.domain.put_attributes(obj.id, {name : value}, replace=True) + + def delete_key_value(self, obj, name): + self.domain.delete_attributes(obj.id, name) + + def get_key_value(self, obj, name): + a = self.domain.get_attributes(obj.id, name,consistent_read=self.consistent) + if a.has_key(name): + return a[name] + else: + return None + + def get_raw_item(self, obj): + return self.domain.get_item(obj.id) + diff --git a/vendor/boto/boto/sdb/db/manager/xmlmanager.py b/vendor/boto/boto/sdb/db/manager/xmlmanager.py new file mode 100644 index 000000000000..a3f8074e7a93 --- /dev/null +++ b/vendor/boto/boto/sdb/db/manager/xmlmanager.py @@ -0,0 +1,517 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +import boto +from boto.utils import find_class, Password +from boto.sdb.db.key import Key +from boto.sdb.db.model import Model +from datetime import datetime +from xml.dom.minidom import getDOMImplementation, parse, parseString, Node + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' + +class XMLConverter: + """ + Responsible for converting base Python types to format compatible with underlying + database. For SimpleDB, that means everything needs to be converted to a string + when stored in SimpleDB and from a string when retrieved. + + To convert a value, pass it to the encode or decode method. The encode method + will take a Python native value and convert to DB format. The decode method will + take a DB format value and convert it to Python native format. 
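+    For example, a datetime value is stored as an ISO 8601 string
+    ('%Y-%m-%dT%H:%M:%SZ') and parsed back with strptime when it is read out.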
To find the appropriate + method to call, the generic encode/decode methods will look for the type-specific + method by searching for a method called "encode_" or "decode_". + """ + def __init__(self, manager): + self.manager = manager + self.type_map = { bool : (self.encode_bool, self.decode_bool), + int : (self.encode_int, self.decode_int), + long : (self.encode_long, self.decode_long), + Model : (self.encode_reference, self.decode_reference), + Key : (self.encode_reference, self.decode_reference), + Password : (self.encode_password, self.decode_password), + datetime : (self.encode_datetime, self.decode_datetime)} + + def get_text_value(self, parent_node): + value = '' + for node in parent_node.childNodes: + if node.nodeType == node.TEXT_NODE: + value += node.data + return value + + def encode(self, item_type, value): + if item_type in self.type_map: + encode = self.type_map[item_type][0] + return encode(value) + return value + + def decode(self, item_type, value): + if item_type in self.type_map: + decode = self.type_map[item_type][1] + return decode(value) + else: + value = self.get_text_value(value) + return value + + def encode_prop(self, prop, value): + if isinstance(value, list): + if hasattr(prop, 'item_type'): + new_value = [] + for v in value: + item_type = getattr(prop, "item_type") + if Model in item_type.mro(): + item_type = Model + new_value.append(self.encode(item_type, v)) + return new_value + else: + return value + else: + return self.encode(prop.data_type, value) + + def decode_prop(self, prop, value): + if prop.data_type == list: + if hasattr(prop, 'item_type'): + item_type = getattr(prop, "item_type") + if Model in item_type.mro(): + item_type = Model + values = [] + for item_node in value.getElementsByTagName('item'): + value = self.decode(item_type, item_node) + values.append(value) + return values + else: + return self.get_text_value(value) + else: + return self.decode(prop.data_type, value) + + def encode_int(self, value): + value = int(value) + return '%d' % value + + def decode_int(self, value): + value = self.get_text_value(value) + if value: + value = int(value) + else: + value = None + return value + + def encode_long(self, value): + value = long(value) + return '%d' % value + + def decode_long(self, value): + value = self.get_text_value(value) + return long(value) + + def encode_bool(self, value): + if value == True: + return 'true' + else: + return 'false' + + def decode_bool(self, value): + value = self.get_text_value(value) + if value.lower() == 'true': + return True + else: + return False + + def encode_datetime(self, value): + return value.strftime(ISO8601) + + def decode_datetime(self, value): + value = self.get_text_value(value) + try: + return datetime.strptime(value, ISO8601) + except: + return None + + def encode_reference(self, value): + if isinstance(value, str) or isinstance(value, unicode): + return value + if value == None: + return '' + else: + val_node = self.manager.doc.createElement("object") + val_node.setAttribute('id', value.id) + val_node.setAttribute('class', '%s.%s' % (value.__class__.__module__, value.__class__.__name__)) + return val_node + + def decode_reference(self, value): + if not value: + return None + try: + value = value.childNodes[0] + class_name = value.getAttribute("class") + id = value.getAttribute("id") + cls = find_class(class_name) + return cls.get_by_ids(id) + except: + return None + + def encode_password(self, value): + if value and len(value) > 0: + return str(value) + else: + return None + + def 
decode_password(self, value): + value = self.get_text_value(value) + return Password(value) + + +class XMLManager(object): + + def __init__(self, cls, db_name, db_user, db_passwd, + db_host, db_port, db_table, ddl_dir, enable_ssl): + self.cls = cls + if not db_name: + db_name = cls.__name__.lower() + self.db_name = db_name + self.db_user = db_user + self.db_passwd = db_passwd + self.db_host = db_host + self.db_port = db_port + self.db_table = db_table + self.ddl_dir = ddl_dir + self.s3 = None + self.converter = XMLConverter(self) + self.impl = getDOMImplementation() + self.doc = self.impl.createDocument(None, 'objects', None) + + self.connection = None + self.enable_ssl = enable_ssl + self.auth_header = None + if self.db_user: + import base64 + base64string = base64.encodestring('%s:%s' % (self.db_user, self.db_passwd))[:-1] + authheader = "Basic %s" % base64string + self.auth_header = authheader + + def _connect(self): + if self.db_host: + if self.enable_ssl: + from httplib import HTTPSConnection as Connection + else: + from httplib import HTTPConnection as Connection + + self.connection = Connection(self.db_host, self.db_port) + + def _make_request(self, method, url, post_data=None, body=None): + """ + Make a request on this connection + """ + if not self.connection: + self._connect() + try: + self.connection.close() + except: + pass + self.connection.connect() + headers = {} + if self.auth_header: + headers["Authorization"] = self.auth_header + self.connection.request(method, url, body, headers) + resp = self.connection.getresponse() + return resp + + def new_doc(self): + return self.impl.createDocument(None, 'objects', None) + + def _object_lister(self, cls, doc): + for obj_node in doc.getElementsByTagName('object'): + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + id = obj_node.getAttribute('id') + obj = cls(id) + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = obj.find_property(prop_name) + if prop: + if hasattr(prop, 'item_type'): + value = self.get_list(prop_node, prop.item_type) + else: + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + setattr(obj, prop.name, value) + yield obj + + def reset(self): + self._connect() + + def get_doc(self): + return self.doc + + def encode_value(self, prop, value): + return self.converter.encode_prop(prop, value) + + def decode_value(self, prop, value): + return self.converter.decode_prop(prop, value) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key) + return self.s3 + + def get_list(self, prop_node, item_type): + values = [] + try: + items_node = prop_node.getElementsByTagName('items')[0] + except: + return [] + for item_node in items_node.getElementsByTagName('item'): + value = self.converter.decode(item_type, item_node) + values.append(value) + return values + + def get_object_from_doc(self, cls, id, doc): + obj_node = doc.getElementsByTagName('object')[0] + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + if not id: + id = obj_node.getAttribute('id') + obj = cls(id) + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = obj.find_property(prop_name) + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + if value != None: + try: + setattr(obj, prop.name, value) + except: + 
pass + return obj + + def get_props_from_doc(self, cls, id, doc): + """ + Pull out the properties from this document + Returns the class, the properties in a hash, and the id if provided as a tuple + :return: (cls, props, id) + """ + obj_node = doc.getElementsByTagName('object')[0] + if not cls: + class_name = obj_node.getAttribute('class') + cls = find_class(class_name) + if not id: + id = obj_node.getAttribute('id') + props = {} + for prop_node in obj_node.getElementsByTagName('property'): + prop_name = prop_node.getAttribute('name') + prop = cls.find_property(prop_name) + value = self.decode_value(prop, prop_node) + value = prop.make_value_from_datastore(value) + if value != None: + props[prop.name] = value + return (cls, props, id) + + + def get_object(self, cls, id): + if not self.connection: + self._connect() + + if not self.connection: + raise NotImplementedError("Can't query without a database connection") + url = "/%s/%s" % (self.db_name, id) + resp = self._make_request('GET', url) + if resp.status == 200: + doc = parse(resp) + else: + raise Exception("Error: %s" % resp.status) + return self.get_object_from_doc(cls, id, doc) + + def query(self, cls, filters, limit=None, order_by=None): + if not self.connection: + self._connect() + + if not self.connection: + raise NotImplementedError("Can't query without a database connection") + + from urllib import urlencode + + query = str(self._build_query(cls, filters, limit, order_by)) + if query: + url = "/%s?%s" % (self.db_name, urlencode({"query": query})) + else: + url = "/%s" % self.db_name + resp = self._make_request('GET', url) + if resp.status == 200: + doc = parse(resp) + else: + raise Exception("Error: %s" % resp.status) + return self._object_lister(cls, doc) + + def _build_query(self, cls, filters, limit, order_by): + import types + if len(filters) > 4: + raise Exception('Too many filters, max is 4') + parts = [] + properties = cls.properties(hidden=False) + for filter, value in filters: + name, op = filter.strip().split() + found = False + for property in properties: + if property.name == name: + found = True + if types.TypeType(value) == types.ListType: + filter_parts = [] + for val in value: + val = self.encode_value(property, val) + filter_parts.append("'%s' %s '%s'" % (name, op, val)) + parts.append("[%s]" % " OR ".join(filter_parts)) + else: + value = self.encode_value(property, value) + parts.append("['%s' %s '%s']" % (name, op, value)) + if not found: + raise Exception('%s is not a valid field' % name) + if order_by: + if order_by.startswith("-"): + key = order_by[1:] + type = "desc" + else: + key = order_by + type = "asc" + parts.append("['%s' starts-with ''] sort '%s' %s" % (key, key, type)) + return ' intersection '.join(parts) + + def query_gql(self, query_string, *args, **kwds): + raise NotImplementedError, "GQL queries not supported in XML" + + def save_list(self, doc, items, prop_node): + items_node = doc.createElement('items') + prop_node.appendChild(items_node) + for item in items: + item_node = doc.createElement('item') + items_node.appendChild(item_node) + if isinstance(item, Node): + item_node.appendChild(item) + else: + text_node = doc.createTextNode(item) + item_node.appendChild(text_node) + + def save_object(self, obj): + """ + Marshal the object and do a PUT + """ + doc = self.marshal_object(obj) + if obj.id: + url = "/%s/%s" % (self.db_name, obj.id) + else: + url = "/%s" % (self.db_name) + resp = self._make_request("PUT", url, body=doc.toxml()) + new_obj = self.get_object_from_doc(obj.__class__, None, 
parse(resp)) + obj.id = new_obj.id + for prop in obj.properties(): + try: + propname = prop.name + except AttributeError: + propname = None + if propname: + value = getattr(new_obj, prop.name) + if value: + setattr(obj, prop.name, value) + return obj + + + def marshal_object(self, obj, doc=None): + if not doc: + doc = self.new_doc() + if not doc: + doc = self.doc + obj_node = doc.createElement('object') + + if obj.id: + obj_node.setAttribute('id', obj.id) + + obj_node.setAttribute('class', '%s.%s' % (obj.__class__.__module__, + obj.__class__.__name__)) + root = doc.documentElement + root.appendChild(obj_node) + for property in obj.properties(hidden=False): + prop_node = doc.createElement('property') + prop_node.setAttribute('name', property.name) + prop_node.setAttribute('type', property.type_name) + value = property.get_value_for_datastore(obj) + if value is not None: + value = self.encode_value(property, value) + if isinstance(value, list): + self.save_list(doc, value, prop_node) + elif isinstance(value, Node): + prop_node.appendChild(value) + else: + text_node = doc.createTextNode(str(value)) + prop_node.appendChild(text_node) + obj_node.appendChild(prop_node) + + return doc + + def unmarshal_object(self, fp, cls=None, id=None): + if isinstance(fp, str) or isinstance(fp, unicode): + doc = parseString(fp) + else: + doc = parse(fp) + return self.get_object_from_doc(cls, id, doc) + + def unmarshal_props(self, fp, cls=None, id=None): + """ + Same as unmarshalling an object, except it returns + from "get_props_from_doc" + """ + if isinstance(fp, str) or isinstance(fp, unicode): + doc = parseString(fp) + else: + doc = parse(fp) + return self.get_props_from_doc(cls, id, doc) + + def delete_object(self, obj): + url = "/%s/%s" % (self.db_name, obj.id) + return self._make_request("DELETE", url) + + def set_key_value(self, obj, name, value): + self.domain.put_attributes(obj.id, {name : value}, replace=True) + + def delete_key_value(self, obj, name): + self.domain.delete_attributes(obj.id, name) + + def get_key_value(self, obj, name): + a = self.domain.get_attributes(obj.id, name) + if a.has_key(name): + return a[name] + else: + return None + + def get_raw_item(self, obj): + return self.domain.get_item(obj.id) + + def set_property(self, prop, obj, name, value): + pass + + def get_property(self, prop, obj, name): + pass + + def load_object(self, obj): + if not obj._loaded: + obj = obj.get_by_id(obj.id) + obj._loaded = True + return obj + diff --git a/vendor/boto/boto/sdb/db/model.py b/vendor/boto/boto/sdb/db/model.py new file mode 100644 index 000000000000..069294729ea9 --- /dev/null +++ b/vendor/boto/boto/sdb/db/model.py @@ -0,0 +1,234 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sdb.db.manager import get_manager +from boto.sdb.db.property import Property +from boto.sdb.db.key import Key +from boto.sdb.db.query import Query +import boto + +class ModelMeta(type): + "Metaclass for all Models" + + def __init__(cls, name, bases, dict): + super(ModelMeta, cls).__init__(name, bases, dict) + # Make sure this is a subclass of Model - mainly copied from django ModelBase (thanks!) + cls.__sub_classes__ = [] + try: + if filter(lambda b: issubclass(b, Model), bases): + for base in bases: + base.__sub_classes__.append(cls) + cls._manager = get_manager(cls) + # look for all of the Properties and set their names + for key in dict.keys(): + if isinstance(dict[key], Property): + property = dict[key] + property.__property_config__(cls, key) + prop_names = [] + props = cls.properties() + for prop in props: + if not prop.__class__.__name__.startswith('_'): + prop_names.append(prop.name) + setattr(cls, '_prop_names', prop_names) + except NameError: + # 'Model' isn't defined yet, meaning we're looking at our own + # Model class, defined below. + pass + +class Model(object): + __metaclass__ = ModelMeta + __consistent__ = False # Consistent is set off by default + id = None + + @classmethod + def get_lineage(cls): + l = [c.__name__ for c in cls.mro()] + l.reverse() + return '.'.join(l) + + @classmethod + def kind(cls): + return cls.__name__ + + @classmethod + def _get_by_id(cls, id, manager=None): + if not manager: + manager = cls._manager + return manager.get_object(cls, id) + + @classmethod + def get_by_id(cls, ids=None, parent=None): + if isinstance(ids, list): + objs = [cls._get_by_id(id) for id in ids] + return objs + else: + return cls._get_by_id(ids) + + get_by_ids = get_by_id + + @classmethod + def get_by_key_name(cls, key_names, parent=None): + raise NotImplementedError, "Key Names are not currently supported" + + @classmethod + def find(cls, limit=None, next_token=None, **params): + q = Query(cls, limit=limit, next_token=next_token) + for key, value in params.items(): + q.filter('%s =' % key, value) + return q + + @classmethod + def lookup(cls, name, value): + return cls._manager.lookup(cls, name, value) + + @classmethod + def all(cls, limit=None, next_token=None): + return cls.find(limit=limit, next_token=next_token) + + @classmethod + def get_or_insert(key_name, **kw): + raise NotImplementedError, "get_or_insert not currently supported" + + @classmethod + def properties(cls, hidden=True): + properties = [] + while cls: + for key in cls.__dict__.keys(): + prop = cls.__dict__[key] + if isinstance(prop, Property): + if hidden or not prop.__class__.__name__.startswith('_'): + properties.append(prop) + if len(cls.__bases__) > 0: + cls = cls.__bases__[0] + else: + cls = None + return properties + + @classmethod + def find_property(cls, prop_name): + property = None + while cls: + for key in cls.__dict__.keys(): + prop = cls.__dict__[key] + if isinstance(prop, Property): + if not prop.__class__.__name__.startswith('_') and prop_name == prop.name: + property = prop + if len(cls.__bases__) > 0: + cls = 
cls.__bases__[0] + else: + cls = None + return property + + @classmethod + def get_xmlmanager(cls): + if not hasattr(cls, '_xmlmanager'): + from boto.sdb.db.manager.xmlmanager import XMLManager + cls._xmlmanager = XMLManager(cls, None, None, None, + None, None, None, None, False) + return cls._xmlmanager + + @classmethod + def from_xml(cls, fp): + xmlmanager = cls.get_xmlmanager() + return xmlmanager.unmarshal_object(fp) + + def __init__(self, id=None, **kw): + self._loaded = False + # first initialize all properties to their default values + for prop in self.properties(hidden=False): + setattr(self, prop.name, prop.default_value()) + if kw.has_key('manager'): + self._manager = kw['manager'] + self.id = id + for key in kw: + if key != 'manager': + # We don't want any errors populating up when loading an object, + # so if it fails we just revert to it's default value + try: + setattr(self, key, kw[key]) + except Exception, e: + boto.log.exception(e) + + def __repr__(self): + return '%s<%s>' % (self.__class__.__name__, self.id) + + def __str__(self): + return str(self.id) + + def __eq__(self, other): + return other and isinstance(other, Model) and self.id == other.id + + def _get_raw_item(self): + return self._manager.get_raw_item(self) + + def load(self): + if self.id and not self._loaded: + self._manager.load_object(self) + + def put(self): + self._manager.save_object(self) + + save = put + + def delete(self): + self._manager.delete_object(self) + + def key(self): + return Key(obj=self) + + def set_manager(self, manager): + self._manager = manager + + def to_dict(self): + props = {} + for prop in self.properties(hidden=False): + props[prop.name] = getattr(self, prop.name) + obj = {'properties' : props, + 'id' : self.id} + return {self.__class__.__name__ : obj} + + def to_xml(self, doc=None): + xmlmanager = self.get_xmlmanager() + doc = xmlmanager.marshal_object(self, doc) + return doc + +class Expando(Model): + + def __setattr__(self, name, value): + if name in self._prop_names: + object.__setattr__(self, name, value) + elif name.startswith('_'): + object.__setattr__(self, name, value) + elif name == 'id': + object.__setattr__(self, name, value) + else: + self._manager.set_key_value(self, name, value) + object.__setattr__(self, name, value) + + def __getattr__(self, name): + if not name.startswith('_'): + value = self._manager.get_key_value(self, name) + if value: + object.__setattr__(self, name, value) + return value + raise AttributeError + + diff --git a/vendor/boto/boto/sdb/db/property.py b/vendor/boto/boto/sdb/db/property.py new file mode 100644 index 000000000000..c7993ae7cf72 --- /dev/null +++ b/vendor/boto/boto/sdb/db/property.py @@ -0,0 +1,556 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import datetime +from key import Key +from boto.utils import Password +from boto.sdb.db.query import Query +import re +import boto +import boto.s3.key +from boto.sdb.db.blob import Blob + +class Property(object): + + data_type = str + type_name = '' + name = '' + verbose_name = '' + + def __init__(self, verbose_name=None, name=None, default=None, required=False, + validator=None, choices=None, unique=False): + self.verbose_name = verbose_name + self.name = name + self.default = default + self.required = required + self.validator = validator + self.choices = choices + self.slot_name = '_' + self.unique = unique + + def __get__(self, obj, objtype): + if obj: + obj.load() + return getattr(obj, self.slot_name) + else: + return None + + def __set__(self, obj, value): + self.validate(value) + + # Fire off any on_set functions + try: + if obj._loaded and hasattr(obj, "on_set_%s" % self.name): + fnc = getattr(obj, "on_set_%s" % self.name) + value = fnc(value) + except Exception: + boto.log.exception("Exception running on_set_%s" % self.name) + + setattr(obj, self.slot_name, value) + + def __property_config__(self, model_class, property_name): + self.model_class = model_class + self.name = property_name + self.slot_name = '_' + self.name + + def default_validator(self, value): + if value == self.default_value(): + return + if not isinstance(value, self.data_type): + raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value)) + + def default_value(self): + return self.default + + def validate(self, value): + if self.required and value==None: + raise ValueError, '%s is a required property' % self.name + if self.choices and value and not value in self.choices: + raise ValueError, '%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name) + if self.validator: + self.validator(value) + else: + self.default_validator(value) + return value + + def empty(self, value): + return not value + + def get_value_for_datastore(self, model_instance): + return getattr(model_instance, self.name) + + def make_value_from_datastore(self, value): + return value + + def get_choices(self): + if callable(self.choices): + return self.choices() + return self.choices + +def validate_string(value): + if isinstance(value, str) or isinstance(value, unicode): + if len(value) > 1024: + raise ValueError, 'Length of value greater than maxlength' + else: + raise TypeError, 'Expecting String, got %s' % type(value) + +class StringProperty(Property): + + type_name = 'String' + + def __init__(self, verbose_name=None, name=None, default='', required=False, + validator=validate_string, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + +class TextProperty(Property): + + type_name = 'Text' + + def __init__(self, verbose_name=None, name=None, default='', required=False, + validator=None, choices=None, unique=False, max_length=None): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + self.max_length = max_length + + 
def validate(self, value): + if not isinstance(value, str) and not isinstance(value, unicode): + raise TypeError, 'Expecting Text, got %s' % type(value) + if self.max_length and len(value) > self.max_length: + raise ValueError, 'Length of value greater than maxlength %s' % self.max_length + +class PasswordProperty(StringProperty): + """ + Hashed property who's original value can not be + retrieved, but still can be compaired. + """ + data_type = Password + type_name = 'Password' + + def __init__(self, verbose_name=None, name=None, default='', required=False, + validator=None, choices=None, unique=False): + StringProperty.__init__(self, verbose_name, name, default, required, validator, choices, unique) + + def make_value_from_datastore(self, value): + p = Password(value) + return p + + def get_value_for_datastore(self, model_instance): + value = StringProperty.get_value_for_datastore(self, model_instance) + if value and len(value): + return str(value) + else: + return None + + def __set__(self, obj, value): + if not isinstance(value, Password): + p = Password() + p.set(value) + value = p + Property.__set__(self, obj, value) + + def __get__(self, obj, objtype): + return Password(StringProperty.__get__(self, obj, objtype)) + + def validate(self, value): + value = Property.validate(self, value) + if isinstance(value, Password): + if len(value) > 1024: + raise ValueError, 'Length of value greater than maxlength' + else: + raise TypeError, 'Expecting Password, got %s' % type(value) + +class BlobProperty(Property): + data_type = Blob + type_name = "blob" + + def __set__(self, obj, value): + if value != self.default_value(): + if not isinstance(value, Blob): + oldb = self.__get__(obj, type(obj)) + id = None + if oldb: + id = oldb.id + b = Blob(value=value, id=id) + value = b + Property.__set__(self, obj, value) + +class S3KeyProperty(Property): + + data_type = boto.s3.key.Key + type_name = 'S3Key' + validate_regex = "^s3:\/\/([^\/]*)\/(.*)$" + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, + validator, choices, unique) + + def validate(self, value): + if value == self.default_value() or value == str(self.default_value()): + return self.default_value() + if isinstance(value, self.data_type): + return + match = re.match(self.validate_regex, value) + if match: + return + raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value)) + + def __get__(self, obj, objtype): + value = Property.__get__(self, obj, objtype) + if value: + if isinstance(value, self.data_type): + return value + match = re.match(self.validate_regex, value) + if match: + s3 = obj._manager.get_s3_connection() + bucket = s3.get_bucket(match.group(1), validate=False) + k = bucket.get_key(match.group(2)) + if not k: + k = bucket.new_key(match.group(2)) + k.set_contents_from_string("") + return k + else: + return value + + def get_value_for_datastore(self, model_instance): + value = Property.get_value_for_datastore(self, model_instance) + if value: + return "s3://%s/%s" % (value.bucket.name, value.name) + else: + return None + +class IntegerProperty(Property): + + data_type = int + type_name = 'Integer' + + def __init__(self, verbose_name=None, name=None, default=0, required=False, + validator=None, choices=None, unique=False, max=2147483647, min=-2147483648): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + self.max = 
max + self.min = min + + def validate(self, value): + value = int(value) + value = Property.validate(self, value) + if value > self.max: + raise ValueError, 'Maximum value is %d' % self.max + if value < self.min: + raise ValueError, 'Minimum value is %d' % self.min + return value + + def empty(self, value): + return value is None + +class LongProperty(Property): + + data_type = long + type_name = 'Long' + + def __init__(self, verbose_name=None, name=None, default=0, required=False, + validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = long(value) + value = Property.validate(self, value) + min = -9223372036854775808 + max = 9223372036854775807 + if value > max: + raise ValueError, 'Maximum value is %d' % max + if value < min: + raise ValueError, 'Minimum value is %d' % min + return value + + def empty(self, value): + return value is None + +class BooleanProperty(Property): + + data_type = bool + type_name = 'Boolean' + + def __init__(self, verbose_name=None, name=None, default=False, required=False, + validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + + def empty(self, value): + return value is None + +class FloatProperty(Property): + + data_type = float + type_name = 'Float' + + def __init__(self, verbose_name=None, name=None, default=0.0, required=False, + validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + + def validate(self, value): + value = float(value) + value = Property.validate(self, value) + return value + + def empty(self, value): + return value is None + +class DateTimeProperty(Property): + + data_type = datetime.datetime + type_name = 'DateTime' + + def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None, + default=None, required=False, validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + self.auto_now = auto_now + self.auto_now_add = auto_now_add + + def default_value(self): + if self.auto_now or self.auto_now_add: + return self.now() + return Property.default_value(self) + + def validate(self, value): + if value == None: + return + if not isinstance(value, self.data_type): + raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value)) + + def get_value_for_datastore(self, model_instance): + if self.auto_now: + setattr(model_instance, self.name, self.now()) + return Property.get_value_for_datastore(self, model_instance) + + def now(self): + return datetime.datetime.utcnow() + +class ReferenceProperty(Property): + + data_type = Key + type_name = 'Reference' + + def __init__(self, reference_class=None, collection_name=None, + verbose_name=None, name=None, default=None, required=False, validator=None, choices=None, unique=False): + Property.__init__(self, verbose_name, name, default, required, validator, choices, unique) + self.reference_class = reference_class + self.collection_name = collection_name + + def __get__(self, obj, objtype): + if obj: + value = getattr(obj, self.slot_name) + if value == self.default_value(): + return value + # If the value is still the UUID for the referenced object, we need to create + # the object now that is the attribute has actually been accessed. 
This lazy + # instantiation saves unnecessary roundtrips to SimpleDB + if isinstance(value, str) or isinstance(value, unicode): + value = self.reference_class(value) + setattr(obj, self.name, value) + return value + + def __property_config__(self, model_class, property_name): + Property.__property_config__(self, model_class, property_name) + if self.collection_name is None: + self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name) + if hasattr(self.reference_class, self.collection_name): + raise ValueError, 'duplicate property: %s' % self.collection_name + setattr(self.reference_class, self.collection_name, + _ReverseReferenceProperty(model_class, property_name, self.collection_name)) + + def check_uuid(self, value): + # This does a bit of hand waving to "type check" the string + t = value.split('-') + if len(t) != 5: + raise ValueError + + def check_instance(self, value): + try: + obj_lineage = value.get_lineage() + cls_lineage = self.reference_class.get_lineage() + if obj_lineage.startswith(cls_lineage): + return + raise TypeError, '%s not instance of %s' % (obj_lineage, cls_lineage) + except: + raise ValueError, '%s is not a Model' % value + + def validate(self, value): + if self.required and value==None: + raise ValueError, '%s is a required property' % self.name + if value == self.default_value(): + return + if not isinstance(value, str) and not isinstance(value, unicode): + self.check_instance(value) + +class _ReverseReferenceProperty(Property): + data_type = Query + type_name = 'query' + + def __init__(self, model, prop, name): + self.__model = model + self.__property = prop + self.collection_name = prop + self.name = name + self.item_type = model + + def __get__(self, model_instance, model_class): + """Fetches collection of model instances of this collection property.""" + if model_instance is not None: + query = Query(self.__model) + if type(self.__property) == list: + props = [] + for prop in self.__property: + props.append("%s =" % prop) + return query.filter(props, model_instance) + else: + return query.filter(self.__property + ' =', model_instance) + else: + return self + + def __set__(self, model_instance, value): + """Not possible to set a new collection.""" + raise ValueError, 'Virtual property is read-only' + + +class CalculatedProperty(Property): + + def __init__(self, verbose_name=None, name=None, default=None, + required=False, validator=None, choices=None, + calculated_type=int, unique=False, use_method=False): + Property.__init__(self, verbose_name, name, default, required, + validator, choices, unique) + self.calculated_type = calculated_type + self.use_method = use_method + + def __get__(self, obj, objtype): + value = self.default_value() + if obj: + try: + value = getattr(obj, self.slot_name) + if self.use_method: + value = value() + except AttributeError: + pass + return value + + def __set__(self, obj, value): + """Not possible to set a new AutoID.""" + pass + + def _set_direct(self, obj, value): + if not self.use_method: + setattr(obj, self.slot_name, value) + + def get_value_for_datastore(self, model_instance): + if self.calculated_type in [str, int, bool]: + value = self.__get__(model_instance, model_instance.__class__) + return value + else: + return None + +class ListProperty(Property): + + data_type = list + type_name = 'List' + + def __init__(self, item_type, verbose_name=None, name=None, default=None, **kwds): + if default is None: + default = [] + self.item_type = item_type + Property.__init__(self, verbose_name, name, 
default=default, required=True, **kwds) + + def validate(self, value): + if value is not None: + if not isinstance(value, list): + value = [value] + + if self.item_type in (int, long): + item_type = (int, long) + elif self.item_type in (str, unicode): + item_type = (str, unicode) + else: + item_type = self.item_type + + for item in value: + if not isinstance(item, item_type): + if item_type == (int, long): + raise ValueError, 'Items in the %s list must all be integers.' % self.name + else: + raise ValueError('Items in the %s list must all be %s instances' % + (self.name, self.item_type.__name__)) + return value + + def empty(self, value): + return value is None + + def default_value(self): + return list(super(ListProperty, self).default_value()) + + def __set__(self, obj, value): + """Override the set method to allow them to set the property to an instance of the item_type instead of requiring a list to be passed in""" + if self.item_type in (int, long): + item_type = (int, long) + elif self.item_type in (str, unicode): + item_type = (str, unicode) + else: + item_type = self.item_type + if isinstance(value, item_type): + value = [value] + elif value == None: # Override to allow them to set this to "None" to remove everything + value = [] + return super(ListProperty, self).__set__(obj,value) + + +class MapProperty(Property): + + data_type = dict + type_name = 'Map' + + def __init__(self, item_type=str, verbose_name=None, name=None, default=None, **kwds): + if default is None: + default = {} + self.item_type = item_type + Property.__init__(self, verbose_name, name, default=default, required=True, **kwds) + + def validate(self, value): + if value is not None: + if not isinstance(value, dict): + raise ValueError, 'Value must of type dict' + + if self.item_type in (int, long): + item_type = (int, long) + elif self.item_type in (str, unicode): + item_type = (str, unicode) + else: + item_type = self.item_type + + for key in value: + if not isinstance(value[key], item_type): + if item_type == (int, long): + raise ValueError, 'Values in the %s Map must all be integers.' % self.name + else: + raise ValueError('Values in the %s Map must all be %s instances' % + (self.name, self.item_type.__name__)) + return value + + def empty(self, value): + return value is None + + def default_value(self): + return {} diff --git a/vendor/boto/boto/sdb/db/query.py b/vendor/boto/boto/sdb/db/query.py new file mode 100644 index 000000000000..27987a311bfe --- /dev/null +++ b/vendor/boto/boto/sdb/db/query.py @@ -0,0 +1,79 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +class Query(object): + __local_iter__ = None + def __init__(self, model_class, limit=None, next_token=None, manager=None): + self.model_class = model_class + self.limit = limit + if manager: + self.manager = manager + else: + self.manager = self.model_class._manager + self.filters = [] + self.sort_by = None + self.rs = None + self.next_token = next_token + + def __iter__(self): + return iter(self.manager.query(self)) + + def next(self): + if self.__local_iter__ == None: + self.__local_iter__ = self.__iter__() + return self.__local_iter__.next() + + def filter(self, property_operator, value): + self.filters.append((property_operator, value)) + return self + + def fetch(self, limit, offset=0): + raise NotImplementedError, "fetch mode is not currently supported" + + def count(self): + return self.manager.count(self.model_class, self.filters) + + def get_query(self): + return self.manager._build_filter_part(self.model_class, self.filters, self.sort_by) + + def order(self, key): + self.sort_by = key + return self + + def to_xml(self, doc=None): + if not doc: + xmlmanager = self.model_class.get_xmlmanager() + doc = xmlmanager.new_doc() + for obj in self: + obj.to_xml(doc) + return doc + + def get_next_token(self): + if self.rs: + return self.rs.next_token + if self._next_token: + return self._next_token + return None + + def set_next_token(self, token): + self._next_token = token + + next_token = property(get_next_token, set_next_token) diff --git a/vendor/boto/boto/sdb/db/sequence.py b/vendor/boto/boto/sdb/db/sequence.py new file mode 100644 index 000000000000..0a2ad326b852 --- /dev/null +++ b/vendor/boto/boto/sdb/db/sequence.py @@ -0,0 +1,224 @@ +# Copyright (c) 2010 Chris Moyer http://coredumped.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import SDBResponseError + +class SequenceGenerator(object): + """Generic Sequence Generator object, this takes a single + string as the "sequence" and uses that to figure out + what the next value in a string is. For example + if you give "ABC" and pass in "A" it will give you "B", + and if you give it "C" it will give you "AA". + + If you set "rollover" to True in the above example, passing + in "C" would give you "A" again. 
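+
+    Similarly, passing in "BC" (with rollover off) would give you "CA": the
+    rightmost element wraps back to the start of the sequence and the element
+    to its left is incremented.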
+ + The Sequence string can be a string or any iterable + that has the "index" function and is indexable. + """ + __name__ = "SequenceGenerator" + + def __init__(self, sequence_string, rollover=False): + """Create a new SequenceGenerator using the sequence_string + as how to generate the next item. + + :param sequence_string: The string or list that explains + how to generate the next item in the sequence + :type sequence_string: str,iterable + + :param rollover: Rollover instead of incrementing when + we hit the end of the sequence + :type rollover: bool + """ + self.sequence_string = sequence_string + self.sequence_length = len(sequence_string[0]) + self.rollover = rollover + self.last_item = sequence_string[-1] + self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string) + + def __call__(self, val, last=None): + """Get the next value in the sequence""" + # If they pass us in a string that's not at least + # the lenght of our sequence, then return the + # first element in our sequence + if val == None or len(val) < self.sequence_length: + return self.sequence_string[0] + last_value = val[-self.sequence_length:] + if (not self.rollover) and (last_value == self.last_item): + val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value)) + else: + val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value)) + return val + + def _inc(self, val): + """Increment a single value""" + assert(len(val) == self.sequence_length) + return self.sequence_string[(self.sequence_string.index(val)+1) % len(self.sequence_string)] + + + +# +# Simple Sequence Functions +# +def increment_by_one(cv=None, lv=None): + if cv == None: + return 0 + return cv + 1 + +def double(cv=None, lv=None): + if cv == None: + return 1 + return cv * 2 + +def fib(cv=1, lv=0): + """The fibonacci sequence, this incrementer uses the + last value""" + if cv == None: + cv = 1 + if lv == None: + lv = 0 + return cv + lv + +increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ") + + + +class Sequence(object): + """A simple Sequence using the new SDB "Consistent" features + Based largly off of the "Counter" example from mitch garnaat: + http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py""" + + + def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None): + """Create a new Sequence, using an optional function to + increment to the next number, by default we just increment by one. + Every parameter here is optional, if you don't specify any options + then you'll get a new SequenceGenerator with a random ID stored in the + default domain that increments by one and uses the default botoweb + environment + + :param id: Optional ID (name) for this counter + :type id: str + + :param domain_name: Optional domain name to use, by default we get this out of the + environment configuration + :type domain_name:str + + :param fnc: Optional function to use for the incrementation, by default we just increment by one + There are several functions defined in this module. 
+ Your function must accept "None" to get the initial value + :type fnc: function, str + + :param init_val: Initial value, by default this is the first element in your sequence, + but you can pass in any value, even a string if you pass in a function that uses + strings instead of ints to increment + """ + self._db = None + self._value = None + self.last_value = None + self.domain_name = domain_name + self.id = id + if self.id == None: + import uuid + self.id = str(uuid.uuid4()) + if init_val == None: + init_val = fnc(init_val) + self.val = init_val + + self.item_type = type(fnc(None)) + self.timestamp = None + # Allow us to pass in a full name to a function + if type(fnc) == str: + from boto.utils import find_class + fnc = find_class(fnc) + self.fnc = fnc + + def set(self, val): + """Set the value""" + import time + now = time.time() + expected_values = [] + new_val = {} + new_val['timestamp'] = now + if self._value != None: + new_val['last_value'] = self._value + expected_values = ['current_value', str(self._value)] + new_val['current_value'] = val + try: + self.db.put_attributes(self.id, new_val, expected_values=expected_values) + self.timestamp = new_val['timestamp'] + except SDBResponseError, e: + if e.status == 409: + raise ValueError, "Sequence out of sync" + else: + raise + + + def get(self): + """Get the value""" + val = self.db.get_attributes(self.id, consistent_read=True) + if val and val.has_key('timestamp'): + self.timestamp = val['timestamp'] + if val and val.has_key('current_value'): + self._value = self.item_type(val['current_value']) + if val.has_key("last_value") and val['last_value'] != None: + self.last_value = self.item_type(val['last_value']) + return self._value + + val = property(get, set) + + def __repr__(self): + return "%s('%s', '%s', '%s.%s', '%s')" % ( + self.__class__.__name__, + self.id, + self.domain_name, + self.fnc.__module__, self.fnc.__name__, + self.val) + + + def _connect(self): + """Connect to our domain""" + if not self._db: + if not self.domain_name: + import boto + sdb = boto.connect_sdb() + self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default")) + try: + self._db = sdb.get_domain(self.domain_name) + except SDBResponseError, e: + if e.status == 400: + self._db = sdb.create_domain(self.domain_name) + else: + raise + return self._db + + db = property(_connect) + + def next(self): + self.val = self.fnc(self.val, self.last_value) + return self.val + + def delete(self): + """Remove this sequence""" + self.db.delete_attributes(self.id) + + def __del__(self): + self.delete() diff --git a/vendor/boto/boto/sdb/db/test_db.py b/vendor/boto/boto/sdb/db/test_db.py new file mode 100644 index 000000000000..0c345abd4a6f --- /dev/null +++ b/vendor/boto/boto/sdb/db/test_db.py @@ -0,0 +1,225 @@ +from boto.sdb.db.model import Model +from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty +from boto.sdb.db.property import DateTimeProperty, FloatProperty, ReferenceProperty +from boto.sdb.db.property import PasswordProperty, ListProperty, MapProperty +from datetime import datetime +import time +from boto.exception import SDBPersistenceError + +_objects = {} + +# +# This will eventually be moved to the boto.tests module and become a real unit test +# but for now it will live here. It shows examples of each of the Property types in +# use and tests the basic operations. 
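+# Note that the tests run against a live backend through each model's
+# configured manager (typically SimpleDB), so valid AWS credentials are
+# required, and the time.sleep() calls between saving and re-fetching objects
+# give the eventually-consistent domain time to catch up.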
+# +class TestBasic(Model): + + name = StringProperty() + size = IntegerProperty() + foo = BooleanProperty() + date = DateTimeProperty() + +class TestFloat(Model): + + name = StringProperty() + value = FloatProperty() + +class TestRequired(Model): + + req = StringProperty(required=True, default='foo') + +class TestReference(Model): + + ref = ReferenceProperty(reference_class=TestBasic, collection_name='refs') + +class TestSubClass(TestBasic): + + answer = IntegerProperty() + +class TestPassword(Model): + password = PasswordProperty() + +class TestList(Model): + + name = StringProperty() + nums = ListProperty(int) + +class TestMap(Model): + + name = StringProperty() + map = MapProperty() + +class TestListReference(Model): + + name = StringProperty() + basics = ListProperty(TestBasic) + +class TestAutoNow(Model): + + create_date = DateTimeProperty(auto_now_add=True) + modified_date = DateTimeProperty(auto_now=True) + +class TestUnique(Model): + name = StringProperty(unique=True) + +def test_basic(): + global _objects + t = TestBasic() + t.name = 'simple' + t.size = -42 + t.foo = True + t.date = datetime.now() + print 'saving object' + t.put() + _objects['test_basic_t'] = t + time.sleep(5) + print 'now try retrieving it' + tt = TestBasic.get_by_id(t.id) + _objects['test_basic_tt'] = tt + assert tt.id == t.id + l = TestBasic.get_by_id([t.id]) + assert len(l) == 1 + assert l[0].id == t.id + assert t.size == tt.size + assert t.foo == tt.foo + assert t.name == tt.name + #assert t.date == tt.date + return t + +def test_float(): + global _objects + t = TestFloat() + t.name = 'float object' + t.value = 98.6 + print 'saving object' + t.save() + _objects['test_float_t'] = t + time.sleep(5) + print 'now try retrieving it' + tt = TestFloat.get_by_id(t.id) + _objects['test_float_tt'] = tt + assert tt.id == t.id + assert tt.name == t.name + assert tt.value == t.value + return t + +def test_required(): + global _objects + t = TestRequired() + _objects['test_required_t'] = t + t.put() + return t + +def test_reference(t=None): + global _objects + if not t: + t = test_basic() + tt = TestReference() + tt.ref = t + tt.put() + time.sleep(10) + tt = TestReference.get_by_id(tt.id) + _objects['test_reference_tt'] = tt + assert tt.ref.id == t.id + for o in t.refs: + print o + +def test_subclass(): + global _objects + t = TestSubClass() + _objects['test_subclass_t'] = t + t.name = 'a subclass' + t.size = -489 + t.save() + +def test_password(): + global _objects + t = TestPassword() + _objects['test_password_t'] = t + t.password = "foo" + t.save() + time.sleep(5) + # Make sure it stored ok + tt = TestPassword.get_by_id(t.id) + _objects['test_password_tt'] = tt + #Testing password equality + assert tt.password == "foo" + #Testing password not stored as string + assert str(tt.password) != "foo" + +def test_list(): + global _objects + t = TestList() + _objects['test_list_t'] = t + t.name = 'a list of ints' + t.nums = [1,2,3,4,5] + t.put() + tt = TestList.get_by_id(t.id) + _objects['test_list_tt'] = tt + assert tt.name == t.name + for n in tt.nums: + assert isinstance(n, int) + +def test_list_reference(): + global _objects + t = TestBasic() + t.put() + _objects['test_list_ref_t'] = t + tt = TestListReference() + tt.name = "foo" + tt.basics = [t] + tt.put() + time.sleep(5) + _objects['test_list_ref_tt'] = tt + ttt = TestListReference.get_by_id(tt.id) + assert ttt.basics[0].id == t.id + +def test_unique(): + global _objects + t = TestUnique() + name = 'foo' + str(int(time.time())) + t.name = name + t.put() + 
_objects['test_unique_t'] = t + time.sleep(10) + tt = TestUnique() + _objects['test_unique_tt'] = tt + tt.name = name + try: + tt.put() + assert False + except(SDBPersistenceError): + pass + +def test_datetime(): + global _objects + t = TestAutoNow() + t.put() + _objects['test_datetime_t'] = t + time.sleep(5) + tt = TestAutoNow.get_by_id(t.id) + assert tt.create_date.timetuple() == t.create_date.timetuple() + +def test(): + print 'test_basic' + t1 = test_basic() + print 'test_required' + test_required() + print 'test_reference' + test_reference(t1) + print 'test_subclass' + test_subclass() + print 'test_password' + test_password() + print 'test_list' + test_list() + print 'test_list_reference' + test_list_reference() + print "test_datetime" + test_datetime() + print 'test_unique' + test_unique() + +if __name__ == "__main__": + test() diff --git a/vendor/boto/boto/sdb/domain.py b/vendor/boto/boto/sdb/domain.py new file mode 100644 index 000000000000..17739eead049 --- /dev/null +++ b/vendor/boto/boto/sdb/domain.py @@ -0,0 +1,337 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an SDB Domain +""" +from boto.sdb.queryresultset import QueryResultSet, SelectResultSet + +class Domain: + + def __init__(self, connection=None, name=None): + self.connection = connection + self.name = name + self._metadata = None + + def __repr__(self): + return 'Domain:%s' % self.name + + def __iter__(self): + return iter(self.select("SELECT * FROM `%s`" % self.name)) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'DomainName': + self.name = value + else: + setattr(self, name, value) + + def get_metadata(self): + if not self._metadata: + self._metadata = self.connection.domain_metadata(self) + return self._metadata + + def put_attributes(self, item_name, attributes, + replace=True, expected_values=None): + """ + Store attributes for a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being stored. + + :type attribute_names: dict or dict-like object + :param attribute_names: The name/value pairs to store as attributes + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. 
+ The list can be of the form: + * ['name', 'value'] + In which case the call will first verify + that the attribute "name" of this item has + a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed + error will be returned. + The list can also be of the form: + * ['name', True|False] + which will simply check for the existence (True) + or non-existencve (False) of the attribute. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + return self.connection.put_attributes(self, item_name, attributes, + replace, expected_values) + + def batch_put_attributes(self, items, replace=True): + """ + Store attributes for multiple items. + + :type items: dict or dict-like object + :param items: A dictionary-like object. The keys of the dictionary are + the item names and the values are themselves dictionaries + of attribute names/values, exactly the same as the + attribute_names parameter of the scalar put_attributes + call. + + :type replace: bool + :param replace: Whether the attribute values passed in will replace + existing values or will be added as addition values. + Defaults to True. + + :rtype: bool + :return: True if successful + """ + return self.connection.batch_put_attributes(self, items, replace) + + def get_attributes(self, item_name, attribute_name=None, + consistent_read=False, item=None): + """ + Retrieve attributes for a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being retrieved. + + :type attribute_names: string or list of strings + :param attribute_names: An attribute name or list of attribute names. This + parameter is optional. If not supplied, all attributes + will be retrieved for the item. + + :rtype: :class:`boto.sdb.item.Item` + :return: An Item mapping type containing the requested attribute name/values + """ + return self.connection.get_attributes(self, item_name, attribute_name, + consistent_read, item) + + def delete_attributes(self, item_name, attributes=None, + expected_values=None): + """ + Delete attributes from a given item. + + :type item_name: string + :param item_name: The name of the item whose attributes are being deleted. + + :type attributes: dict, list or :class:`boto.sdb.item.Item` + :param attributes: Either a list containing attribute names which will cause + all values associated with that attribute name to be deleted or + a dict or Item containing the attribute names and keys and list + of values to delete as the value. If no value is supplied, + all attribute name/values for the item will be deleted. + + :type expected_value: list + :param expected_value: If supplied, this is a list or tuple consisting + of a single attribute name and expected value. + The list can be of the form: + * ['name', 'value'] + In which case the call will first verify + that the attribute "name" of this item has + a value of "value". If it does, the delete + will proceed, otherwise a ConditionalCheckFailed + error will be returned. + The list can also be of the form: + * ['name', True|False] + which will simply check for the existence (True) + or non-existencve (False) of the attribute. 
+ + :rtype: bool + :return: True if successful + """ + return self.connection.delete_attributes(self, item_name, attributes, + expected_values) + + def select(self, query='', next_token=None, consistent_read=False, max_items=None): + """ + Returns a set of Attributes for item names within domain_name that match the query. + The query must be expressed in using the SELECT style syntax rather than the + original SimpleDB query language. + + :type query: string + :param query: The SimpleDB query to be performed. + + :rtype: iter + :return: An iterator containing the results. This is actually a generator + function that will iterate across all search results, not just the + first page. + """ + return SelectResultSet(self, query, max_items = max_items, next_token=next_token, + consistent_read=consistent_read) + + def get_item(self, item_name): + item = self.get_attributes(item_name) + if item: + item.domain = self + return item + else: + return None + + def new_item(self, item_name): + return self.connection.item_cls(self, item_name) + + def delete_item(self, item): + self.delete_attributes(item.name) + + def to_xml(self, f=None): + """Get this domain as an XML DOM Document + :param f: Optional File to dump directly to + :type f: File or Stream + + :return: File object where the XML has been dumped to + :rtype: file + """ + if not f: + from tempfile import TemporaryFile + f = TemporaryFile() + print >>f, '' + print >>f, '' % self.name + for item in self: + print >>f, '\t' % item.name + for k in item: + print >>f, '\t\t' % k + values = item[k] + if not isinstance(values, list): + values = [values] + for value in values: + print >>f, '\t\t\t>f, ']]>' + print >>f, '\t\t' + print >>f, '\t' + print >>f, '' + f.flush() + f.seek(0) + return f + + + def from_xml(self, doc): + """Load this domain based on an XML document""" + import xml.sax + handler = DomainDumpParser(self) + xml.sax.parse(doc, handler) + return handler + + +class DomainMetaData: + + def __init__(self, domain=None): + self.domain = domain + self.item_count = None + self.item_names_size = None + self.attr_name_count = None + self.attr_names_size = None + self.attr_value_count = None + self.attr_values_size = None + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'ItemCount': + self.item_count = int(value) + elif name == 'ItemNamesSizeBytes': + self.item_names_size = int(value) + elif name == 'AttributeNameCount': + self.attr_name_count = int(value) + elif name == 'AttributeNamesSizeBytes': + self.attr_names_size = int(value) + elif name == 'AttributeValueCount': + self.attr_value_count = int(value) + elif name == 'AttributeValuesSizeBytes': + self.attr_values_size = int(value) + elif name == 'Timestamp': + self.timestamp = value + else: + setattr(self, name, value) + +import sys +from xml.sax.handler import ContentHandler +class DomainDumpParser(ContentHandler): + """ + SAX parser for a domain that has been dumped + """ + + def __init__(self, domain): + self.uploader = UploaderThread(domain) + self.item_id = None + self.attrs = {} + self.attribute = None + self.value = "" + self.domain = domain + + def startElement(self, name, attrs): + if name == "Item": + self.item_id = attrs['id'] + self.attrs = {} + elif name == "attribute": + self.attribute = attrs['id'] + elif name == "value": + self.value = "" + + def characters(self, ch): + self.value += ch + + def endElement(self, name): + if name == "value": + if self.value and self.attribute: + value = 
self.value.strip() + attr_name = self.attribute.strip() + if self.attrs.has_key(attr_name): + self.attrs[attr_name].append(value) + else: + self.attrs[attr_name] = [value] + elif name == "Item": + self.uploader.items[self.item_id] = self.attrs + # Every 20 items we spawn off the uploader + if len(self.uploader.items) >= 20: + self.uploader.start() + self.uploader = UploaderThread(self.domain) + elif name == "Domain": + # If we're done, spawn off our last Uploader Thread + self.uploader.start() + +from threading import Thread +class UploaderThread(Thread): + """Uploader Thread""" + + def __init__(self, domain): + self.db = domain + self.items = {} + Thread.__init__(self) + + def run(self): + try: + self.db.batch_put_attributes(self.items) + except: + print "Exception using batch put, trying regular put instead" + for item_name in self.items: + self.db.put_attributes(item_name, self.items[item_name]) + print ".", + sys.stdout.flush() diff --git a/vendor/boto/boto/sdb/item.py b/vendor/boto/boto/sdb/item.py new file mode 100644 index 000000000000..d6a56a95b93a --- /dev/null +++ b/vendor/boto/boto/sdb/item.py @@ -0,0 +1,105 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
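A small sketch of round-tripping a domain with the to_xml()/from_xml() helpers above. The domain names are hypothetical; from_xml() relies on the DomainDumpParser/UploaderThread pair just shown to push items back in batches.

import boto

sdb = boto.connect_sdb()
src = sdb.get_domain('prod-domain')            # hypothetical source domain
dst = sdb.create_domain('prod-domain-copy')    # hypothetical destination

dump = src.to_xml()     # defaults to a TemporaryFile, flushed and rewound
dst.from_xml(dump)      # SAX parse; items are uploaded in batches of 20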
+ +""" +Represents an SDB Item +""" + +import base64 + +class Item(dict): + + def __init__(self, domain, name='', active=False): + dict.__init__(self) + self.domain = domain + self.name = name + self.active = active + self.request_id = None + self.encoding = None + self.in_attribute = False + self.converter = self.domain.connection.converter + + def startElement(self, name, attrs, connection): + if name == 'Attribute': + self.in_attribute = True + self.encoding = attrs.get('encoding', None) + return None + + def decode_value(self, value): + if self.encoding == 'base64': + self.encoding = None + return base64.decodestring(value) + else: + return value + + def endElement(self, name, value, connection): + if name == 'ItemName': + self.name = self.decode_value(value) + elif name == 'Name': + if self.in_attribute: + self.last_key = self.decode_value(value) + else: + self.name = self.decode_value(value) + elif name == 'Value': + if self.has_key(self.last_key): + if not isinstance(self[self.last_key], list): + self[self.last_key] = [self[self.last_key]] + value = self.decode_value(value) + if self.converter: + value = self.converter.decode(value) + self[self.last_key].append(value) + else: + value = self.decode_value(value) + if self.converter: + value = self.converter.decode(value) + self[self.last_key] = value + elif name == 'BoxUsage': + try: + connection.box_usage += float(value) + except: + pass + elif name == 'RequestId': + self.request_id = value + elif name == 'Attribute': + self.in_attribute = False + else: + setattr(self, name, value) + + def load(self): + self.domain.get_attributes(self.name, item=self) + + def save(self, replace=True): + self.domain.put_attributes(self.name, self, replace) + + def add_value(self, key, value): + if key in self: + if not isinstance(self[key], list): + self[key] = [self[key]] + self[key].append(value) + else: + self[key] = value + + def delete(self): + self.domain.delete_item(self) + + + + diff --git a/vendor/boto/boto/sdb/persist/__init__.py b/vendor/boto/boto/sdb/persist/__init__.py new file mode 100644 index 000000000000..2f2b0c1d68ad --- /dev/null +++ b/vendor/boto/boto/sdb/persist/__init__.py @@ -0,0 +1,83 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +import boto +from boto.utils import find_class + +class Manager(object): + + DefaultDomainName = boto.config.get('Persist', 'default_domain', None) + + def __init__(self, domain_name=None, aws_access_key_id=None, aws_secret_access_key=None, debug=0): + self.domain_name = domain_name + self.aws_access_key_id = aws_access_key_id + self.aws_secret_access_key = aws_secret_access_key + self.domain = None + self.sdb = None + self.s3 = None + if not self.domain_name: + self.domain_name = self.DefaultDomainName + if self.domain_name: + boto.log.info('No SimpleDB domain set, using default_domain: %s' % self.domain_name) + else: + boto.log.warning('No SimpleDB domain set, persistance is disabled') + if self.domain_name: + self.sdb = boto.connect_sdb(aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.aws_secret_access_key, + debug=debug) + self.domain = self.sdb.lookup(self.domain_name) + if not self.domain: + self.domain = self.sdb.create_domain(self.domain_name) + + def get_s3_connection(self): + if not self.s3: + self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key) + return self.s3 + +def get_manager(domain_name=None, aws_access_key_id=None, aws_secret_access_key=None, debug=0): + return Manager(domain_name, aws_access_key_id, aws_secret_access_key, debug=debug) + +def set_domain(domain_name): + Manager.DefaultDomainName = domain_name + +def get_domain(): + return Manager.DefaultDomainName + +def revive_object_from_id(id, manager): + if not manager.domain: + return None + attrs = manager.domain.get_attributes(id, ['__module__', '__type__', '__lineage__']) + try: + cls = find_class(attrs['__module__'], attrs['__type__']) + return cls(id, manager=manager) + except ImportError: + return None + +def object_lister(cls, query_lister, manager): + for item in query_lister: + if cls: + yield cls(item.name) + else: + o = revive_object_from_id(item.name, manager) + if o: + yield o + + diff --git a/vendor/boto/boto/sdb/persist/checker.py b/vendor/boto/boto/sdb/persist/checker.py new file mode 100644 index 000000000000..e2146c973200 --- /dev/null +++ b/vendor/boto/boto/sdb/persist/checker.py @@ -0,0 +1,302 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
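A sketch of wiring up the persistence Manager defined above. The domain name is illustrative; note that SDBObject (later in this patch) captures get_manager() at class-definition time, so the default domain should be in place before persisted classes are defined.

from boto.sdb.persist import set_domain, get_manager

set_domain('my-persist-domain')     # hypothetical SimpleDB domain
manager = get_manager()             # looks up the domain, creating it if absent

# Alternatively, the default can live in the boto config file:
#   [Persist]
#   default_domain = my-persist-domain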
+ +from datetime import datetime +from boto.s3.key import Key +from boto.s3.bucket import Bucket +from boto.sdb.persist import revive_object_from_id +from boto.exception import SDBPersistenceError +from boto.utils import Password + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' + +class ValueChecker: + + def check(self, value): + """ + Checks a value to see if it is of the right type. + + Should raise a TypeError exception if an in appropriate value is passed in. + """ + raise TypeError + + def from_string(self, str_value, obj): + """ + Takes a string as input and returns the type-specific value represented by that string. + + Should raise a ValueError if the value cannot be converted to the appropriate type. + """ + raise ValueError + + def to_string(self, value): + """ + Convert a value to it's string representation. + + Should raise a ValueError if the value cannot be converted to a string representation. + """ + raise ValueError + +class StringChecker(ValueChecker): + + def __init__(self, **params): + if params.has_key('maxlength'): + self.maxlength = params['maxlength'] + else: + self.maxlength = 1024 + if params.has_key('default'): + self.check(params['default']) + self.default = params['default'] + else: + self.default = '' + + def check(self, value): + if isinstance(value, str) or isinstance(value, unicode): + if len(value) > self.maxlength: + raise ValueError, 'Length of value greater than maxlength' + else: + raise TypeError, 'Expecting String, got %s' % type(value) + + def from_string(self, str_value, obj): + return str_value + + def to_string(self, value): + self.check(value) + return value + +class PasswordChecker(StringChecker): + def check(self, value): + if isinstance(value, str) or isinstance(value, unicode) or isinstance(value, Password): + if len(value) > self.maxlength: + raise ValueError, 'Length of value greater than maxlength' + else: + raise TypeError, 'Expecting String, got %s' % type(value) + +class IntegerChecker(ValueChecker): + + __sizes__ = { 'small' : (65535, 32767, -32768, 5), + 'medium' : (4294967295, 2147483647, -2147483648, 10), + 'large' : (18446744073709551615, 9223372036854775807, -9223372036854775808, 20)} + + def __init__(self, **params): + self.size = params.get('size', 'medium') + if self.size not in self.__sizes__.keys(): + raise ValueError, 'size must be one of %s' % self.__sizes__.keys() + self.signed = params.get('signed', True) + self.default = params.get('default', 0) + self.format_string = '%%0%dd' % self.__sizes__[self.size][-1] + + def check(self, value): + if not isinstance(value, int) and not isinstance(value, long): + raise TypeError, 'Expecting int or long, got %s' % type(value) + if self.signed: + min = self.__sizes__[self.size][2] + max = self.__sizes__[self.size][1] + else: + min = 0 + max = self.__sizes__[self.size][0] + if value > max: + raise ValueError, 'Maximum value is %d' % max + if value < min: + raise ValueError, 'Minimum value is %d' % min + + def from_string(self, str_value, obj): + val = int(str_value) + if self.signed: + val = val + self.__sizes__[self.size][2] + return val + + def to_string(self, value): + self.check(value) + if self.signed: + value += -self.__sizes__[self.size][2] + return self.format_string % value + +class BooleanChecker(ValueChecker): + + def __init__(self, **params): + if params.has_key('default'): + self.default = params['default'] + else: + self.default = False + + def check(self, value): + if not isinstance(value, bool): + raise TypeError, 'Expecting bool, got %s' % type(value) + + def from_string(self, 
str_value, obj): + if str_value.lower() == 'true': + return True + else: + return False + + def to_string(self, value): + self.check(value) + if value == True: + return 'true' + else: + return 'false' + +class DateTimeChecker(ValueChecker): + + def __init__(self, **params): + if params.has_key('maxlength'): + self.maxlength = params['maxlength'] + else: + self.maxlength = 1024 + if params.has_key('default'): + self.default = params['default'] + else: + self.default = datetime.now() + + def check(self, value): + if not isinstance(value, datetime): + raise TypeError, 'Expecting datetime, got %s' % type(value) + + def from_string(self, str_value, obj): + try: + return datetime.strptime(str_value, ISO8601) + except: + raise ValueError, 'Unable to convert %s to DateTime' % str_value + + def to_string(self, value): + self.check(value) + return value.strftime(ISO8601) + +class ObjectChecker(ValueChecker): + + def __init__(self, **params): + self.default = None + self.ref_class = params.get('ref_class', None) + if self.ref_class == None: + raise SDBPersistenceError('ref_class parameter is required') + + def check(self, value): + if value == None: + return + if isinstance(value, str) or isinstance(value, unicode): + # ugly little hack - sometimes I want to just stick a UUID string + # in here rather than instantiate an object. + # This does a bit of hand waving to "type check" the string + t = value.split('-') + if len(t) != 5: + raise ValueError + else: + try: + obj_lineage = value.get_lineage() + cls_lineage = self.ref_class.get_lineage() + if obj_lineage.startswith(cls_lineage): + return + raise TypeError, '%s not instance of %s' % (obj_lineage, cls_lineage) + except: + raise ValueError, '%s is not an SDBObject' % value + + def from_string(self, str_value, obj): + if not str_value: + return None + try: + return revive_object_from_id(str_value, obj._manager) + except: + raise ValueError, 'Unable to convert %s to Object' % str_value + + def to_string(self, value): + self.check(value) + if isinstance(value, str) or isinstance(value, unicode): + return value + if value == None: + return '' + else: + return value.id + +class S3KeyChecker(ValueChecker): + + def __init__(self, **params): + self.default = None + + def check(self, value): + if value == None: + return + if isinstance(value, str) or isinstance(value, unicode): + try: + bucket_name, key_name = value.split('/', 1) + except: + raise ValueError + elif not isinstance(value, Key): + raise TypeError, 'Expecting Key, got %s' % type(value) + + def from_string(self, str_value, obj): + if not str_value: + return None + if str_value == 'None': + return None + try: + bucket_name, key_name = str_value.split('/', 1) + if obj: + s3 = obj._manager.get_s3_connection() + bucket = s3.get_bucket(bucket_name) + key = bucket.get_key(key_name) + if not key: + key = bucket.new_key(key_name) + return key + except: + raise ValueError, 'Unable to convert %s to S3Key' % str_value + + def to_string(self, value): + self.check(value) + if isinstance(value, str) or isinstance(value, unicode): + return value + if value == None: + return '' + else: + return '%s/%s' % (value.bucket.name, value.name) + +class S3BucketChecker(ValueChecker): + + def __init__(self, **params): + self.default = None + + def check(self, value): + if value == None: + return + if isinstance(value, str) or isinstance(value, unicode): + return + elif not isinstance(value, Bucket): + raise TypeError, 'Expecting Bucket, got %s' % type(value) + + def from_string(self, str_value, obj): + if not str_value: 
+ return None + if str_value == 'None': + return None + try: + if obj: + s3 = obj._manager.get_s3_connection() + bucket = s3.get_bucket(str_value) + return bucket + except: + raise ValueError, 'Unable to convert %s to S3Bucket' % str_value + + def to_string(self, value): + self.check(value) + if value == None: + return '' + else: + return '%s' % value.name + diff --git a/vendor/boto/boto/sdb/persist/object.py b/vendor/boto/boto/sdb/persist/object.py new file mode 100644 index 000000000000..993df1ee836b --- /dev/null +++ b/vendor/boto/boto/sdb/persist/object.py @@ -0,0 +1,207 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.exception import SDBPersistenceError +from boto.sdb.persist import get_manager, object_lister +from boto.sdb.persist.property import Property, ScalarProperty +import uuid + +class SDBBase(type): + "Metaclass for all SDBObjects" + def __init__(cls, name, bases, dict): + super(SDBBase, cls).__init__(name, bases, dict) + # Make sure this is a subclass of SDBObject - mainly copied from django ModelBase (thanks!) + try: + if filter(lambda b: issubclass(b, SDBObject), bases): + # look for all of the Properties and set their names + for key in dict.keys(): + if isinstance(dict[key], Property): + property = dict[key] + property.set_name(key) + prop_names = [] + props = cls.properties() + for prop in props: + prop_names.append(prop.name) + setattr(cls, '_prop_names', prop_names) + except NameError: + # 'SDBObject' isn't defined yet, meaning we're looking at our own + # SDBObject class, defined below. 
+ pass + +class SDBObject(object): + __metaclass__ = SDBBase + + _manager = get_manager() + + @classmethod + def get_lineage(cls): + l = [c.__name__ for c in cls.mro()] + l.reverse() + return '.'.join(l) + + @classmethod + def get(cls, id=None, **params): + if params.has_key('manager'): + manager = params['manager'] + else: + manager = cls._manager + if manager.domain and id: + a = cls._manager.domain.get_attributes(id, '__type__') + if a.has_key('__type__'): + return cls(id, manager) + else: + raise SDBPersistenceError('%s object with id=%s does not exist' % (cls.__name__, id)) + else: + rs = cls.find(**params) + try: + obj = rs.next() + except StopIteration: + raise SDBPersistenceError('%s object matching query does not exist' % cls.__name__) + try: + rs.next() + except StopIteration: + return obj + raise SDBPersistenceError('Query matched more than 1 item') + + @classmethod + def find(cls, **params): + if params.has_key('manager'): + manager = params['manager'] + del params['manager'] + else: + manager = cls._manager + keys = params.keys() + if len(keys) > 4: + raise SDBPersistenceError('Too many fields, max is 4') + parts = ["['__type__'='%s'] union ['__lineage__'starts-with'%s']" % (cls.__name__, cls.get_lineage())] + properties = cls.properties() + for key in keys: + found = False + for property in properties: + if property.name == key: + found = True + if isinstance(property, ScalarProperty): + checker = property.checker + parts.append("['%s' = '%s']" % (key, checker.to_string(params[key]))) + else: + raise SDBPersistenceError('%s is not a searchable field' % key) + if not found: + raise SDBPersistenceError('%s is not a valid field' % key) + query = ' intersection '.join(parts) + if manager.domain: + rs = manager.domain.query(query) + else: + rs = [] + return object_lister(None, rs, manager) + + @classmethod + def list(cls, max_items=None, manager=None): + if not manager: + manager = cls._manager + if manager.domain: + rs = manager.domain.query("['__type__' = '%s']" % cls.__name__, max_items=max_items) + else: + rs = [] + return object_lister(cls, rs, manager) + + @classmethod + def properties(cls): + properties = [] + while cls: + for key in cls.__dict__.keys(): + if isinstance(cls.__dict__[key], Property): + properties.append(cls.__dict__[key]) + if len(cls.__bases__) > 0: + cls = cls.__bases__[0] + else: + cls = None + return properties + + # for backwards compatibility + find_properties = properties + + def __init__(self, id=None, manager=None): + if manager: + self._manager = manager + self.id = id + if self.id: + self._auto_update = True + if self._manager.domain: + attrs = self._manager.domain.get_attributes(self.id, '__type__') + if len(attrs.keys()) == 0: + raise SDBPersistenceError('Object %s: not found' % self.id) + else: + self.id = str(uuid.uuid4()) + self._auto_update = False + + def __setattr__(self, name, value): + if name in self._prop_names: + object.__setattr__(self, name, value) + elif name.startswith('_'): + object.__setattr__(self, name, value) + elif name == 'id': + object.__setattr__(self, name, value) + else: + self._persist_attribute(name, value) + object.__setattr__(self, name, value) + + def __getattr__(self, name): + if not name.startswith('_'): + a = self._manager.domain.get_attributes(self.id, name) + if a.has_key(name): + object.__setattr__(self, name, a[name]) + return a[name] + raise AttributeError + + def __repr__(self): + return '%s<%s>' % (self.__class__.__name__, self.id) + + def _persist_attribute(self, name, value): + if self.id: + 
self._manager.domain.put_attributes(self.id, {name : value}, replace=True) + + def _get_sdb_item(self): + return self._manager.domain.get_item(self.id) + + def save(self): + attrs = {'__type__' : self.__class__.__name__, + '__module__' : self.__class__.__module__, + '__lineage__' : self.get_lineage()} + for property in self.properties(): + attrs[property.name] = property.to_string(self) + if self._manager.domain: + self._manager.domain.put_attributes(self.id, attrs, replace=True) + self._auto_update = True + + def delete(self): + if self._manager.domain: + self._manager.domain.delete_attributes(self.id) + + def get_related_objects(self, ref_name, ref_cls=None): + if self._manager.domain: + query = "['%s' = '%s']" % (ref_name, self.id) + if ref_cls: + query += " intersection ['__type__'='%s']" % ref_cls.__name__ + rs = self._manager.domain.query(query) + else: + rs = [] + return object_lister(ref_cls, rs, self._manager) + diff --git a/vendor/boto/boto/sdb/persist/property.py b/vendor/boto/boto/sdb/persist/property.py new file mode 100644 index 000000000000..4776d35d6d73 --- /dev/null +++ b/vendor/boto/boto/sdb/persist/property.py @@ -0,0 +1,371 @@ +# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
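An illustrative sketch of the SDBObject API that the Property classes in this file plug into (see also test_persist.py below). The class and field names are invented, and the find()/get_related_objects() lookups go through the older domain.query() interface rather than Select.

from boto.sdb.persist.object import SDBObject
from boto.sdb.persist.property import StringProperty, ObjectProperty

class Author(SDBObject):
    name = StringProperty()

class Book(SDBObject):
    title = StringProperty()
    author = ObjectProperty(ref_class=Author)

a = Author()
a.name = 'Mitch'
a.save()                        # stores __type__/__module__/__lineage__ + fields

b = Book()
b.title = 'SimpleDB notes'
b.author = a                    # persisted as the referenced object's id
b.save()

# reverse lookup: every Book whose 'author' attribute points at a
for book in a.get_related_objects('author', Book):
    print book.title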
+ +from boto.exception import SDBPersistenceError +from boto.sdb.persist.checker import StringChecker, PasswordChecker, IntegerChecker, BooleanChecker +from boto.sdb.persist.checker import DateTimeChecker, ObjectChecker, S3KeyChecker, S3BucketChecker +from boto.utils import Password + +class Property(object): + + def __init__(self, checker_class, **params): + self.name = '' + self.checker = checker_class(**params) + self.slot_name = '__' + + def set_name(self, name): + self.name = name + self.slot_name = '__' + self.name + +class ScalarProperty(Property): + + def save(self, obj): + domain = obj._manager.domain + domain.put_attributes(obj.id, {self.name : self.to_string(obj)}, replace=True) + + def to_string(self, obj): + return self.checker.to_string(getattr(obj, self.name)) + + def load(self, obj): + domain = obj._manager.domain + a = domain.get_attributes(obj.id, self.name) + # try to get the attribute value from SDB + if self.name in a: + value = self.checker.from_string(a[self.name], obj) + setattr(obj, self.slot_name, value) + # if it's not there, set the value to the default value + else: + self.__set__(obj, self.checker.default) + + def __get__(self, obj, objtype): + if obj: + try: + value = getattr(obj, self.slot_name) + except AttributeError: + if obj._auto_update: + self.load(obj) + value = getattr(obj, self.slot_name) + else: + value = self.checker.default + setattr(obj, self.slot_name, self.checker.default) + return value + + def __set__(self, obj, value): + self.checker.check(value) + try: + old_value = getattr(obj, self.slot_name) + except: + old_value = self.checker.default + setattr(obj, self.slot_name, value) + if obj._auto_update: + try: + self.save(obj) + except: + setattr(obj, self.slot_name, old_value) + raise + +class StringProperty(ScalarProperty): + + def __init__(self, **params): + ScalarProperty.__init__(self, StringChecker, **params) + +class PasswordProperty(ScalarProperty): + """ + Hashed password + """ + + def __init__(self, **params): + ScalarProperty.__init__(self, PasswordChecker, **params) + + def __set__(self, obj, value): + p = Password() + p.set(value) + ScalarProperty.__set__(self, obj, p) + + def __get__(self, obj, objtype): + return Password(ScalarProperty.__get__(self, obj, objtype)) + +class SmallPositiveIntegerProperty(ScalarProperty): + + def __init__(self, **params): + params['size'] = 'small' + params['signed'] = False + ScalarProperty.__init__(self, IntegerChecker, **params) + +class SmallIntegerProperty(ScalarProperty): + + def __init__(self, **params): + params['size'] = 'small' + params['signed'] = True + ScalarProperty.__init__(self, IntegerChecker, **params) + +class PositiveIntegerProperty(ScalarProperty): + + def __init__(self, **params): + params['size'] = 'medium' + params['signed'] = False + ScalarProperty.__init__(self, IntegerChecker, **params) + +class IntegerProperty(ScalarProperty): + + def __init__(self, **params): + params['size'] = 'medium' + params['signed'] = True + ScalarProperty.__init__(self, IntegerChecker, **params) + +class LargePositiveIntegerProperty(ScalarProperty): + + def __init__(self, **params): + params['size'] = 'large' + params['signed'] = False + ScalarProperty.__init__(self, IntegerChecker, **params) + +class LargeIntegerProperty(ScalarProperty): + + def __init__(self, **params): + params['size'] = 'large' + params['signed'] = True + ScalarProperty.__init__(self, IntegerChecker, **params) + +class BooleanProperty(ScalarProperty): + + def __init__(self, **params): + ScalarProperty.__init__(self, 
BooleanChecker, **params) + +class DateTimeProperty(ScalarProperty): + + def __init__(self, **params): + ScalarProperty.__init__(self, DateTimeChecker, **params) + +class ObjectProperty(ScalarProperty): + + def __init__(self, **params): + ScalarProperty.__init__(self, ObjectChecker, **params) + +class S3KeyProperty(ScalarProperty): + + def __init__(self, **params): + ScalarProperty.__init__(self, S3KeyChecker, **params) + + def __set__(self, obj, value): + self.checker.check(value) + try: + old_value = getattr(obj, self.slot_name) + except: + old_value = self.checker.default + if isinstance(value, str): + value = self.checker.from_string(value, obj) + setattr(obj, self.slot_name, value) + if obj._auto_update: + try: + self.save(obj) + except: + setattr(obj, self.slot_name, old_value) + raise + +class S3BucketProperty(ScalarProperty): + + def __init__(self, **params): + ScalarProperty.__init__(self, S3BucketChecker, **params) + + def __set__(self, obj, value): + self.checker.check(value) + try: + old_value = getattr(obj, self.slot_name) + except: + old_value = self.checker.default + if isinstance(value, str): + value = self.checker.from_string(value, obj) + setattr(obj, self.slot_name, value) + if obj._auto_update: + try: + self.save(obj) + except: + setattr(obj, self.slot_name, old_value) + raise + +class MultiValueProperty(Property): + + def __init__(self, checker_class, **params): + Property.__init__(self, checker_class, **params) + + def __get__(self, obj, objtype): + if obj: + try: + value = getattr(obj, self.slot_name) + except AttributeError: + if obj._auto_update: + self.load(obj) + value = getattr(obj, self.slot_name) + else: + value = MultiValue(self, obj, []) + setattr(obj, self.slot_name, value) + return value + + def load(self, obj): + if obj != None: + _list = [] + domain = obj._manager.domain + a = domain.get_attributes(obj.id, self.name) + if self.name in a: + lst = a[self.name] + if not isinstance(lst, list): + lst = [lst] + for value in lst: + value = self.checker.from_string(value, obj) + _list.append(value) + setattr(obj, self.slot_name, MultiValue(self, obj, _list)) + + def __set__(self, obj, value): + if not isinstance(value, list): + raise SDBPersistenceError('Value must be a list') + setattr(obj, self.slot_name, MultiValue(self, obj, value)) + str_list = self.to_string(obj) + domain = obj._manager.domain + if obj._auto_update: + if len(str_list) == 1: + domain.put_attributes(obj.id, {self.name : str_list[0]}, replace=True) + else: + try: + self.__delete__(obj) + except: + pass + domain.put_attributes(obj.id, {self.name : str_list}, replace=True) + setattr(obj, self.slot_name, MultiValue(self, obj, value)) + + def __delete__(self, obj): + if obj._auto_update: + domain = obj._manager.domain + domain.delete_attributes(obj.id, [self.name]) + setattr(obj, self.slot_name, MultiValue(self, obj, [])) + + def to_string(self, obj): + str_list = [] + for value in self.__get__(obj, type(obj)): + str_list.append(self.checker.to_string(value)) + return str_list + +class StringListProperty(MultiValueProperty): + + def __init__(self, **params): + MultiValueProperty.__init__(self, StringChecker, **params) + +class SmallIntegerListProperty(MultiValueProperty): + + def __init__(self, **params): + params['size'] = 'small' + params['signed'] = True + MultiValueProperty.__init__(self, IntegerChecker, **params) + +class SmallPositiveIntegerListProperty(MultiValueProperty): + + def __init__(self, **params): + params['size'] = 'small' + params['signed'] = False + 
MultiValueProperty.__init__(self, IntegerChecker, **params) + +class IntegerListProperty(MultiValueProperty): + + def __init__(self, **params): + params['size'] = 'medium' + params['signed'] = True + MultiValueProperty.__init__(self, IntegerChecker, **params) + +class PositiveIntegerListProperty(MultiValueProperty): + + def __init__(self, **params): + params['size'] = 'medium' + params['signed'] = False + MultiValueProperty.__init__(self, IntegerChecker, **params) + +class LargeIntegerListProperty(MultiValueProperty): + + def __init__(self, **params): + params['size'] = 'large' + params['signed'] = True + MultiValueProperty.__init__(self, IntegerChecker, **params) + +class LargePositiveIntegerListProperty(MultiValueProperty): + + def __init__(self, **params): + params['size'] = 'large' + params['signed'] = False + MultiValueProperty.__init__(self, IntegerChecker, **params) + +class BooleanListProperty(MultiValueProperty): + + def __init__(self, **params): + MultiValueProperty.__init__(self, BooleanChecker, **params) + +class ObjectListProperty(MultiValueProperty): + + def __init__(self, **params): + MultiValueProperty.__init__(self, ObjectChecker, **params) + +class HasManyProperty(Property): + + def set_name(self, name): + self.name = name + self.slot_name = '__' + self.name + + def __get__(self, obj, objtype): + return self + + +class MultiValue: + """ + Special Multi Value for boto persistence layer to allow us to do + obj.list.append(foo) + """ + def __init__(self, property, obj, _list): + self.checker = property.checker + self.name = property.name + self.object = obj + self._list = _list + + def __repr__(self): + return repr(self._list) + + def __getitem__(self, key): + return self._list.__getitem__(key) + + def __delitem__(self, key): + item = self[key] + self._list.__delitem__(key) + domain = self.object._manager.domain + domain.delete_attributes(self.object.id, {self.name: [self.checker.to_string(item)]}) + + def __len__(self): + return len(self._list) + + def append(self, value): + self.checker.check(value) + self._list.append(value) + domain = self.object._manager.domain + domain.put_attributes(self.object.id, {self.name: self.checker.to_string(value)}, replace=False) + + def index(self, value): + for x in self._list: + if x.id == value.id: + return self._list.index(x) + + def remove(self, value): + del(self[self.index(value)]) diff --git a/vendor/boto/boto/sdb/persist/test_persist.py b/vendor/boto/boto/sdb/persist/test_persist.py new file mode 100644 index 000000000000..080935d312c1 --- /dev/null +++ b/vendor/boto/boto/sdb/persist/test_persist.py @@ -0,0 +1,141 @@ +from boto.sdb.persist.object import SDBObject +from boto.sdb.persist.property import StringProperty, PositiveIntegerProperty, IntegerProperty +from boto.sdb.persist.property import BooleanProperty, DateTimeProperty, S3KeyProperty +from boto.sdb.persist.property import ObjectProperty, StringListProperty +from boto.sdb.persist.property import PositiveIntegerListProperty, BooleanListProperty, ObjectListProperty +from boto.sdb.persist import Manager +from datetime import datetime +import time + +# +# This will eventually be moved to the boto.tests module and become a real unit test +# but for now it will live here. It shows examples of each of the Property types in +# use and tests the basic operations. 
+# +class TestScalar(SDBObject): + + name = StringProperty() + description = StringProperty() + size = PositiveIntegerProperty() + offset = IntegerProperty() + foo = BooleanProperty() + date = DateTimeProperty() + file = S3KeyProperty() + +class TestRef(SDBObject): + + name = StringProperty() + ref = ObjectProperty(ref_class=TestScalar) + +class TestSubClass1(TestRef): + + answer = PositiveIntegerProperty() + +class TestSubClass2(TestScalar): + + flag = BooleanProperty() + +class TestList(SDBObject): + + names = StringListProperty() + numbers = PositiveIntegerListProperty() + bools = BooleanListProperty() + objects = ObjectListProperty(ref_class=TestScalar) + +def test1(): + s = TestScalar() + s.name = 'foo' + s.description = 'This is foo' + s.size = 42 + s.offset = -100 + s.foo = True + s.date = datetime.now() + s.save() + return s + +def test2(ref_name): + s = TestRef() + s.name = 'testref' + rs = TestScalar.find(name=ref_name) + s.ref = rs.next() + s.save() + return s + +def test3(): + s = TestScalar() + s.name = 'bar' + s.description = 'This is bar' + s.size = 24 + s.foo = False + s.date = datetime.now() + s.save() + return s + +def test4(ref1, ref2): + s = TestList() + s.names.append(ref1.name) + s.names.append(ref2.name) + s.numbers.append(ref1.size) + s.numbers.append(ref2.size) + s.bools.append(ref1.foo) + s.bools.append(ref2.foo) + s.objects.append(ref1) + s.objects.append(ref2) + s.save() + return s + +def test5(ref): + s = TestSubClass1() + s.answer = 42 + s.ref = ref + s.save() + # test out free form attribute + s.fiddlefaddle = 'this is fiddlefaddle' + s._fiddlefaddle = 'this is not fiddlefaddle' + return s + +def test6(): + s = TestSubClass2() + s.name = 'fie' + s.description = 'This is fie' + s.size = 4200 + s.offset = -820 + s.foo = False + s.date = datetime.now() + s.flag = True + s.save() + return s + +def test(domain_name): + print 'Initialize the Persistance system' + Manager.DefaultDomainName = domain_name + print 'Call test1' + s1 = test1() + # now create a new instance and read the saved data from SDB + print 'Now sleep to wait for things to converge' + time.sleep(5) + print 'Now lookup the object and compare the fields' + s2 = TestScalar(s1.id) + assert s1.name == s2.name + assert s1.description == s2.description + assert s1.size == s2.size + assert s1.offset == s2.offset + assert s1.foo == s2.foo + #assert s1.date == s2.date + print 'Call test2' + s2 = test2(s1.name) + print 'Call test3' + s3 = test3() + print 'Call test4' + s4 = test4(s1, s3) + print 'Call test5' + s6 = test6() + s5 = test5(s6) + domain = s5._manager.domain + item1 = domain.get_item(s1.id) + item2 = domain.get_item(s2.id) + item3 = domain.get_item(s3.id) + item4 = domain.get_item(s4.id) + item5 = domain.get_item(s5.id) + item6 = domain.get_item(s6.id) + return [(s1, item1), (s2, item2), (s3, item3), (s4, item4), (s5, item5), (s6, item6)] diff --git a/vendor/boto/boto/sdb/queryresultset.py b/vendor/boto/boto/sdb/queryresultset.py new file mode 100644 index 000000000000..10bafd1c92c9 --- /dev/null +++ b/vendor/boto/boto/sdb/queryresultset.py @@ -0,0 +1,92 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the 
Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +def query_lister(domain, query='', max_items=None, attr_names=None): + more_results = True + num_results = 0 + next_token = None + while more_results: + rs = domain.connection.query_with_attributes(domain, query, attr_names, + next_token=next_token) + for item in rs: + if max_items: + if num_results == max_items: + raise StopIteration + yield item + num_results += 1 + next_token = rs.next_token + more_results = next_token != None + +class QueryResultSet: + + def __init__(self, domain=None, query='', max_items=None, attr_names=None): + self.max_items = max_items + self.domain = domain + self.query = query + self.attr_names = attr_names + + def __iter__(self): + return query_lister(self.domain, self.query, self.max_items, self.attr_names) + +def select_lister(domain, query='', max_items=None): + more_results = True + num_results = 0 + next_token = None + while more_results: + rs = domain.connection.select(domain, query, next_token=next_token) + for item in rs: + if max_items: + if num_results == max_items: + raise StopIteration + yield item + num_results += 1 + next_token = rs.next_token + more_results = next_token != None + +class SelectResultSet(object): + + def __init__(self, domain=None, query='', max_items=None, + next_token=None, consistent_read=False): + self.domain = domain + self.query = query + self.consistent_read = consistent_read + self.max_items = max_items + self.next_token = next_token + + def __iter__(self): + more_results = True + num_results = 0 + while more_results: + rs = self.domain.connection.select(self.domain, self.query, + next_token=self.next_token, + consistent_read=self.consistent_read) + for item in rs: + if self.max_items and num_results >= self.max_items: + raise StopIteration + yield item + num_results += 1 + self.next_token = rs.next_token + if self.max_items and num_results >= self.max_items: + raise StopIteration + more_results = self.next_token != None + + def next(self): + return self.__iter__().next() diff --git a/vendor/boto/boto/sdb/regioninfo.py b/vendor/boto/boto/sdb/regioninfo.py new file mode 100644 index 000000000000..bff9dea61dc5 --- /dev/null +++ b/vendor/boto/boto/sdb/regioninfo.py @@ -0,0 +1,40 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.ec2.regioninfo import RegionInfo + +class SDBRegionInfo(RegionInfo): + + def connect(self, **kw_params): + """ + Connect to this Region's endpoint. Returns an SDBConnection + object pointing to the endpoint associated with this region. + You may pass any of the arguments accepted by the SDBConnection + object's constructor as keyword arguments and they will be + passed along to the SDBConnection object. + + :rtype: :class:`boto.sdb.connection.SDBConnection` + :return: The connection to this regions endpoint + """ + from boto.sdb.connection import SDBConnection + return SDBConnection(region=self, **kw_params) + diff --git a/vendor/boto/boto/services/__init__.py b/vendor/boto/boto/services/__init__.py new file mode 100644 index 000000000000..449bd162a8ea --- /dev/null +++ b/vendor/boto/boto/services/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/vendor/boto/boto/services/bs.py b/vendor/boto/boto/services/bs.py new file mode 100755 index 000000000000..3d700315db44 --- /dev/null +++ b/vendor/boto/boto/services/bs.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +from optparse import OptionParser +from boto.services.servicedef import ServiceDef +from boto.services.submit import Submitter +from boto.services.result import ResultProcessor +import boto +import sys, os, StringIO + +class BS(object): + + Usage = "usage: %prog [options] config_file command" + + Commands = {'reset' : 'Clear input queue and output bucket', + 'submit' : 'Submit local files to the service', + 'start' : 'Start the service', + 'status' : 'Report on the status of the service buckets and queues', + 'retrieve' : 'Retrieve output generated by a batch', + 'batches' : 'List all batches stored in current output_domain'} + + def __init__(self): + self.service_name = None + self.parser = OptionParser(usage=self.Usage) + self.parser.add_option("--help-commands", action="store_true", dest="help_commands", + help="provides help on the available commands") + self.parser.add_option("-a", "--access-key", action="store", type="string", + help="your AWS Access Key") + self.parser.add_option("-s", "--secret-key", action="store", type="string", + help="your AWS Secret Access Key") + self.parser.add_option("-p", "--path", action="store", type="string", dest="path", + help="the path to local directory for submit and retrieve") + self.parser.add_option("-k", "--keypair", action="store", type="string", dest="keypair", + help="the SSH keypair used with launched instance(s)") + self.parser.add_option("-l", "--leave", action="store_true", dest="leave", + help="leave the files (don't retrieve) files during retrieve command") + self.parser.set_defaults(leave=False) + self.parser.add_option("-n", "--num-instances", action="store", type="string", dest="num_instances", + help="the number of launched instance(s)") + self.parser.set_defaults(num_instances=1) + self.parser.add_option("-i", "--ignore-dirs", action="append", type="string", dest="ignore", + help="directories that should be ignored by submit command") + self.parser.add_option("-b", "--batch-id", action="store", type="string", dest="batch", + help="batch identifier required by the retrieve command") + + def print_command_help(self): + print '\nCommands:' + for key in self.Commands.keys(): + print ' %s\t\t%s' % (key, self.Commands[key]) + + def do_reset(self): + iq = self.sd.get_obj('input_queue') + if iq: + print 'clearing out input queue' + i = 0 + m = iq.read() + while m: + i += 1 + iq.delete_message(m) + m = iq.read() + print 'deleted %d messages' % i + ob = self.sd.get_obj('output_bucket') + ib = self.sd.get_obj('input_bucket') + if ob: + if ib and ob.name == ib.name: + return + print 'delete generated files in output bucket' + i = 0 + for k in ob: + i += 1 + k.delete() + print 'deleted %d keys' % i + + def do_submit(self): + if not self.options.path: + self.parser.error('No path provided') + if not os.path.exists(self.options.path): + self.parser.error('Invalid path (%s)' % self.options.path) + s = Submitter(self.sd) + t = s.submit_path(self.options.path, None, self.options.ignore, None, + None, True, self.options.path) + print 'A total of %d files were submitted' % t[1] + print 'Batch Identifier: %s' % t[0] + + def do_start(self): + ami_id = self.sd.get('ami_id') + instance_type = self.sd.get('instance_type', 'm1.small') + security_group = self.sd.get('security_group', 'default') + if not ami_id: 
+ self.parser.error('ami_id option is required when starting the service') + ec2 = boto.connect_ec2() + if not self.sd.has_section('Credentials'): + self.sd.add_section('Credentials') + self.sd.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id) + self.sd.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key) + s = StringIO.StringIO() + self.sd.write(s) + rs = ec2.get_all_images([ami_id]) + img = rs[0] + r = img.run(user_data=s.getvalue(), key_name=self.options.keypair, + max_count=self.options.num_instances, + instance_type=instance_type, + security_groups=[security_group]) + print 'Starting AMI: %s' % ami_id + print 'Reservation %s contains the following instances:' % r.id + for i in r.instances: + print '\t%s' % i.id + + def do_status(self): + iq = self.sd.get_obj('input_queue') + if iq: + print 'The input_queue (%s) contains approximately %s messages' % (iq.id, iq.count()) + ob = self.sd.get_obj('output_bucket') + ib = self.sd.get_obj('input_bucket') + if ob: + if ib and ob.name == ib.name: + return + total = 0 + for k in ob: + total += 1 + print 'The output_bucket (%s) contains %d keys' % (ob.name, total) + + def do_retrieve(self): + if not self.options.path: + self.parser.error('No path provided') + if not os.path.exists(self.options.path): + self.parser.error('Invalid path (%s)' % self.options.path) + if not self.options.batch: + self.parser.error('batch identifier is required for retrieve command') + s = ResultProcessor(self.options.batch, self.sd) + s.get_results(self.options.path, get_file=(not self.options.leave)) + + def do_batches(self): + d = self.sd.get_obj('output_domain') + if d: + print 'Available Batches:' + rs = d.query("['type'='Batch']") + for item in rs: + print ' %s' % item.name + else: + self.parser.error('No output_domain specified for service') + + def main(self): + self.options, self.args = self.parser.parse_args() + if self.options.help_commands: + self.print_command_help() + sys.exit(0) + if len(self.args) != 2: + self.parser.error("config_file and command are required") + self.config_file = self.args[0] + self.sd = ServiceDef(self.config_file) + self.command = self.args[1] + if hasattr(self, 'do_%s' % self.command): + method = getattr(self, 'do_%s' % self.command) + method() + else: + self.parser.error('command (%s) not recognized' % self.command) + +if __name__ == "__main__": + bs = BS() + bs.main() diff --git a/vendor/boto/boto/services/message.py b/vendor/boto/boto/services/message.py new file mode 100644 index 000000000000..79f6d19f6644 --- /dev/null +++ b/vendor/boto/boto/services/message.py @@ -0,0 +1,58 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sqs.message import MHMessage +from boto.utils import get_ts +from socket import gethostname +import os, mimetypes, time + +class ServiceMessage(MHMessage): + + def for_key(self, key, params=None, bucket_name=None): + if params: + self.update(params) + if key.path: + t = os.path.split(key.path) + self['OriginalLocation'] = t[0] + self['OriginalFileName'] = t[1] + mime_type = mimetypes.guess_type(t[1])[0] + if mime_type == None: + mime_type = 'application/octet-stream' + self['Content-Type'] = mime_type + s = os.stat(key.path) + t = time.gmtime(s[7]) + self['FileAccessedDate'] = get_ts(t) + t = time.gmtime(s[8]) + self['FileModifiedDate'] = get_ts(t) + t = time.gmtime(s[9]) + self['FileCreateDate'] = get_ts(t) + else: + self['OriginalFileName'] = key.name + self['OriginalLocation'] = key.bucket.name + self['ContentType'] = key.content_type + self['Host'] = gethostname() + if bucket_name: + self['Bucket'] = bucket_name + else: + self['Bucket'] = key.bucket.name + self['InputKey'] = key.name + self['Size'] = key.size + diff --git a/vendor/boto/boto/services/result.py b/vendor/boto/boto/services/result.py new file mode 100644 index 000000000000..f6c440719be2 --- /dev/null +++ b/vendor/boto/boto/services/result.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
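A minimal sketch of how ServiceMessage.for_key might be used (the key, queue, and header values shown are hypothetical; the file-name, Content-Type, and date headers are only filled in when the key carries a local path, as it does for keys uploaded by the Submitter):

    from boto.services.message import ServiceMessage

    msg = ServiceMessage()
    msg.for_key(key, params={'Priority': 'high'})   # key: a boto S3 Key object
    # msg now acts like a dict of headers, roughly:
    #   msg['Bucket']           -> 'my-input-bucket'
    #   msg['InputKey']         -> 'videos/clip.avi'
    #   msg['Size']             -> 1048576
    #   msg['OriginalFileName'] -> 'clip.avi'            (local path only)
    #   msg['Content-Type']     -> 'video/x-msvideo'     (guessed from the file name)
    input_queue.write(msg)                          # input_queue: an SQS Queue object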
+ +import os +from datetime import datetime, timedelta +from boto.utils import parse_ts +import boto + +class ResultProcessor: + + LogFileName = 'log.csv' + + def __init__(self, batch_name, sd, mimetype_files=None): + self.sd = sd + self.batch = batch_name + self.log_fp = None + self.num_files = 0 + self.total_time = 0 + self.min_time = timedelta.max + self.max_time = timedelta.min + self.earliest_time = datetime.max + self.latest_time = datetime.min + self.queue = self.sd.get_obj('output_queue') + self.domain = self.sd.get_obj('output_domain') + + def calculate_stats(self, msg): + start_time = parse_ts(msg['Service-Read']) + end_time = parse_ts(msg['Service-Write']) + elapsed_time = end_time - start_time + if elapsed_time > self.max_time: + self.max_time = elapsed_time + if elapsed_time < self.min_time: + self.min_time = elapsed_time + self.total_time += elapsed_time.seconds + if start_time < self.earliest_time: + self.earliest_time = start_time + if end_time > self.latest_time: + self.latest_time = end_time + + def log_message(self, msg, path): + keys = msg.keys() + keys.sort() + if not self.log_fp: + self.log_fp = open(os.path.join(path, self.LogFileName), 'w') + line = ','.join(keys) + self.log_fp.write(line+'\n') + values = [] + for key in keys: + value = msg[key] + if value.find(',') > 0: + value = '"%s"' % value + values.append(value) + line = ','.join(values) + self.log_fp.write(line+'\n') + + def process_record(self, record, path, get_file=True): + self.log_message(record, path) + self.calculate_stats(record) + outputs = record['OutputKey'].split(',') + if record.has_key('OutputBucket'): + bucket = boto.lookup('s3', record['OutputBucket']) + else: + bucket = boto.lookup('s3', record['Bucket']) + for output in outputs: + if get_file: + key_name = output.split(';')[0] + key = bucket.lookup(key_name) + file_name = os.path.join(path, key_name) + print 'retrieving file: %s to %s' % (key_name, file_name) + key.get_contents_to_filename(file_name) + self.num_files += 1 + + def get_results_from_queue(self, path, get_file=True, delete_msg=True): + m = self.queue.read() + while m: + if m.has_key('Batch') and m['Batch'] == self.batch: + self.process_record(m, path, get_file) + if delete_msg: + self.queue.delete_message(m) + m = self.queue.read() + + def get_results_from_domain(self, path, get_file=True): + rs = self.domain.query("['Batch'='%s']" % self.batch) + for item in rs: + self.process_record(item, path, get_file) + + def get_results_from_bucket(self, path): + bucket = self.sd.get_obj('output_bucket') + if bucket: + print 'No output queue or domain, just retrieving files from output_bucket' + for key in bucket: + file_name = os.path.join(path, key) + print 'retrieving file: %s to %s' % (key, file_name) + key.get_contents_to_filename(file_name) + self.num_files + 1 + + def get_results(self, path, get_file=True, delete_msg=True): + if not os.path.isdir(path): + os.mkdir(path) + if self.queue: + self.get_results_from_queue(path, get_file) + elif self.domain: + self.get_results_from_domain(path, get_file) + else: + self.get_results_from_bucket(path) + if self.log_fp: + self.log_fp.close() + print '%d results successfully retrieved.' 
% self.num_files + if self.num_files > 0: + self.avg_time = float(self.total_time)/self.num_files + print 'Minimum Processing Time: %d' % self.min_time.seconds + print 'Maximum Processing Time: %d' % self.max_time.seconds + print 'Average Processing Time: %f' % self.avg_time + self.elapsed_time = self.latest_time-self.earliest_time + print 'Elapsed Time: %d' % self.elapsed_time.seconds + tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files) + print 'Throughput: %f transactions / minute' % tput + diff --git a/vendor/boto/boto/services/service.py b/vendor/boto/boto/services/service.py new file mode 100644 index 000000000000..8ee1a8beed26 --- /dev/null +++ b/vendor/boto/boto/services/service.py @@ -0,0 +1,161 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
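ResultProcessor is the retrieval half of the framework and is what the do_retrieve handler above drives. A minimal sketch, assuming a service config file and the batch identifier printed by an earlier submit step (both names are hypothetical):

    from boto.services.servicedef import ServiceDef
    from boto.services.result import ResultProcessor

    sd = ServiceDef('sonofmmm.cfg')                         # hypothetical config file
    rp = ResultProcessor('2010_5_27_23_5_26_3_147_0', sd)   # batch id from the submit step
    rp.get_results('/tmp/results')   # downloads the output files, writes log.csv,
                                     # then prints the timing statistics shown above

Note that get_results_from_bucket counts retrieved files with 'self.num_files + 1', a statement with no effect; 'self.num_files += 1' appears to be intended.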
+ +import boto +from boto.services.message import ServiceMessage +from boto.services.servicedef import ServiceDef +from boto.pyami.scriptbase import ScriptBase +from boto.utils import get_ts +import time +import os +import mimetypes + + +class Service(ScriptBase): + + # Time required to process a transaction + ProcessingTime = 60 + + def __init__(self, config_file=None, mimetype_files=None): + ScriptBase.__init__(self, config_file) + self.name = self.__class__.__name__ + self.working_dir = boto.config.get('Pyami', 'working_dir') + self.sd = ServiceDef(config_file) + self.retry_count = self.sd.getint('retry_count', 5) + self.loop_delay = self.sd.getint('loop_delay', 30) + self.processing_time = self.sd.getint('processing_time', 60) + self.input_queue = self.sd.get_obj('input_queue') + self.output_queue = self.sd.get_obj('output_queue') + self.output_domain = self.sd.get_obj('output_domain') + if mimetype_files: + mimetypes.init(mimetype_files) + + def split_key(key): + if key.find(';') < 0: + t = (key, '') + else: + key, type = key.split(';') + label, mtype = type.split('=') + t = (key, mtype) + return t + + def read_message(self): + boto.log.info('read_message') + message = self.input_queue.read(self.processing_time) + if message: + boto.log.info(message.get_body()) + key = 'Service-Read' + message[key] = get_ts() + return message + + # retrieve the source file from S3 + def get_file(self, message): + bucket_name = message['Bucket'] + key_name = message['InputKey'] + file_name = os.path.join(self.working_dir, message.get('OriginalFileName', 'in_file')) + boto.log.info('get_file: %s/%s to %s' % (bucket_name, key_name, file_name)) + bucket = boto.lookup('s3', bucket_name) + key = bucket.new_key(key_name) + key.get_contents_to_filename(os.path.join(self.working_dir, file_name)) + return file_name + + # process source file, return list of output files + def process_file(self, in_file_name, msg): + return [] + + # store result file in S3 + def put_file(self, bucket_name, file_path, key_name=None): + boto.log.info('putting file %s as %s.%s' % (file_path, bucket_name, key_name)) + bucket = boto.lookup('s3', bucket_name) + key = bucket.new_key(key_name) + key.set_contents_from_filename(file_path) + return key + + def save_results(self, results, input_message, output_message): + output_keys = [] + for file, type in results: + if input_message.has_key('OutputBucket'): + output_bucket = input_message['OutputBucket'] + else: + output_bucket = input_message['Bucket'] + key_name = os.path.split(file)[1] + key = self.put_file(output_bucket, file, key_name) + output_keys.append('%s;type=%s' % (key.name, type)) + output_message['OutputKey'] = ','.join(output_keys) + + # write message to each output queue + def write_message(self, message): + message['Service-Write'] = get_ts() + message['Server'] = self.name + if os.environ.has_key('HOSTNAME'): + message['Host'] = os.environ['HOSTNAME'] + else: + message['Host'] = 'unknown' + message['Instance-ID'] = self.instance_id + if self.output_queue: + boto.log.info('Writing message to SQS queue: %s' % self.output_queue.id) + self.output_queue.write(message) + if self.output_domain: + boto.log.info('Writing message to SDB domain: %s' % self.output_domain.name) + item_name = '/'.join([message['Service-Write'], message['Bucket'], message['InputKey']]) + self.output_domain.put_attributes(item_name, message) + + # delete message from input queue + def delete_message(self, message): + boto.log.info('deleting message from %s' % self.input_queue.id) + 
self.input_queue.delete_message(message) + + # to clean up any files, etc. after each iteration + def cleanup(self): + pass + + def shutdown(self): + on_completion = self.sd.get('on_completion', 'shutdown') + if on_completion == 'shutdown': + if self.instance_id: + time.sleep(60) + c = boto.connect_ec2() + c.terminate_instances([self.instance_id]) + + def main(self, notify=False): + self.notify('Service: %s Starting' % self.name) + empty_reads = 0 + while self.retry_count < 0 or empty_reads < self.retry_count: + try: + input_message = self.read_message() + if input_message: + empty_reads = 0 + output_message = ServiceMessage(None, input_message.get_body()) + input_file = self.get_file(input_message) + results = self.process_file(input_file, output_message) + self.save_results(results, input_message, output_message) + self.write_message(output_message) + self.delete_message(input_message) + self.cleanup() + else: + empty_reads += 1 + time.sleep(self.loop_delay) + except Exception: + boto.log.exception('Service Failed') + empty_reads += 1 + self.notify('Service: %s Shutting Down' % self.name) + self.shutdown() + diff --git a/vendor/boto/boto/services/servicedef.py b/vendor/boto/boto/services/servicedef.py new file mode 100644 index 000000000000..1cb01aa75480 --- /dev/null +++ b/vendor/boto/boto/services/servicedef.py @@ -0,0 +1,91 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
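Service.main above drives the read_message -> get_file -> process_file -> save_results -> write_message -> delete_message loop, so a concrete worker normally only overrides process_file and returns a list of (file_name, mime_type) tuples, as SonOfMMM does below. A toy sketch of such a subclass (the class name and config path are hypothetical, and a valid Pyami config on the instance is assumed):

    import os
    from boto.services.service import Service

    class UpperCaser(Service):
        """Toy worker: stores an upper-cased copy of each input file."""

        def process_file(self, in_file_name, msg):
            out_file_name = os.path.join(self.working_dir, 'out.txt')
            out_fp = open(out_file_name, 'w')
            out_fp.write(open(in_file_name).read().upper())
            out_fp.close()
            return [(out_file_name, 'text/plain')]

    # On the launched instance this would be run as:
    #   UpperCaser(config_file='/path/to/service.cfg').main()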
+ +from boto.pyami.config import Config +from boto.services.message import ServiceMessage +import boto + +class ServiceDef(Config): + + def __init__(self, config_file, aws_access_key_id=None, aws_secret_access_key=None): + Config.__init__(self, config_file) + self.aws_access_key_id = aws_access_key_id + self.aws_secret_access_key = aws_secret_access_key + script = Config.get(self, 'Pyami', 'scripts') + if script: + self.name = script.split('.')[-1] + else: + self.name = None + + + def get(self, name, default=None): + return Config.get(self, self.name, name, default) + + def has_option(self, option): + return Config.has_option(self, self.name, option) + + def getint(self, option, default=0): + try: + val = Config.get(self, self.name, option) + val = int(val) + except: + val = int(default) + return val + + def getbool(self, option, default=False): + try: + val = Config.get(self, self.name, option) + if val.lower() == 'true': + val = True + else: + val = False + except: + val = default + return val + + def get_obj(self, name): + """ + Returns the AWS object associated with a given option. + + The heuristics used are a bit lame. If the option name contains + the word 'bucket' it is assumed to be an S3 bucket, if the name + contains the word 'queue' it is assumed to be an SQS queue and + if it contains the word 'domain' it is assumed to be a SimpleDB + domain. If the option name specified does not exist in the + config file or if the AWS object cannot be retrieved this + returns None. + """ + val = self.get(name) + if not val: + return None + if name.find('queue') >= 0: + obj = boto.lookup('sqs', val) + if obj: + obj.set_message_class(ServiceMessage) + elif name.find('bucket') >= 0: + obj = boto.lookup('s3', val) + elif name.find('domain') >= 0: + obj = boto.lookup('sdb', val) + else: + obj = None + return obj + + diff --git a/vendor/boto/boto/services/sonofmmm.cfg b/vendor/boto/boto/services/sonofmmm.cfg new file mode 100644 index 000000000000..d70d3794d5d4 --- /dev/null +++ b/vendor/boto/boto/services/sonofmmm.cfg @@ -0,0 +1,43 @@ +# +# Your AWS Credentials +# You only need to supply these in this file if you are not using +# the boto tools to start your service +# +#[Credentials] +#aws_access_key_id = +#aws_secret_access_key = + +# +# Fill out this section if you want emails from the service +# when it starts and stops +# +#[Notification] +#smtp_host = +#smtp_user = +#smtp_pass = +#smtp_from = +#smtp_to = + +[Pyami] +scripts = boto.services.sonofmmm.SonOfMMM + +[SonOfMMM] +# id of the AMI to be launched +ami_id = ami-dc799cb5 +# number of times service will read an empty queue before exiting +# a negative value will cause the service to run forever +retry_count = 5 +# seconds to wait after empty queue read before reading again +loop_delay = 10 +# average time it takes to process a transaction +# controls invisibility timeout of messages +processing_time = 60 +ffmpeg_args = -y -i %%s -f mov -r 29.97 -b 1200kb -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -ar 48000 -ab 19200 -s 320x240 -vcodec mpeg4 -acodec libfaac %%s +output_mimetype = video/quicktime +output_ext = .mov +input_bucket = +output_bucket = +output_domain = +output_queue = +input_queue = + diff --git a/vendor/boto/boto/services/sonofmmm.py b/vendor/boto/boto/services/sonofmmm.py new file mode 100644 index 000000000000..acb7e610673e --- /dev/null +++ b/vendor/boto/boto/services/sonofmmm.py @@ -0,0 +1,81 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any 
person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import boto +from boto.services.service import Service +from boto.services.message import ServiceMessage +import os +import mimetypes + +class SonOfMMM(Service): + + def __init__(self, config_file=None): + Service.__init__(self, config_file) + self.log_file = '%s.log' % self.instance_id + self.log_path = os.path.join(self.working_dir, self.log_file) + boto.set_file_logger(self.name, self.log_path) + if self.sd.has_option('ffmpeg_args'): + self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args') + else: + self.command = '/usr/local/bin/ffmpeg -y -i %s %s' + self.output_mimetype = self.sd.get('output_mimetype') + if self.sd.has_option('output_ext'): + self.output_ext = self.sd.get('output_ext') + else: + self.output_ext = mimetypes.guess_extension(self.output_mimetype) + self.output_bucket = self.sd.get_obj('output_bucket') + self.input_bucket = self.sd.get_obj('input_bucket') + # check to see if there are any messages queue + # if not, create messages for all files in input_bucket + m = self.input_queue.read(1) + if not m: + self.queue_files() + + def queue_files(self): + boto.log.info('Queueing files from %s' % self.input_bucket.name) + for key in self.input_bucket: + boto.log.info('Queueing %s' % key.name) + m = ServiceMessage() + if self.output_bucket: + d = {'OutputBucket' : self.output_bucket.name} + else: + d = None + m.for_key(key, d) + self.input_queue.write(m) + + def process_file(self, in_file_name, msg): + base, ext = os.path.splitext(in_file_name) + out_file_name = os.path.join(self.working_dir, + base+self.output_ext) + command = self.command % (in_file_name, out_file_name) + boto.log.info('running:\n%s' % command) + status = self.run(command) + if status == 0: + return [(out_file_name, self.output_mimetype)] + else: + return [] + + def shutdown(self): + if os.path.isfile(self.log_path): + if self.output_bucket: + key = self.output_bucket.new_key(self.log_file) + key.set_contents_from_filename(self.log_path) + Service.shutdown(self) diff --git a/vendor/boto/boto/services/submit.py b/vendor/boto/boto/services/submit.py new file mode 100644 index 000000000000..89c439c525fe --- /dev/null +++ b/vendor/boto/boto/services/submit.py @@ -0,0 +1,88 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, 
publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +import time +import os + + +class Submitter: + + def __init__(self, sd): + self.sd = sd + self.input_bucket = self.sd.get_obj('input_bucket') + self.output_bucket = self.sd.get_obj('output_bucket') + self.output_domain = self.sd.get_obj('output_domain') + self.queue = self.sd.get_obj('input_queue') + + def get_key_name(self, fullpath, prefix): + key_name = fullpath[len(prefix):] + l = key_name.split(os.sep) + return '/'.join(l) + + def write_message(self, key, metadata): + if self.queue: + m = self.queue.new_message() + m.for_key(key, metadata) + if self.output_bucket: + m['OutputBucket'] = self.output_bucket.name + self.queue.write(m) + + def submit_file(self, path, metadata=None, cb=None, num_cb=0, prefix='/'): + if not metadata: + metadata = {} + key_name = self.get_key_name(path, prefix) + k = self.input_bucket.new_key(key_name) + k.update_metadata(metadata) + k.set_contents_from_filename(path, replace=False, cb=cb, num_cb=num_cb) + self.write_message(k, metadata) + + def submit_path(self, path, tags=None, ignore_dirs=None, cb=None, num_cb=0, status=False, prefix='/'): + path = os.path.expanduser(path) + path = os.path.expandvars(path) + path = os.path.abspath(path) + total = 0 + metadata = {} + if tags: + metadata['Tags'] = tags + l = [] + for t in time.gmtime(): + l.append(str(t)) + metadata['Batch'] = '_'.join(l) + if self.output_domain: + self.output_domain.put_attributes(metadata['Batch'], {'type' : 'Batch'}) + if os.path.isdir(path): + for root, dirs, files in os.walk(path): + if ignore_dirs: + for ignore in ignore_dirs: + if ignore in dirs: + dirs.remove(ignore) + for file in files: + fullpath = os.path.join(root, file) + if status: + print 'Submitting %s' % fullpath + self.submit_file(fullpath, metadata, cb, num_cb, prefix) + total += 1 + elif os.path.isfile(path): + self.submit_file(path, metadata, cb, num_cb) + total += 1 + else: + print 'problem with %s' % path + return (metadata['Batch'], total) diff --git a/vendor/boto/boto/sns/__init__.py b/vendor/boto/boto/sns/__init__.py new file mode 100644 index 000000000000..6075fe6f760d --- /dev/null +++ b/vendor/boto/boto/sns/__init__.py @@ -0,0 +1,353 @@ +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or 
substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.connection import AWSQueryConnection +from boto.sdb.regioninfo import SDBRegionInfo +import boto +try: + import json +except ImportError: + import simplejson as json + +#boto.set_stream_logger('sns') + +class SNSConnection(AWSQueryConnection): + + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com' + APIVersion = '2010-03-31' + SignatureVersion = '2' + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/', converter=None): + if not region: + region = SDBRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + self.region.endpoint, debug, https_connection_factory, path) + + def get_all_topics(self, next_token=None): + """ + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {'ContentType' : 'JSON'} + if next_token: + params['NextToken'] = next_token + response = self.make_request('ListTopics', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_topic_attributes(self, topic): + """ + Get attributes of a Topic + + :type topic: string + :param topic: The ARN of the topic. + + """ + params = {'ContentType' : 'JSON', + 'TopicArn' : topic} + response = self.make_request('GetTopicAttributes', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def add_permission(self, topic, label, account_ids, actions): + """ + Adds a statement to a topic's access control policy, granting + access for the specified AWS accounts to the specified actions. + + :type topic: string + :param topic: The ARN of the topic. + + :type label: string + :param label: A unique identifier for the new policy statement. + + :type account_ids: list of strings + :param account_ids: The AWS account ids of the users who will be + give access to the specified actions. + + :type actions: list of strings + :param actions: The actions you want to allow for each of the + specified principal(s). 
+ + """ + params = {'ContentType' : 'JSON', + 'TopicArn' : topic, + 'Label' : label} + self.build_list_params(params, account_ids, 'AWSAccountId') + self.build_list_params(params, actions, 'ActionName') + response = self.make_request('AddPermission', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def remove_permission(self, topic, label): + """ + Removes a statement from a topic's access control policy. + + :type topic: string + :param topic: The ARN of the topic. + + :type label: string + :param label: A unique identifier for the policy statement + to be removed. + + """ + params = {'ContentType' : 'JSON', + 'TopicArn' : topic, + 'Label' : label} + response = self.make_request('RemovePermission', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def create_topic(self, topic): + """ + Create a new Topic. + + :type topic: string + :param topic: The name of the new topic. + + """ + params = {'ContentType' : 'JSON', + 'Name' : topic} + response = self.make_request('CreateTopic', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def delete_topic(self, topic): + """ + Delete an existing topic + + :type topic: string + :param topic: The ARN of the topic + + """ + params = {'ContentType' : 'JSON', + 'TopicArn' : topic} + response = self.make_request('DeleteTopic', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + + + def publish(self, topic, message, subject=None): + """ + Get properties of a Topic + + :type topic: string + :param topic: The ARN of the new topic. + + :type message: string + :param message: The message you want to send to the topic. + Messages must be UTF-8 encoded strings and + be at most 4KB in size. + + :type subject: string + :param subject: Optional parameter to be used as the "Subject" + line of the email notifications. + + """ + params = {'ContentType' : 'JSON', + 'TopicArn' : topic, + 'Message' : message} + if subject: + params['Subject'] = subject + response = self.make_request('Publish', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def subscribe(self, topic, protocol, endpoint): + """ + Subscribe to a Topic. + + :type topic: string + :param topic: The name of the new topic. + + :type protocol: string + :param protocol: The protocol used to communicate with + the subscriber. Current choices are: + email|email-json|http|https|sqs + + :type endpoint: string + :param endpoint: The location of the endpoint for + the subscriber. 
+ * For email, this would be a valid email address + * For email-json, this would be a valid email address + * For http, this would be a URL beginning with http + * For https, this would be a URL beginning with https + * For sqs, this would be the ARN of an SQS Queue + + :rtype: :class:`boto.sdb.domain.Domain` object + :return: The newly created domain + """ + params = {'ContentType' : 'JSON', + 'TopicArn' : topic, + 'Protocol' : protocol, + 'Endpoint' : endpoint} + response = self.make_request('Subscribe', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def confirm_subscription(self, topic, token, + authenticate_on_unsubscribe=False): + """ + Get properties of a Topic + + :type topic: string + :param topic: The ARN of the new topic. + + :type token: string + :param token: Short-lived token sent to and endpoint during + the Subscribe operation. + + :type authenticate_on_unsubscribe: bool + :param authenticate_on_unsubscribe: Optional parameter indicating + that you wish to disable + unauthenticated unsubscription + of the subscription. + + """ + params = {'ContentType' : 'JSON', + 'TopicArn' : topic, + 'Token' : token} + if authenticate_on_unsubscribe: + params['AuthenticateOnUnsubscribe'] = 'true' + response = self.make_request('ConfirmSubscription', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def unsubscribe(self, subscription): + """ + Allows endpoint owner to delete subscription. + Confirmation message will be delivered. + + :type subscription: string + :param subscription: The ARN of the subscription to be deleted. + + """ + params = {'ContentType' : 'JSON', + 'SubscriptionArn' : subscription} + response = self.make_request('Unsubscribe', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_all_subscriptions(self, next_token=None): + """ + Get list of all subscriptions. + + :type next_token: string + :param next_token: Token returned by the previous call to + this method. + + """ + params = {'ContentType' : 'JSON'} + if next_token: + params['NextToken'] = next_token + response = self.make_request('ListSubscriptions', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + + def get_all_subscriptions_by_topic(self, topic, next_token=None): + """ + Get list of all subscriptions to a specific topic. + + :type topic: string + :param topic: The ARN of the topic for which you wish to + find subscriptions. + + :type next_token: string + :param next_token: Token returned by the previous call to + this method. 
+ + """ + params = {'ContentType' : 'JSON', + 'TopicArn' : topic} + if next_token: + params['NextToken'] = next_token + response = self.make_request('ListSubscriptions', params, '/', 'GET') + body = response.read() + if response.status == 200: + return json.loads(body) + else: + boto.log.error('%s %s' % (response.status, response.reason)) + boto.log.error('%s' % body) + raise self.ResponseError(response.status, response.reason, body) + diff --git a/vendor/boto/boto/sqs/__init__.py b/vendor/boto/boto/sqs/__init__.py new file mode 100644 index 000000000000..281719133837 --- /dev/null +++ b/vendor/boto/boto/sqs/__init__.py @@ -0,0 +1,42 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +import boto + +from regioninfo import SQSRegionInfo + +def regions(): + """ + Get all available regions for the SQS service. + + :rtype: list + :return: A list of :class:`boto.ec2.regioninfo.RegionInfo` + """ + return [SQSRegionInfo(name='us-east-1', endpoint='queue.amazonaws.com'), + SQSRegionInfo(name='eu-west-1', endpoint='eu-west-1.queue.amazonaws.com'), + SQSRegionInfo(name='us-west-1', endpoint='us-west-1.queue.amazonaws.com')] + +def connect_to_region(region_name): + for region in regions(): + if region.name == region_name: + return region.connect() + return None diff --git a/vendor/boto/boto/sqs/attributes.py b/vendor/boto/boto/sqs/attributes.py new file mode 100644 index 000000000000..26c720416ffd --- /dev/null +++ b/vendor/boto/boto/sqs/attributes.py @@ -0,0 +1,46 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents an SQS Attribute Name/Value set +""" + +class Attributes(dict): + + def __init__(self, parent): + self.parent = parent + self.current_key = None + self.current_value = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'Attribute': + self[self.current_key] = self.current_value + elif name == 'Name': + self.current_key = value + elif name == 'Value': + self.current_value = value + else: + setattr(self, name, value) + + diff --git a/vendor/boto/boto/sqs/connection.py b/vendor/boto/boto/sqs/connection.py new file mode 100644 index 000000000000..848679e30b6c --- /dev/null +++ b/vendor/boto/boto/sqs/connection.py @@ -0,0 +1,286 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.connection import AWSQueryConnection +from boto.sqs.regioninfo import SQSRegionInfo +from boto.sqs.queue import Queue +from boto.sqs.message import Message +from boto.sqs.attributes import Attributes +from boto.exception import SQSError + + +class SQSConnection(AWSQueryConnection): + """ + A Connection to the SQS Service. + """ + DefaultRegionName = 'us-east-1' + DefaultRegionEndpoint = 'queue.amazonaws.com' + APIVersion = '2009-02-01' + SignatureVersion = '2' + DefaultContentType = 'text/plain' + ResponseError = SQSError + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, + is_secure=True, port=None, proxy=None, proxy_port=None, + proxy_user=None, proxy_pass=None, debug=0, + https_connection_factory=None, region=None, path='/'): + if not region: + region = SQSRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) + self.region = region + AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key, + is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, + self.region.endpoint, debug, https_connection_factory, path) + + def create_queue(self, queue_name, visibility_timeout=None): + """ + Create an SQS Queue. + + :type queue_name: str or unicode + :param queue_name: The name of the new queue. Names are scoped to an account and need to + be unique within that account. 
Calling this method on an existing + queue name will not return an error from SQS unless the value for + visibility_timeout is different than the value of the existing queue + of that name. This is still an expensive operation, though, and not + the preferred way to check for the existence of a queue. See the + :func:`boto.sqs.connection.SQSConnection.lookup` method. + + :type visibility_timeout: int + :param visibility_timeout: The default visibility timeout for all messages written in the + queue. This can be overridden on a per-message. + + :rtype: :class:`boto.sqs.queue.Queue` + :return: The newly created queue. + + """ + params = {'QueueName': queue_name} + if visibility_timeout: + params['DefaultVisibilityTimeout'] = '%d' % (visibility_timeout,) + return self.get_object('CreateQueue', params, Queue) + + def delete_queue(self, queue, force_deletion=False): + """ + Delete an SQS Queue. + + :type queue: A Queue object + :param queue: The SQS queue to be deleted + + :type force_deletion: Boolean + :param force_deletion: Normally, SQS will not delete a queue that contains messages. + However, if the force_deletion argument is True, the + queue will be deleted regardless of whether there are messages in + the queue or not. USE WITH CAUTION. This will delete all + messages in the queue as well. + + :rtype: bool + :return: True if the command succeeded, False otherwise + """ + return self.get_status('DeleteQueue', None, queue.id) + + def get_queue_attributes(self, queue, attribute='All'): + """ + Gets one or all attributes of a Queue + + :type queue: A Queue object + :param queue: The SQS queue to be deleted + + :type attribute: str + :type attribute: The specific attribute requested. If not supplied, the default + is to return all attributes. Valid attributes are: + ApproximateNumberOfMessages, + ApproximateNumberOfMessagesNotVisible, + VisibilityTimeout, + CreatedTimestamp, + LastModifiedTimestamp, + Policy + + :rtype: :class:`boto.sqs.attributes.Attributes` + :return: An Attributes object containing request value(s). + """ + params = {'AttributeName' : attribute} + return self.get_object('GetQueueAttributes', params, Attributes, queue.id) + + def set_queue_attribute(self, queue, attribute, value): + params = {'Attribute.Name' : attribute, 'Attribute.Value' : value} + return self.get_status('SetQueueAttributes', params, queue.id) + + def receive_message(self, queue, number_messages=1, visibility_timeout=None, + attributes=None): + """ + Read messages from an SQS Queue. + + :type queue: A Queue object + :param queue: The Queue from which messages are read. + + :type number_messages: int + :param number_messages: The maximum number of messages to read (default=1) + + :type visibility_timeout: int + :param visibility_timeout: The number of seconds the message should remain invisible + to other queue readers (default=None which uses the Queues default) + + :type attributes: str + :param attributes: The name of additional attribute to return with response + or All if you want all attributes. The default is to + return no additional attributes. Valid values: + All + SenderId + SentTimestamp + ApproximateReceiveCount + ApproximateFirstReceiveTimestamp + + :rtype: list + :return: A list of :class:`boto.sqs.message.Message` objects. 
+ """ + params = {'MaxNumberOfMessages' : number_messages} + if visibility_timeout: + params['VisibilityTimeout'] = visibility_timeout + if attributes: + self.build_list_params(params, attributes, 'AttributeName') + return self.get_list('ReceiveMessage', params, [('Message', queue.message_class)], + queue.id, queue) + + def delete_message(self, queue, message): + """ + Delete a message from a queue. + + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type message: A :class:`boto.sqs.message.Message` object + :param message: The Message to be deleted + + :rtype: bool + :return: True if successful, False otherwise. + """ + params = {'ReceiptHandle' : message.receipt_handle} + return self.get_status('DeleteMessage', params, queue.id) + + def delete_message_from_handle(self, queue, receipt_handle): + """ + Delete a message from a queue, given a receipt handle. + + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type receipt_handle: str + :param receipt_handle: The receipt handle for the message + + :rtype: bool + :return: True if successful, False otherwise. + """ + params = {'ReceiptHandle' : receipt_handle} + return self.get_status('DeleteMessage', params, queue.id) + + def send_message(self, queue, message_content): + params = {'MessageBody' : message_content} + return self.get_object('SendMessage', params, Message, queue.id, verb='POST') + + def change_message_visibility(self, queue, receipt_handle, visibility_timeout): + """ + Extends the read lock timeout for the specified message from the specified queue + to the specified value. + + :type queue: A :class:`boto.sqs.queue.Queue` object + :param queue: The Queue from which messages are read. + + :type receipt_handle: str + :param queue: The receipt handle associated with the message whose + visibility timeout will be changed. + + :type visibility_timeout: int + :param visibility_timeout: The new value of the message's visibility timeout + in seconds. + """ + params = {'ReceiptHandle' : receipt_handle, + 'VisibilityTimeout' : visibility_timeout} + return self.get_status('ChangeMessageVisibility', params, queue.id) + + def get_all_queues(self, prefix=''): + params = {} + if prefix: + params['QueueNamePrefix'] = prefix + return self.get_list('ListQueues', params, [('QueueUrl', Queue)]) + + def get_queue(self, queue_name): + rs = self.get_all_queues(queue_name) + for q in rs: + if q.url.endswith(queue_name): + return q + return None + + lookup = get_queue + + # + # Permissions methods + # + + def add_permission(self, queue, label, aws_account_id, action_name): + """ + Add a permission to a queue. + + :type queue: :class:`boto.sqs.queue.Queue` + :param queue: The queue object + + :type label: str or unicode + :param label: A unique identification of the permission you are setting. + Maximum of 80 characters ``[0-9a-zA-Z_-]`` + Example, AliceSendMessage + + :type aws_account_id: str or unicode + :param principal_id: The AWS account number of the principal who will be given + permission. The principal must have an AWS account, but + does not need to be signed up for Amazon SQS. For information + about locating the AWS account identification. + + :type action_name: str or unicode + :param action_name: The action. Valid choices are: + \*|SendMessage|ReceiveMessage|DeleteMessage| + ChangeMessageVisibility|GetQueueAttributes + + :rtype: bool + :return: True if successful, False otherwise. 
+ + """ + params = {'Label': label, + 'AWSAccountId' : aws_account_id, + 'ActionName' : action_name} + return self.get_status('AddPermission', params, queue.id) + + def remove_permission(self, queue, label): + """ + Remove a permission from a queue. + + :type queue: :class:`boto.sqs.queue.Queue` + :param queue: The queue object + + :type label: str or unicode + :param label: The unique label associated with the permission being removed. + + :rtype: bool + :return: True if successful, False otherwise. + """ + params = {'Label': label} + return self.get_status('RemovePermission', params, queue.id) + + + + + diff --git a/vendor/boto/boto/sqs/jsonmessage.py b/vendor/boto/boto/sqs/jsonmessage.py new file mode 100644 index 000000000000..ab05a60172f7 --- /dev/null +++ b/vendor/boto/boto/sqs/jsonmessage.py @@ -0,0 +1,42 @@ +# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.sqs.message import MHMessage +from boto.exception import SQSDecodeError +import base64 +import simplejson + +class JSONMessage(MHMessage): + """ + Acts like a dictionary but encodes it's data as a Base64 encoded JSON payload. + """ + + def decode(self, value): + try: + value = base64.b64decode(value) + value = simplejson.loads(value) + except: + raise SQSDecodeError('Unable to decode message', self) + return value + + def encode(self, value): + value = simplejson.dumps(value) + return base64.b64encode(value) diff --git a/vendor/boto/boto/sqs/message.py b/vendor/boto/boto/sqs/message.py new file mode 100644 index 000000000000..da1ba68e6f6a --- /dev/null +++ b/vendor/boto/boto/sqs/message.py @@ -0,0 +1,251 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +SQS Message + +A Message represents the data stored in an SQS queue. The rules for what is allowed within an SQS +Message are here: + + http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html + +So, at it's simplest level a Message just needs to allow a developer to store bytes in it and get the bytes +back out. However, to allow messages to have richer semantics, the Message class must support the +following interfaces: + +The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a +boto Queue object and represents the queue that the message will be stored in. The default value for +this parameter is None. + +The constructor for the Message class must accept a keyword parameter "body" which represents the +content or body of the message. The format of this parameter will depend on the behavior of the +particular Message subclass. For example, if the Message subclass provides dictionary-like behavior to the +user the body passed to the constructor should be a dict-like object that can be used to populate +the initial state of the message. + +The Message class must provide an encode method that accepts a value of the same type as the body +parameter of the constructor and returns a string of characters that are able to be stored in an +SQS message body (see rules above). + +The Message class must provide a decode method that accepts a string of characters that can be +stored (and probably were stored!) in an SQS message and return an object of a type that is consistent +with the "body" parameter accepted on the class constructor. + +The Message class must provide a __len__ method that will return the size of the encoded message +that would be stored in SQS based on the current state of the Message object. + +The Message class must provide a get_body method that will return the body of the message in the +same format accepted in the constructor of the class. + +The Message class must provide a set_body method that accepts a message body in the same format +accepted by the constructor of the class. This method should alter to the internal state of the +Message object to reflect the state represented in the message body parameter. + +The Message class must provide a get_body_encoded method that returns the current body of the message +in the format in which it would be stored in SQS. +""" + +import base64 +import StringIO +from boto.sqs.attributes import Attributes +from boto.exception import SQSDecodeError + +class RawMessage: + """ + Base class for SQS messages. RawMessage does not encode the message + in any way. Whatever you store in the body of the message is what + will be written to SQS and whatever is returned from SQS is stored + directly into the body of the message. 
+ """ + + def __init__(self, queue=None, body=''): + self.queue = queue + self.set_body(body) + self.id = None + self.receipt_handle = None + self.md5 = None + self.attributes = Attributes(self) + + def __len__(self): + return len(self.encode(self._body)) + + def startElement(self, name, attrs, connection): + if name == 'Attribute': + return self.attributes + return None + + def endElement(self, name, value, connection): + if name == 'Body': + self.set_body(self.decode(value)) + elif name == 'MessageId': + self.id = value + elif name == 'ReceiptHandle': + self.receipt_handle = value + elif name == 'MD5OfMessageBody': + self.md5 = value + else: + setattr(self, name, value) + + def encode(self, value): + """Transform body object into serialized byte array format.""" + return value + + def decode(self, value): + """Transform seralized byte array into any object.""" + return value + + def set_body(self, body): + """Override the current body for this object, using decoded format.""" + self._body = body + + def get_body(self): + return self._body + + def get_body_encoded(self): + """ + This method is really a semi-private method used by the Queue.write + method when writing the contents of the message to SQS. + You probably shouldn't need to call this method in the normal course of events. + """ + return self.encode(self.get_body()) + + def delete(self): + if self.queue: + return self.queue.delete_message(self) + + def change_visibility(self, visibility_timeout): + if self.queue: + self.queue.connection.change_message_visibility(self.queue, + self.receipt_handle, + visibility_timeout) + +class Message(RawMessage): + """ + The default Message class used for SQS queues. This class automatically + encodes/decodes the message body using Base64 encoding to avoid any + illegal characters in the message body. See: + + http://developer.amazonwebservices.com/connect/thread.jspa?messageID=49680%EC%88%90 + + for details on why this is a good idea. The encode/decode is meant to + be transparent to the end-user. + """ + + def encode(self, value): + return base64.b64encode(value) + + def decode(self, value): + try: + value = base64.b64decode(value) + except: + raise SQSDecodeError('Unable to decode message', self) + return value + +class MHMessage(Message): + """ + The MHMessage class provides a message that provides RFC821-like + headers like this: + + HeaderName: HeaderValue + + The encoding/decoding of this is handled automatically and after + the message body has been read, the message instance can be treated + like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'. 
+ """ + + def __init__(self, queue=None, body=None, xml_attrs=None): + if body == None or body == '': + body = {} + Message.__init__(self, queue, body) + + def decode(self, value): + try: + msg = {} + fp = StringIO.StringIO(value) + line = fp.readline() + while line: + delim = line.find(':') + key = line[0:delim] + value = line[delim+1:].strip() + msg[key.strip()] = value.strip() + line = fp.readline() + except: + raise SQSDecodeError('Unable to decode message', self) + return msg + + def encode(self, value): + s = '' + for item in value.items(): + s = s + '%s: %s\n' % (item[0], item[1]) + return s + + def __getitem__(self, key): + if self._body.has_key(key): + return self._body[key] + else: + raise KeyError(key) + + def __setitem__(self, key, value): + self._body[key] = value + self.set_body(self._body) + + def keys(self): + return self._body.keys() + + def values(self): + return self._body.values() + + def items(self): + return self._body.items() + + def has_key(self, key): + return self._body.has_key(key) + + def update(self, d): + self._body.update(d) + self.set_body(self._body) + + def get(self, key, default=None): + return self._body.get(key, default) + +class EncodedMHMessage(MHMessage): + """ + The EncodedMHMessage class provides a message that provides RFC821-like + headers like this: + + HeaderName: HeaderValue + + This variation encodes/decodes the body of the message in base64 automatically. + The message instance can be treated like a mapping object, + i.e. m['HeaderName'] would return 'HeaderValue'. + """ + + def decode(self, value): + try: + value = base64.b64decode(value) + except: + raise SQSDecodeError('Unable to decode message', self) + return MHMessage.decode(self, value) + + def encode(self, value): + value = MHMessage.encode(value) + return base64.b64encode(self, value) + diff --git a/vendor/boto/boto/sqs/queue.py b/vendor/boto/boto/sqs/queue.py new file mode 100644 index 000000000000..91b319ba1458 --- /dev/null +++ b/vendor/boto/boto/sqs/queue.py @@ -0,0 +1,414 @@ +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents an SQS Queue +""" + +import urlparse +from boto.sqs.message import Message + + +class Queue: + + def __init__(self, connection=None, url=None, message_class=Message): + self.connection = connection + self.url = url + self.message_class = message_class + self.visibility_timeout = None + + def _id(self): + if self.url: + val = urlparse.urlparse(self.url)[2] + else: + val = self.url + return val + id = property(_id) + + def _name(self): + if self.url: + val = urlparse.urlparse(self.url)[2].split('/')[2] + else: + val = self.url + return val + name = property(_name) + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'QueueUrl': + self.url = value + elif name == 'VisibilityTimeout': + self.visibility_timeout = int(value) + else: + setattr(self, name, value) + + def set_message_class(self, message_class): + """ + Set the message class that should be used when instantiating messages read + from the queue. By default, the class boto.sqs.message.Message is used but + this can be overriden with any class that behaves like a message. + + :type message_class: Message-like class + :param message_class: The new Message class + """ + self.message_class = message_class + + def get_attributes(self, attributes='All'): + """ + Retrieves attributes about this queue object and returns + them in an Attribute instance (subclass of a Dictionary). + + :type attributes: string + :param attributes: String containing one of: + ApproximateNumberOfMessages, + ApproximateNumberOfMessagesNotVisible, + VisibilityTimeout, + CreatedTimestamp, + LastModifiedTimestamp, + Policy + :rtype: Attribute object + :return: An Attribute object which is a mapping type holding the + requested name/value pairs + """ + return self.connection.get_queue_attributes(self, attributes) + + def set_attribute(self, attribute, value): + """ + Set a new value for an attribute of the Queue. + + :type attribute: String + :param attribute: The name of the attribute you want to set. The + only valid value at this time is: VisibilityTimeout + :type value: int + :param value: The new value for the attribute. + For VisibilityTimeout the value must be an + integer number of seconds from 0 to 86400. + + :rtype: bool + :return: True if successful, otherwise False. + """ + return self.connection.set_queue_attribute(self, attribute, value) + + def get_timeout(self): + """ + Get the visibility timeout for the queue. + + :rtype: int + :return: The number of seconds as an integer. + """ + a = self.get_attributes('VisibilityTimeout') + return int(a['VisibilityTimeout']) + + def set_timeout(self, visibility_timeout): + """ + Set the visibility timeout for the queue. + + :type visibility_timeout: int + :param visibility_timeout: The desired timeout in seconds + """ + retval = self.set_attribute('VisibilityTimeout', visibility_timeout) + if retval: + self.visibility_timeout = visibility_timeout + return retval + + def add_permission(self, label, aws_account_id, action_name): + """ + Add a permission to a queue. + + :type label: str or unicode + :param label: A unique identification of the permission you are setting. + Maximum of 80 characters ``[0-9a-zA-Z_-]`` + Example, AliceSendMessage + + :type aws_account_id: str or unicode + :param principal_id: The AWS account number of the principal who will be given + permission. The principal must have an AWS account, but + does not need to be signed up for Amazon SQS. 
For information + about locating the AWS account identification. + + :type action_name: str or unicode + :param action_name: The action. Valid choices are: + \*|SendMessage|ReceiveMessage|DeleteMessage| + ChangeMessageVisibility|GetQueueAttributes + + :rtype: bool + :return: True if successful, False otherwise. + + """ + return self.connection.add_permission(self, label, aws_account_id, action_name) + + def remove_permission(self, label): + """ + Remove a permission from a queue. + + :type label: str or unicode + :param label: The unique label associated with the permission being removed. + + :rtype: bool + :return: True if successful, False otherwise. + """ + return self.connection.remove_permission(self, label) + + def read(self, visibility_timeout=None): + """ + Read a single message from the queue. + + :type visibility_timeout: int + :param visibility_timeout: The timeout for this message in seconds + + :rtype: :class:`boto.sqs.message.Message` + :return: A single message or None if queue is empty + """ + rs = self.get_messages(1, visibility_timeout) + if len(rs) == 1: + return rs[0] + else: + return None + + def write(self, message): + """ + Add a single message to the queue. + + :type message: Message + :param message: The message to be written to the queue + + :rtype: :class:`boto.sqs.message.Message` + :return: The :class:`boto.sqs.message.Message` object that was written. + """ + new_msg = self.connection.send_message(self, message.get_body_encoded()) + message.id = new_msg.id + message.md5 = new_msg.md5 + return message + + def new_message(self, body=''): + """ + Create new message of appropriate class. + + :type body: message body + :param body: The body of the newly created message (optional). + + :rtype: :class:`boto.sqs.message.Message` + :return: A new Message object + """ + m = self.message_class(self, body) + m.queue = self + return m + + # get a variable number of messages, returns a list of messages + def get_messages(self, num_messages=1, visibility_timeout=None, + attributes=None): + """ + Get a variable number of messages. + + :type num_messages: int + :param num_messages: The maximum number of messages to read from the queue. + + :type visibility_timeout: int + :param visibility_timeout: The VisibilityTimeout for the messages read. + + :type attributes: str + :param attributes: The name of additional attribute to return with response + or All if you want all attributes. The default is to + return no additional attributes. Valid values: + All + SenderId + SentTimestamp + ApproximateReceiveCount + ApproximateFirstReceiveTimestamp + + :rtype: list + :return: A list of :class:`boto.sqs.message.Message` objects. + """ + return self.connection.receive_message(self, number_messages=num_messages, + visibility_timeout=visibility_timeout, + attributes=attributes) + + def delete_message(self, message): + """ + Delete a message from the queue. + + :type message: :class:`boto.sqs.message.Message` + :param message: The :class:`boto.sqs.message.Message` object to delete. + + :rtype: bool + :return: True if successful, False otherwise + """ + return self.connection.delete_message(self, message) + + def delete(self): + """ + Delete the queue. 
+ """ + return self.connection.delete_queue(self) + + def clear(self, page_size=10, vtimeout=10): + """Utility function to remove all messages from a queue""" + n = 0 + l = self.get_messages(page_size, vtimeout) + while l: + for m in l: + self.delete_message(m) + n += 1 + l = self.get_messages(page_size, vtimeout) + return n + + def count(self, page_size=10, vtimeout=10): + """ + Utility function to count the number of messages in a queue. + Note: This function now calls GetQueueAttributes to obtain + an 'approximate' count of the number of messages in a queue. + """ + a = self.get_attributes('ApproximateNumberOfMessages') + return int(a['ApproximateNumberOfMessages']) + + def count_slow(self, page_size=10, vtimeout=10): + """ + Deprecated. This is the old 'count' method that actually counts + the messages by reading them all. This gives an accurate count but + is very slow for queues with non-trivial number of messasges. + Instead, use get_attribute('ApproximateNumberOfMessages') to take + advantage of the new SQS capability. This is retained only for + the unit tests. + """ + n = 0 + l = self.get_messages(page_size, vtimeout) + while l: + for m in l: + n += 1 + l = self.get_messages(page_size, vtimeout) + return n + + def dump_(self, file_name, page_size=10, vtimeout=10, sep='\n'): + """Utility function to dump the messages in a queue to a file + NOTE: Page size must be < 10 else SQS errors""" + fp = open(file_name, 'wb') + n = 0 + l = self.get_messages(page_size, vtimeout) + while l: + for m in l: + fp.write(m.get_body()) + if sep: + fp.write(sep) + n += 1 + l = self.get_messages(page_size, vtimeout) + fp.close() + return n + + def save_to_file(self, fp, sep='\n'): + """ + Read all messages from the queue and persist them to file-like object. + Messages are written to the file and the 'sep' string is written + in between messages. Messages are deleted from the queue after + being written to the file. + Returns the number of messages saved. + """ + n = 0 + m = self.read() + while m: + n += 1 + fp.write(m.get_body()) + if sep: + fp.write(sep) + self.delete_message(m) + m = self.read() + return n + + def save_to_filename(self, file_name, sep='\n'): + """ + Read all messages from the queue and persist them to local file. + Messages are written to the file and the 'sep' string is written + in between messages. Messages are deleted from the queue after + being written to the file. + Returns the number of messages saved. + """ + fp = open(file_name, 'wb') + n = self.save_to_file(fp, sep) + fp.close() + return n + + # for backwards compatibility + save = save_to_filename + + def save_to_s3(self, bucket): + """ + Read all messages from the queue and persist them to S3. + Messages are stored in the S3 bucket using a naming scheme of:: + + / + + Messages are deleted from the queue after being saved to S3. + Returns the number of messages saved. + """ + n = 0 + m = self.read() + while m: + n += 1 + key = bucket.new_key('%s/%s' % (self.id, m.id)) + key.set_contents_from_string(m.get_body()) + self.delete_message(m) + m = self.read() + return n + + def load_from_s3(self, bucket, prefix=None): + """ + Load messages previously saved to S3. 
+ """ + n = 0 + if prefix: + prefix = '%s/' % prefix + else: + prefix = '%s/' % self.id[1:] + rs = bucket.list(prefix=prefix) + for key in rs: + n += 1 + m = self.new_message(key.get_contents_as_string()) + self.write(m) + return n + + def load_from_file(self, fp, sep='\n'): + """Utility function to load messages from a file-like object to a queue""" + n = 0 + body = '' + l = fp.readline() + while l: + if l == sep: + m = Message(self, body) + self.write(m) + n += 1 + print 'writing message %d' % n + body = '' + else: + body = body + l + l = fp.readline() + return n + + def load_from_filename(self, file_name, sep='\n'): + """Utility function to load messages from a local filename to a queue""" + fp = open(file_name, 'rb') + n = self.load_file_file(fp, sep) + fp.close() + return n + + # for backward compatibility + load = load_from_filename + diff --git a/vendor/boto/boto/sqs/regioninfo.py b/vendor/boto/boto/sqs/regioninfo.py new file mode 100644 index 000000000000..1d13a4005c30 --- /dev/null +++ b/vendor/boto/boto/sqs/regioninfo.py @@ -0,0 +1,40 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +from boto.ec2.regioninfo import RegionInfo + +class SQSRegionInfo(RegionInfo): + + def connect(self, **kw_params): + """ + Connect to this Region's endpoint. Returns an SQSConnection + object pointing to the endpoint associated with this region. + You may pass any of the arguments accepted by the SQSConnection + object's constructor as keyword arguments and they will be + passed along to the SQSConnection object. 
+ + :rtype: :class:`boto.sqs.connection.SQSConnection` + :return: The connection to this regions endpoint + """ + from boto.sqs.connection import SQSConnection + return SQSConnection(region=self, **kw_params) + diff --git a/vendor/boto/boto/tests/__init__.py b/vendor/boto/boto/tests/__init__.py new file mode 100644 index 000000000000..449bd162a8ea --- /dev/null +++ b/vendor/boto/boto/tests/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + + diff --git a/vendor/boto/boto/tests/devpay_s3.py b/vendor/boto/boto/tests/devpay_s3.py new file mode 100644 index 000000000000..bb91125bf852 --- /dev/null +++ b/vendor/boto/boto/tests/devpay_s3.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python + +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for the S3Connection +""" + +import time +import os +import urllib + +from boto.s3.connection import S3Connection +from boto.exception import S3PermissionsError + +# this test requires a devpay product and user token to run: + +AMAZON_USER_TOKEN = '{UserToken}...your token here...' 
+DEVPAY_HEADERS = { 'x-amz-security-token': AMAZON_USER_TOKEN } + +print '--- running S3Connection tests (DevPay) ---' +c = S3Connection() +# create a new, empty bucket +bucket_name = 'test-%d' % int(time.time()) +bucket = c.create_bucket(bucket_name, headers=DEVPAY_HEADERS) +# now try a get_bucket call and see if it's really there +bucket = c.get_bucket(bucket_name, headers=DEVPAY_HEADERS) +# test logging +logging_bucket = c.create_bucket(bucket_name + '-log', headers=DEVPAY_HEADERS) +logging_bucket.set_as_logging_target(headers=DEVPAY_HEADERS) +bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name, headers=DEVPAY_HEADERS) +bucket.disable_logging(headers=DEVPAY_HEADERS) +c.delete_bucket(logging_bucket, headers=DEVPAY_HEADERS) +# create a new key and store it's content from a string +k = bucket.new_key() +k.name = 'foobar' +s1 = 'This is a test of file upload and download' +s2 = 'This is a second string to test file upload and download' +k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) +fp = open('foobar', 'wb') +# now get the contents from s3 to a local file +k.get_contents_to_file(fp, headers=DEVPAY_HEADERS) +fp.close() +fp = open('foobar') +# check to make sure content read from s3 is identical to original +assert s1 == fp.read(), 'corrupted file' +fp.close() +# test generated URLs +url = k.generate_url(3600, headers=DEVPAY_HEADERS) +file = urllib.urlopen(url) +assert s1 == file.read(), 'invalid URL %s' % url +url = k.generate_url(3600, force_http=True, headers=DEVPAY_HEADERS) +file = urllib.urlopen(url) +assert s1 == file.read(), 'invalid URL %s' % url +bucket.delete_key(k, headers=DEVPAY_HEADERS) +# test a few variations on get_all_keys - first load some data +# for the first one, let's override the content type +phony_mimetype = 'application/x-boto-test' +headers = {'Content-Type': phony_mimetype} +headers.update(DEVPAY_HEADERS) +k.name = 'foo/bar' +k.set_contents_from_string(s1, headers) +k.name = 'foo/bas' +k.set_contents_from_filename('foobar', headers=DEVPAY_HEADERS) +k.name = 'foo/bat' +k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) +k.name = 'fie/bar' +k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) +k.name = 'fie/bas' +k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) +k.name = 'fie/bat' +k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) +# try resetting the contents to another value +md5 = k.md5 +k.set_contents_from_string(s2, headers=DEVPAY_HEADERS) +assert k.md5 != md5 +os.unlink('foobar') +all = bucket.get_all_keys(headers=DEVPAY_HEADERS) +assert len(all) == 6 +rs = bucket.get_all_keys(prefix='foo', headers=DEVPAY_HEADERS) +assert len(rs) == 3 +rs = bucket.get_all_keys(prefix='', delimiter='/', headers=DEVPAY_HEADERS) +assert len(rs) == 2 +rs = bucket.get_all_keys(maxkeys=5, headers=DEVPAY_HEADERS) +assert len(rs) == 5 +# test the lookup method +k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS) +assert isinstance(k, bucket.key_class) +assert k.content_type == phony_mimetype +k = bucket.lookup('notthere', headers=DEVPAY_HEADERS) +assert k == None +# try some metadata stuff +k = bucket.new_key() +k.name = 'has_metadata' +mdkey1 = 'meta1' +mdval1 = 'This is the first metadata value' +k.set_metadata(mdkey1, mdval1) +mdkey2 = 'meta2' +mdval2 = 'This is the second metadata value' +k.set_metadata(mdkey2, mdval2) +k.set_contents_from_string(s1, headers=DEVPAY_HEADERS) +k = bucket.lookup('has_metadata', headers=DEVPAY_HEADERS) +assert k.get_metadata(mdkey1) == mdval1 +assert k.get_metadata(mdkey2) == mdval2 +k = 
bucket.new_key() +k.name = 'has_metadata' +k.get_contents_as_string(headers=DEVPAY_HEADERS) +assert k.get_metadata(mdkey1) == mdval1 +assert k.get_metadata(mdkey2) == mdval2 +bucket.delete_key(k, headers=DEVPAY_HEADERS) +# test list and iterator +rs1 = bucket.list(headers=DEVPAY_HEADERS) +num_iter = 0 +for r in rs1: + num_iter = num_iter + 1 +rs = bucket.get_all_keys(headers=DEVPAY_HEADERS) +num_keys = len(rs) +assert num_iter == num_keys +# try a key with a funny character +k = bucket.new_key() +k.name = 'testnewline\n' +k.set_contents_from_string('This is a test', headers=DEVPAY_HEADERS) +rs = bucket.get_all_keys(headers=DEVPAY_HEADERS) +assert len(rs) == num_keys + 1 +bucket.delete_key(k, headers=DEVPAY_HEADERS) +rs = bucket.get_all_keys(headers=DEVPAY_HEADERS) +assert len(rs) == num_keys +# try some acl stuff +bucket.set_acl('public-read', headers=DEVPAY_HEADERS) +policy = bucket.get_acl(headers=DEVPAY_HEADERS) +assert len(policy.acl.grants) == 2 +bucket.set_acl('private', headers=DEVPAY_HEADERS) +policy = bucket.get_acl(headers=DEVPAY_HEADERS) +assert len(policy.acl.grants) == 1 +k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS) +k.set_acl('public-read', headers=DEVPAY_HEADERS) +policy = k.get_acl(headers=DEVPAY_HEADERS) +assert len(policy.acl.grants) == 2 +k.set_acl('private', headers=DEVPAY_HEADERS) +policy = k.get_acl(headers=DEVPAY_HEADERS) +assert len(policy.acl.grants) == 1 +# try the convenience methods for grants +# this doesn't work with devpay +#bucket.add_user_grant('FULL_CONTROL', +# 'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67', +# headers=DEVPAY_HEADERS) +try: + bucket.add_email_grant('foobar', 'foo@bar.com', headers=DEVPAY_HEADERS) +except S3PermissionsError: + pass +# now delete all keys in bucket +for k in all: + bucket.delete_key(k, headers=DEVPAY_HEADERS) +# now delete bucket + +c.delete_bucket(bucket, headers=DEVPAY_HEADERS) + +print '--- tests completed ---' diff --git a/vendor/boto/boto/tests/test.py b/vendor/boto/boto/tests/test.py new file mode 100755 index 000000000000..e3c3ce797764 --- /dev/null +++ b/vendor/boto/boto/tests/test.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +do the unit tests! 
+""" + +import sys, os, unittest +import getopt, sys +import boto + +from boto.tests.test_sqsconnection import SQSConnectionTest +from boto.tests.test_s3connection import S3ConnectionTest +from boto.tests.test_s3versioning import S3VersionTest +from boto.tests.test_ec2connection import EC2ConnectionTest +from boto.tests.test_sdbconnection import SDBConnectionTest + +def usage(): + print 'test.py [-t testsuite] [-v verbosity]' + print ' -t run specific testsuite (s3|sqs|ec2|sdb|all)' + print ' -v verbosity (0|1|2)' + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], 'ht:v:', + ['help', 'testsuite', 'verbosity']) + except: + usage() + sys.exit(2) + testsuite = 'all' + verbosity = 1 + for o, a in opts: + if o in ('-h', '--help'): + usage() + sys.exit() + if o in ('-t', '--testsuite'): + testsuite = a + if o in ('-v', '--verbosity'): + verbosity = int(a) + if len(args) != 0: + usage() + sys.exit() + suite = unittest.TestSuite() + if testsuite == 'all': + suite.addTest(unittest.makeSuite(SQSConnectionTest)) + suite.addTest(unittest.makeSuite(S3ConnectionTest)) + suite.addTest(unittest.makeSuite(EC2ConnectionTest)) + suite.addTest(unittest.makeSuite(SDBConnectionTest)) + elif testsuite == 's3': + suite.addTest(unittest.makeSuite(S3ConnectionTest)) + suite.addTest(unittest.makeSuite(S3VersionTest)) + elif testsuite == 's3ver': + suite.addTest(unittest.makeSuite(S3VersionTest)) + elif testsuite == 'sqs': + suite.addTest(unittest.makeSuite(SQSConnectionTest)) + elif testsuite == 'ec2': + suite.addTest(unittest.makeSuite(EC2ConnectionTest)) + elif testsuite == 'sdb': + suite.addTest(unittest.makeSuite(SDBConnectionTest)) + else: + usage() + sys.exit() + unittest.TextTestRunner(verbosity=verbosity).run(suite) + +if __name__ == "__main__": + main() diff --git a/vendor/boto/boto/tests/test_ec2connection.py b/vendor/boto/boto/tests/test_ec2connection.py new file mode 100644 index 000000000000..db6e2af78863 --- /dev/null +++ b/vendor/boto/boto/tests/test_ec2connection.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python + +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Some unit tests for the EC2Connection +""" + +import unittest +import time +import os +from boto.ec2.connection import EC2Connection +import telnetlib +import socket + +class EC2ConnectionTest (unittest.TestCase): + + def test_1_basic(self): + # this is my user_id, if you want to run these tests you should + # replace this with yours or they won't work + user_id = '963068290131' + print '--- running EC2Connection tests ---' + c = EC2Connection() + # get list of private AMI's + rs = c.get_all_images(owners=[user_id]) + assert len(rs) > 0 + # now pick the first one + image = rs[0] + # temporarily make this image runnable by everyone + status = image.set_launch_permissions(group_names=['all']) + assert status + d = image.get_launch_permissions() + assert d.has_key('groups') + assert len(d['groups']) > 0 + # now remove that permission + status = image.remove_launch_permissions(group_names=['all']) + assert status + d = image.get_launch_permissions() + assert not d.has_key('groups') + + # create a new security group + group_name = 'test-%d' % int(time.time()) + group_desc = 'This is a security group created during unit testing' + group = c.create_security_group(group_name, group_desc) + # now get a listing of all security groups and look for our new one + rs = c.get_all_security_groups() + found = False + for g in rs: + if g.name == group_name: + found = True + assert found + # now pass arg to filter results to only our new group + rs = c.get_all_security_groups([group_name]) + assert len(rs) == 1 + group = rs[0] + # + # now delete the security group + status = c.delete_security_group(group_name) + # now make sure it's really gone + rs = c.get_all_security_groups() + found = False + for g in rs: + if g.name == group_name: + found = True + assert not found + # now create it again for use with the instance test + group = c.create_security_group(group_name, group_desc) + + # now try to launch apache image with our new security group + rs = c.get_all_images() + img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml' + for image in rs: + if image.location == img_loc: + break + reservation = image.run(security_groups=[group.name]) + instance = reservation.instances[0] + while instance.state != 'running': + print '\tinstance is %s' % instance.state + time.sleep(30) + instance.update() + # instance in now running, try to telnet to port 80 + t = telnetlib.Telnet() + try: + t.open(instance.dns_name, 80) + except socket.error: + pass + # now open up port 80 and try again, it should work + group.authorize('tcp', 80, 80, '0.0.0.0/0') + t.open(instance.dns_name, 80) + t.close() + # now revoke authorization and try again + group.revoke('tcp', 80, 80, '0.0.0.0/0') + try: + t.open(instance.dns_name, 80) + except socket.error: + pass + # now kill the instance and delete the security group + instance.stop() + # unfortunately, I can't delete the sg within this script + #sg.delete() + + # create a new key pair + key_name = 'test-%d' % int(time.time()) + status = c.create_key_pair(key_name) + assert status + # now get a listing of all key pairs and look for our new one + rs = c.get_all_key_pairs() + found = False + for k in rs: + if k.name == key_name: + found = True + assert found + # now pass arg to filter results to only our new key pair + rs = c.get_all_key_pairs([key_name]) + assert len(rs) == 1 + key_pair = rs[0] + # now delete the key pair + status = c.delete_key_pair(key_name) + # now make sure it's really gone + rs = c.get_all_key_pairs() + found = False + for k in rs: + if k.name == 
key_name: + found = True + assert not found + + # short test around Paid AMI capability + demo_paid_ami_id = 'ami-bd9d78d4' + demo_paid_ami_product_code = 'A79EC0DB' + l = c.get_all_images([demo_paid_ami_id]) + assert len(l) == 1 + assert len(l[0].product_codes) == 1 + assert l[0].product_codes[0] == demo_paid_ami_product_code + + print '--- tests completed ---' diff --git a/vendor/boto/boto/tests/test_s3connection.py b/vendor/boto/boto/tests/test_s3connection.py new file mode 100644 index 000000000000..a952d65d64ad --- /dev/null +++ b/vendor/boto/boto/tests/test_s3connection.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for the S3Connection +""" + +import unittest +import time +import os +import urllib +from boto.s3.connection import S3Connection +from boto.exception import S3PermissionsError + +class S3ConnectionTest (unittest.TestCase): + + def test_1_basic(self): + print '--- running S3Connection tests ---' + c = S3Connection() + # create a new, empty bucket + bucket_name = 'test-%d' % int(time.time()) + bucket = c.create_bucket(bucket_name) + # now try a get_bucket call and see if it's really there + bucket = c.get_bucket(bucket_name) + # test logging + logging_bucket = c.create_bucket(bucket_name + '-log') + logging_bucket.set_as_logging_target() + bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name) + bucket.disable_logging() + c.delete_bucket(logging_bucket) + k = bucket.new_key() + k.name = 'foobar' + s1 = 'This is a test of file upload and download' + s2 = 'This is a second string to test file upload and download' + k.set_contents_from_string(s1) + fp = open('foobar', 'wb') + # now get the contents from s3 to a local file + k.get_contents_to_file(fp) + fp.close() + fp = open('foobar') + # check to make sure content read from s3 is identical to original + assert s1 == fp.read(), 'corrupted file' + fp.close() + # test generated URLs + url = k.generate_url(3600) + file = urllib.urlopen(url) + assert s1 == file.read(), 'invalid URL %s' % url + url = k.generate_url(3600, force_http=True) + file = urllib.urlopen(url) + assert s1 == file.read(), 'invalid URL %s' % url + bucket.delete_key(k) + # test a few variations on get_all_keys - first load some data + # for the first one, let's override the content type + phony_mimetype = 'application/x-boto-test' + headers = {'Content-Type': phony_mimetype} + k.name = 
'foo/bar' + k.set_contents_from_string(s1, headers) + k.name = 'foo/bas' + k.set_contents_from_filename('foobar') + k.name = 'foo/bat' + k.set_contents_from_string(s1) + k.name = 'fie/bar' + k.set_contents_from_string(s1) + k.name = 'fie/bas' + k.set_contents_from_string(s1) + k.name = 'fie/bat' + k.set_contents_from_string(s1) + # try resetting the contents to another value + md5 = k.md5 + k.set_contents_from_string(s2) + assert k.md5 != md5 + os.unlink('foobar') + all = bucket.get_all_keys() + assert len(all) == 6 + rs = bucket.get_all_keys(prefix='foo') + assert len(rs) == 3 + rs = bucket.get_all_keys(prefix='', delimiter='/') + assert len(rs) == 2 + rs = bucket.get_all_keys(maxkeys=5) + assert len(rs) == 5 + # test the lookup method + k = bucket.lookup('foo/bar') + assert isinstance(k, bucket.key_class) + assert k.content_type == phony_mimetype + k = bucket.lookup('notthere') + assert k == None + # try some metadata stuff + k = bucket.new_key() + k.name = 'has_metadata' + mdkey1 = 'meta1' + mdval1 = 'This is the first metadata value' + k.set_metadata(mdkey1, mdval1) + mdkey2 = 'meta2' + mdval2 = 'This is the second metadata value' + k.set_metadata(mdkey2, mdval2) + # try a unicode metadata value + mdval3 = u'föö' + mdkey3 = 'meta3' + k.set_metadata(mdkey3, mdval3) + k.set_contents_from_string(s1) + k = bucket.lookup('has_metadata') + assert k.get_metadata(mdkey1) == mdval1 + assert k.get_metadata(mdkey2) == mdval2 + assert k.get_metadata(mdkey3) == mdval3 + k = bucket.new_key() + k.name = 'has_metadata' + k.get_contents_as_string() + assert k.get_metadata(mdkey1) == mdval1 + assert k.get_metadata(mdkey2) == mdval2 + assert k.get_metadata(mdkey3) == mdval3 + bucket.delete_key(k) + # test list and iterator + rs1 = bucket.list() + num_iter = 0 + for r in rs1: + num_iter = num_iter + 1 + rs = bucket.get_all_keys() + num_keys = len(rs) + assert num_iter == num_keys + # try a key with a funny character + k = bucket.new_key() + k.name = 'testnewline\n' + k.set_contents_from_string('This is a test') + rs = bucket.get_all_keys() + assert len(rs) == num_keys + 1 + bucket.delete_key(k) + rs = bucket.get_all_keys() + assert len(rs) == num_keys + # try some acl stuff + bucket.set_acl('public-read') + policy = bucket.get_acl() + assert len(policy.acl.grants) == 2 + bucket.set_acl('private') + policy = bucket.get_acl() + assert len(policy.acl.grants) == 1 + k = bucket.lookup('foo/bar') + k.set_acl('public-read') + policy = k.get_acl() + assert len(policy.acl.grants) == 2 + k.set_acl('private') + policy = k.get_acl() + assert len(policy.acl.grants) == 1 + # try the convenience methods for grants + bucket.add_user_grant('FULL_CONTROL', + 'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67') + try: + bucket.add_email_grant('foobar', 'foo@bar.com') + except S3PermissionsError: + pass + # now delete all keys in bucket + for k in all: + bucket.delete_key(k) + # now delete bucket + c.delete_bucket(bucket) + print '--- tests completed ---' diff --git a/vendor/boto/boto/tests/test_s3versioning.py b/vendor/boto/boto/tests/test_s3versioning.py new file mode 100644 index 000000000000..02abedd305d3 --- /dev/null +++ b/vendor/boto/boto/tests/test_s3versioning.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# 
without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for the S3 Versioning and MfaDelete +""" + +import unittest +import time +import os +import urllib +from boto.s3.connection import S3Connection +from boto.exception import S3ResponseError +from boto.s3.deletemarker import DeleteMarker + +class S3VersionTest (unittest.TestCase): + + def test_1_versions(self): + print '--- running S3Version tests ---' + c = S3Connection() + # create a new, empty bucket + bucket_name = 'version-%d' % int(time.time()) + bucket = c.create_bucket(bucket_name) + + # now try a get_bucket call and see if it's really there + bucket = c.get_bucket(bucket_name) + + # enable versions + d = bucket.get_versioning_status() + assert not d.has_key('Versioning') + bucket.configure_versioning(versioning=True) + time.sleep(5) + d = bucket.get_versioning_status() + assert d['Versioning'] == 'Enabled' + + # create a new key in the versioned bucket + k = bucket.new_key() + k.name = 'foobar' + s1 = 'This is a test of s3 versioning' + s2 = 'This is the second test of s3 versioning' + k.set_contents_from_string(s1) + time.sleep(5) + + # remember the version id of this object + v1 = k.version_id + + # now get the contents from s3 + o1 = k.get_contents_as_string() + + # check to make sure content read from s3 is identical to original + assert o1 == s1 + + # now overwrite that same key with new data + k.set_contents_from_string(s2) + v2 = k.version_id + time.sleep(5) + + # now retrieve the contents as a string and compare + s3 = k.get_contents_as_string() + assert s3 == s2 + + # Now list all versions and compare to what we have + rs = bucket.get_all_versions() + assert rs[0].version_id == v2 + assert rs[1].version_id == v1 + + # Now do a regular list command and make sure only the new key shows up + rs = bucket.get_all_keys() + assert len(rs) == 1 + + # Now do regular delete + bucket.delete_key('foobar') + time.sleep(5) + + # Now list versions and make sure old versions are there + # plus the DeleteMarker + rs = bucket.get_all_versions() + assert len(rs) == 3 + assert isinstance(rs[0], DeleteMarker) + + # Now delete v1 of the key + bucket.delete_key('foobar', version_id=v1) + time.sleep(5) + + # Now list versions again and make sure v1 is not there + rs = bucket.get_all_versions() + versions = [k.version_id for k in rs] + assert v1 not in versions + assert v2 in versions + + # Now try to enable MfaDelete + mfa_sn = raw_input('MFA S/N: ') + mfa_code = raw_input('MFA Code: ') + bucket.configure_versioning(True, mfa_delete=True, mfa_token=(mfa_sn, mfa_code)) + time.sleep(5) + d = bucket.get_versioning_status() + assert d['Versioning'] == 'Enabled' + assert d['MfaDelete'] == 'Enabled' + + # Now try to delete v2 without the MFA token + 
try: + bucket.delete_key('foobar', version_id=v2) + except S3ResponseError: + pass + + # Now try to delete v2 with the MFA token + mfa_code = raw_input('MFA Code: ') + bucket.delete_key('foobar', version_id=v2, mfa_token=(mfa_sn, mfa_code)) + + # Now disable MfaDelete on the bucket + mfa_code = raw_input('MFA Code: ') + bucket.configure_versioning(True, mfa_delete=False, mfa_token=(mfa_sn, mfa_code)) + + # Now suspend Versioning on the bucket + bucket.configure_versioning(False) + + # now delete all keys and deletemarkers in bucket + for k in bucket.list_versions(): + bucket.delete_key(k.name, version_id=k.version_id) + + # now delete bucket + c.delete_bucket(bucket) + print '--- tests completed ---' diff --git a/vendor/boto/boto/tests/test_sdbconnection.py b/vendor/boto/boto/tests/test_sdbconnection.py new file mode 100644 index 000000000000..c2bb74e39808 --- /dev/null +++ b/vendor/boto/boto/tests/test_sdbconnection.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Some unit tests for the SDBConnection +""" + +import unittest +import time +from boto.sdb.connection import SDBConnection +from boto.exception import SDBResponseError + +class SDBConnectionTest (unittest.TestCase): + + def test_1_basic(self): + print '--- running SDBConnection tests ---' + c = SDBConnection() + rs = c.get_all_domains() + num_domains = len(rs) + + # try illegal name + try: + domain = c.create_domain('bad:domain:name') + except SDBResponseError: + pass + + # now create one that should work and should be unique (i.e. 
a new one) + domain_name = 'test%d' % int(time.time()) + domain = c.create_domain(domain_name) + rs = c.get_all_domains() + assert len(rs) == num_domains+1 + + # now let's a couple of items and attributes + item_1 = 'item1' + same_value = 'same_value' + attrs_1 = {'name1' : same_value, 'name2' : 'diff_value_1'} + domain.put_attributes(item_1, attrs_1) + item_2 = 'item2' + attrs_2 = {'name1' : same_value, 'name2' : 'diff_value_2'} + domain.put_attributes(item_2, attrs_2) + time.sleep(10) + + # try to get the attributes and see if they match + item = domain.get_attributes(item_1) + assert len(item.keys()) == len(attrs_1.keys()) + assert item['name1'] == attrs_1['name1'] + assert item['name2'] == attrs_1['name2'] + + # try a search or two + rs = domain.query("['name1'='%s']" % same_value) + n = 0 + for item in rs: + n += 1 + assert n == 2 + rs = domain.query("['name2'='diff_value_2']") + n = 0 + for item in rs: + n += 1 + assert n == 1 + + # delete all attributes associated with item_1 + stat = domain.delete_attributes(item_1) + assert stat + + # now try a batch put operation on the domain + item3 = {'name3_1' : 'value3_1', + 'name3_2' : 'value3_2', + 'name3_3' : ['value3_3_1', 'value3_3_2']} + + item4 = {'name4_1' : 'value4_1', + 'name4_2' : ['value4_2_1', 'value4_2_2'], + 'name4_3' : 'value4_3'} + items = {'item3' : item3, 'item4' : item4} + domain.batch_put_attributes(items) + time.sleep(10) + item = domain.get_attributes('item3') + assert item['name3_2'] == 'value3_2' + + # now delete the domain + stat = c.delete_domain(domain) + assert stat + + print '--- tests completed ---' + diff --git a/vendor/boto/boto/tests/test_sqsconnection.py b/vendor/boto/boto/tests/test_sqsconnection.py new file mode 100644 index 000000000000..0fbd1f1afaa4 --- /dev/null +++ b/vendor/boto/boto/tests/test_sqsconnection.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python + +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Some unit tests for the SQSConnection +""" + +import unittest +import time +from boto.sqs.connection import SQSConnection +from boto.sqs.message import MHMessage +from boto.exception import SQSError + +class SQSConnectionTest (unittest.TestCase): + + def test_1_basic(self): + print '--- running SQSConnection tests ---' + c = SQSConnection() + rs = c.get_all_queues() + num_queues = 0 + for q in rs: + num_queues += 1 + + # try illegal name + try: + queue = c.create_queue('bad_queue_name') + except SQSError: + pass + + # now create one that should work and should be unique (i.e. a new one) + queue_name = 'test%d' % int(time.time()) + timeout = 60 + queue = c.create_queue(queue_name, timeout) + time.sleep(60) + rs = c.get_all_queues() + i = 0 + for q in rs: + i += 1 + assert i == num_queues+1 + assert queue.count_slow() == 0 + + # check the visibility timeout + t = queue.get_timeout() + assert t == timeout, '%d != %d' % (t, timeout) + + # now try to get queue attributes + a = q.get_attributes() + assert a.has_key('ApproximateNumberOfMessages') + assert a.has_key('VisibilityTimeout') + a = q.get_attributes('ApproximateNumberOfMessages') + assert a.has_key('ApproximateNumberOfMessages') + assert not a.has_key('VisibilityTimeout') + a = q.get_attributes('VisibilityTimeout') + assert not a.has_key('ApproximateNumberOfMessages') + assert a.has_key('VisibilityTimeout') + + # now change the visibility timeout + timeout = 45 + queue.set_timeout(timeout) + time.sleep(60) + t = queue.get_timeout() + assert t == timeout, '%d != %d' % (t, timeout) + + # now add a message + message_body = 'This is a test\n' + message = queue.new_message(message_body) + queue.write(message) + time.sleep(30) + assert queue.count_slow() == 1 + time.sleep(30) + + # now read the message from the queue with a 10 second timeout + message = queue.read(visibility_timeout=10) + assert message + assert message.get_body() == message_body + + # now immediately try another read, shouldn't find anything + message = queue.read() + assert message == None + + # now wait 30 seconds and try again + time.sleep(30) + message = queue.read() + assert message + + if c.APIVersion == '2007-05-01': + # now terminate the visibility timeout for this message + message.change_visibility(0) + # now see if we can read it in the queue + message = queue.read() + assert message + + # now delete the message + queue.delete_message(message) + time.sleep(30) + assert queue.count_slow() == 0 + + # create another queue so we can test force deletion + # we will also test MHMessage with this queue + queue_name = 'test%d' % int(time.time()) + timeout = 60 + queue = c.create_queue(queue_name, timeout) + queue.set_message_class(MHMessage) + time.sleep(30) + + # now add a couple of messages + message = queue.new_message() + message['foo'] = 'bar' + queue.write(message) + message_body = {'fie' : 'baz', 'foo' : 'bar'} + message = queue.new_message(body=message_body) + queue.write(message) + time.sleep(30) + + m = queue.read() + assert m['foo'] == 'bar' + + # now delete that queue and messages + c.delete_queue(queue, True) + + print '--- tests completed ---' + diff --git a/vendor/boto/boto/utils.py b/vendor/boto/boto/utils.py new file mode 100644 index 000000000000..255d42f6419b --- /dev/null +++ b/vendor/boto/boto/utils.py @@ -0,0 +1,561 @@ +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal 
in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +# +# Parts of this code were copied or derived from sample code supplied by AWS. +# The following notice applies to that code. +# +# This software code is made available "AS IS" without warranties of any +# kind. You may copy, display, modify and redistribute the software +# code either by itself or as incorporated into your code; provided that +# you do not remove any proprietary notices. Your use of this software +# code is at your own risk and you waive any claim against Amazon +# Digital Services, Inc. or its affiliates with respect to your use of +# this software code. (c) 2006 Amazon Digital Services, Inc. or its +# affiliates. + +""" +Some handy utility functions used by several classes. +""" + +import re +import urllib +import urllib2 +import subprocess +import StringIO +import time +import logging.handlers +import boto +import tempfile +import smtplib +import datetime +from email.MIMEMultipart import MIMEMultipart +from email.MIMEBase import MIMEBase +from email.MIMEText import MIMEText +from email.Utils import formatdate +from email import Encoders + +try: + import hashlib + _hashfn = hashlib.sha512 +except ImportError: + import md5 + _hashfn = md5.md5 + +METADATA_PREFIX = 'x-amz-meta-' +AMAZON_HEADER_PREFIX = 'x-amz-' + +# generates the aws canonical string for the given parameters +def canonical_string(method, path, headers, expires=None): + interesting_headers = {} + for key in headers: + lk = key.lower() + if lk in ['content-md5', 'content-type', 'date'] or lk.startswith(AMAZON_HEADER_PREFIX): + interesting_headers[lk] = headers[key].strip() + + # these keys get empty strings if they don't exist + if not interesting_headers.has_key('content-type'): + interesting_headers['content-type'] = '' + if not interesting_headers.has_key('content-md5'): + interesting_headers['content-md5'] = '' + + # just in case someone used this. it's not necessary in this lib. + if interesting_headers.has_key('x-amz-date'): + interesting_headers['date'] = '' + + # if you're using expires for query string auth, then it trumps date + # (and x-amz-date) + if expires: + interesting_headers['date'] = str(expires) + + sorted_header_keys = interesting_headers.keys() + sorted_header_keys.sort() + + buf = "%s\n" % method + for key in sorted_header_keys: + val = interesting_headers[key] + if key.startswith(AMAZON_HEADER_PREFIX): + buf += "%s:%s\n" % (key, val) + else: + buf += "%s\n" % val + + # don't include anything after the first ? in the resource... 
+ buf += "%s" % path.split('?')[0] + + # ...unless there is an acl or torrent parameter + if re.search("[&?]acl($|=|&)", path): + buf += "?acl" + elif re.search("[&?]logging($|=|&)", path): + buf += "?logging" + elif re.search("[&?]torrent($|=|&)", path): + buf += "?torrent" + elif re.search("[&?]location($|=|&)", path): + buf += "?location" + elif re.search("[&?]requestPayment($|=|&)", path): + buf += "?requestPayment" + elif re.search("[&?]versions($|=|&)", path): + buf += "?versions" + elif re.search("[&?]versioning($|=|&)", path): + buf += "?versioning" + else: + m = re.search("[&?]versionId=([^&]+)($|=|&)", path) + if m: + buf += '?versionId=' + m.group(1) + + return buf + +def merge_meta(headers, metadata): + final_headers = headers.copy() + for k in metadata.keys(): + if k.lower() in ['cache-control', 'content-md5', 'content-type', + 'content-encoding', 'content-disposition', + 'date', 'expires']: + final_headers[k] = metadata[k] + else: + final_headers[METADATA_PREFIX + k] = metadata[k] + + return final_headers + +def get_aws_metadata(headers): + metadata = {} + for hkey in headers.keys(): + if hkey.lower().startswith(METADATA_PREFIX): + val = urllib.unquote_plus(headers[hkey]) + metadata[hkey[len(METADATA_PREFIX):]] = unicode(val, 'utf-8') + del headers[hkey] + return metadata + +def retry_url(url, retry_on_404=True): + for i in range(0, 10): + try: + req = urllib2.Request(url) + resp = urllib2.urlopen(req) + return resp.read() + except urllib2.HTTPError, e: + # in 2.6 you use getcode(), in 2.5 and earlier you use code + if hasattr(e, 'getcode'): + code = e.getcode() + else: + code = e.code + if code == 404 and not retry_on_404: + return '' + except: + pass + boto.log.exception('Caught exception reading instance data') + time.sleep(2**i) + boto.log.error('Unable to read instance data, giving up') + return '' + +def _get_instance_metadata(url): + d = {} + data = retry_url(url) + if data: + fields = data.split('\n') + for field in fields: + if field.endswith('/'): + d[field[0:-1]] = _get_instance_metadata(url + field) + else: + p = field.find('=') + if p > 0: + key = field[p+1:] + resource = field[0:p] + '/openssh-key' + else: + key = resource = field + val = retry_url(url + resource) + p = val.find('\n') + if p > 0: + val = val.split('\n') + d[key] = val + return d + +def get_instance_metadata(version='latest'): + """ + Returns the instance metadata as a nested Python dictionary. + Simple values (e.g. local_hostname, hostname, etc.) will be + stored as string values. Values such as ancestor-ami-ids will + be stored in the dict as a list of string values. More complex + fields such as public-keys and will be stored as nested dicts. 
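+
+    A purely illustrative sketch of the shape of the result (the values
+    here are made up; real keys depend on the instance)::
+
+        {'ami-id': 'ami-12345678',
+         'instance-id': 'i-12345678',
+         'public-keys': {'my-key-name': 'ssh-rsa AAAA...'}}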
+ """ + url = 'http://169.254.169.254/%s/meta-data/' % version + return _get_instance_metadata(url) + +def get_instance_userdata(version='latest', sep=None): + url = 'http://169.254.169.254/%s/user-data' % version + user_data = retry_url(url, retry_on_404=False) + if user_data: + if sep: + l = user_data.split(sep) + user_data = {} + for nvpair in l: + t = nvpair.split('=') + user_data[t[0].strip()] = t[1].strip() + return user_data + +ISO8601 = '%Y-%m-%dT%H:%M:%SZ' + +def get_ts(ts=None): + if not ts: + ts = time.gmtime() + return time.strftime(ISO8601, ts) + +def parse_ts(ts): + return datetime.datetime.strptime(ts, ISO8601) + +def find_class(module_name, class_name=None): + if class_name: + module_name = "%s.%s" % (module_name, class_name) + modules = module_name.split('.') + c = None + + try: + for m in modules[1:]: + if c: + c = getattr(c, m) + else: + c = getattr(__import__(".".join(modules[0:-1])), m) + return c + except: + return None + +def update_dme(username, password, dme_id, ip_address): + """ + Update your Dynamic DNS record with DNSMadeEasy.com + """ + dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip' + dme_url += '?username=%s&password=%s&id=%s&ip=%s' + s = urllib2.urlopen(dme_url % (username, password, dme_id, ip_address)) + return s.read() + +def fetch_file(uri, file=None, username=None, password=None): + """ + Fetch a file based on the URI provided. If you do not pass in a file pointer + a tempfile.NamedTemporaryFile, or None if the file could not be + retrieved is returned. + The URI can be either an HTTP url, or "s3://bucket_name/key_name" + """ + boto.log.info('Fetching %s' % uri) + if file == None: + file = tempfile.NamedTemporaryFile() + try: + if uri.startswith('s3://'): + bucket_name, key_name = uri[len('s3://'):].split('/', 1) + c = boto.connect_s3() + bucket = c.get_bucket(bucket_name) + key = bucket.get_key(key_name) + key.get_contents_to_file(file) + else: + if username and password: + passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + passman.add_password(None, uri, username, password) + authhandler = urllib2.HTTPBasicAuthHandler(passman) + opener = urllib2.build_opener(authhandler) + urllib2.install_opener(opener) + s = urllib2.urlopen(uri) + file.write(s.read()) + file.seek(0) + except: + raise + boto.log.exception('Problem Retrieving file: %s' % uri) + file = None + return file + +class ShellCommand(object): + + def __init__(self, command, wait=True): + self.exit_code = 0 + self.command = command + self.log_fp = StringIO.StringIO() + self.wait = wait + self.run() + + def run(self): + boto.log.info('running:%s' % self.command) + self.process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + if(self.wait): + while self.process.poll() == None: + time.sleep(1) + t = self.process.communicate() + self.log_fp.write(t[0]) + self.log_fp.write(t[1]) + boto.log.info(self.log_fp.getvalue()) + self.exit_code = self.process.returncode + return self.exit_code + + def setReadOnly(self, value): + raise AttributeError + + def getStatus(self): + return self.exit_code + + status = property(getStatus, setReadOnly, None, 'The exit code for the command') + + def getOutput(self): + return self.log_fp.getvalue() + + output = property(getOutput, setReadOnly, None, 'The STDIN and STDERR output of the command') + +class AuthSMTPHandler(logging.handlers.SMTPHandler): + """ + This class extends the SMTPHandler in the standard Python logging module + to accept a username and password on the constructor and to 
then use those + credentials to authenticate with the SMTP server. To use this, you could + add something like this in your boto config file: + + [handler_hand07] + class=boto.utils.AuthSMTPHandler + level=WARN + formatter=form07 + args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject') + """ + + def __init__(self, mailhost, username, password, fromaddr, toaddrs, subject): + """ + Initialize the handler. + + We have extended the constructor to accept a username/password + for SMTP authentication. + """ + logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr, toaddrs, subject) + self.username = username + self.password = password + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. + It would be really nice if I could add authorization to this class + without having to resort to cut and paste inheritance but, no. + """ + try: + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port) + smtp.login(self.username, self.password) + msg = self.format(record) + msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( + self.fromaddr, + ','.join(self.toaddrs), + self.getSubject(record), + formatdate(), msg) + smtp.sendmail(self.fromaddr, self.toaddrs, msg) + smtp.quit() + except (KeyboardInterrupt, SystemExit): + raise + except: + self.handleError(record) + +class LRUCache(dict): + """A dictionary-like object that stores only a certain number of items, and + discards its least recently used item when full. + + >>> cache = LRUCache(3) + >>> cache['A'] = 0 + >>> cache['B'] = 1 + >>> cache['C'] = 2 + >>> len(cache) + 3 + + >>> cache['A'] + 0 + + Adding new items to the cache does not increase its size. Instead, the least + recently used item is dropped: + + >>> cache['D'] = 3 + >>> len(cache) + 3 + >>> 'B' in cache + False + + Iterating over the cache returns the keys, starting with the most recently + used: + + >>> for key in cache: + ... print key + D + A + C + + This code is based on the LRUCache class from Genshi which is based on + Mighty's LRUCache from ``myghtyutils.util``, written + by Mike Bayer and released under the MIT license (Genshi uses the + BSD License). 
See: + + http://svn.myghty.org/myghtyutils/trunk/lib/myghtyutils/util.py + """ + + class _Item(object): + def __init__(self, key, value): + self.previous = self.next = None + self.key = key + self.value = value + def __repr__(self): + return repr(self.value) + + def __init__(self, capacity): + self._dict = dict() + self.capacity = capacity + self.head = None + self.tail = None + + def __contains__(self, key): + return key in self._dict + + def __iter__(self): + cur = self.head + while cur: + yield cur.key + cur = cur.next + + def __len__(self): + return len(self._dict) + + def __getitem__(self, key): + item = self._dict[key] + self._update_item(item) + return item.value + + def __setitem__(self, key, value): + item = self._dict.get(key) + if item is None: + item = self._Item(key, value) + self._dict[key] = item + self._insert_item(item) + else: + item.value = value + self._update_item(item) + self._manage_size() + + def __repr__(self): + return repr(self._dict) + + def _insert_item(self, item): + item.previous = None + item.next = self.head + if self.head is not None: + self.head.previous = item + else: + self.tail = item + self.head = item + self._manage_size() + + def _manage_size(self): + while len(self._dict) > self.capacity: + del self._dict[self.tail.key] + if self.tail != self.head: + self.tail = self.tail.previous + self.tail.next = None + else: + self.head = self.tail = None + + def _update_item(self, item): + if self.head == item: + return + + previous = item.previous + previous.next = item.next + if item.next is not None: + item.next.previous = previous + else: + self.tail = previous + + item.previous = None + item.next = self.head + self.head.previous = self.head = item + +class Password(object): + """ + Password object that stores itself as SHA512 hashed. 
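+
+    A minimal doctest-style sketch (assumes hashlib is available, so the
+    stored value is the SHA512 hex digest)::
+
+        >>> p = Password()
+        >>> p.set('secret')
+        >>> p == 'secret'
+        True
+        >>> p == 'wrong'
+        False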
+ """ + def __init__(self, str=None): + """ + Load the string from an initial value, this should be the raw SHA512 hashed password + """ + self.str = str + + def set(self, value): + self.str = _hashfn(value).hexdigest() + + def __str__(self): + return str(self.str) + + def __eq__(self, other): + if other == None: + return False + return str(_hashfn(other).hexdigest()) == str(self.str) + + def __len__(self): + if self.str: + return len(self.str) + else: + return 0 + +def notify(subject, body=None, html_body=None, to_string=None, attachments=[], append_instance_id=True): + if append_instance_id: + subject = "[%s] %s" % (boto.config.get_value("Instance", "instance-id"), subject) + if not to_string: + to_string = boto.config.get_value('Notification', 'smtp_to', None) + if to_string: + try: + from_string = boto.config.get_value('Notification', 'smtp_from', 'boto') + msg = MIMEMultipart() + msg['From'] = from_string + msg['To'] = to_string + msg['Date'] = formatdate(localtime=True) + msg['Subject'] = subject + + if body: + msg.attach(MIMEText(body)) + + if html_body: + part = MIMEBase('text', 'html') + part.set_payload(html_body) + Encoders.encode_base64(part) + msg.attach(part) + + for part in attachments: + msg.attach(part) + + smtp_host = boto.config.get_value('Notification', 'smtp_host', 'localhost') + + # Alternate port support + if boto.config.get_value("Notification", "smtp_port"): + server = smtplib.SMTP(smtp_host, int(boto.config.get_value("Notification", "smtp_port"))) + else: + server = smtplib.SMTP(smtp_host) + + # TLS support + if boto.config.getbool("Notification", "smtp_tls"): + server.ehlo() + server.starttls() + server.ehlo() + smtp_user = boto.config.get_value('Notification', 'smtp_user', '') + smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '') + if smtp_user: + server.login(smtp_user, smtp_pass) + server.sendmail(from_string, to_string, msg.as_string()) + server.quit() + except: + boto.log.exception('notify failed') + diff --git a/vendor/boto/boto/vpc/__init__.py b/vendor/boto/boto/vpc/__init__.py new file mode 100644 index 000000000000..16c420d5304a --- /dev/null +++ b/vendor/boto/boto/vpc/__init__.py @@ -0,0 +1,473 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a connection to the EC2 service. 
+""" + +from boto.ec2.connection import EC2Connection +from boto.vpc.vpc import VPC +from boto.vpc.customergateway import CustomerGateway +from boto.vpc.vpngateway import VpnGateway, Attachment +from boto.vpc.dhcpoptions import DhcpOptions +from boto.vpc.subnet import Subnet +from boto.vpc.vpnconnection import VpnConnection + +class VPCConnection(EC2Connection): + + # VPC methods + + def get_all_vpcs(self, vpc_ids=None, filters=None): + """ + Retrieve information about your VPCs. You can filter results to + return information only about those VPCs that match your search + parameters. Otherwise, all VPCs associated with your account + are returned. + + :type vpc_ids: list + :param vpc_ids: A list of strings with the desired VPC ID's + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, the state of the VPC (pending or available) + - *cidrBlock*, CIDR block of the VPC + - *dhcpOptionsId*, the ID of a set of DHCP options + + :rtype: list + :return: A list of :class:`boto.vpc.vpc.VPC` + """ + params = {} + if vpc_ids: + self.build_list_params(params, vpc_ids, 'VpcId') + if filters: + i = 1 + for filter in filters: + params[('Filter.%d.Key' % i)] = filter[0] + params[('Filter.%d.Value.1')] = filter[1] + i += 1 + return self.get_list('DescribeVpcs', params, [('item', VPC)]) + + def create_vpc(self, cidr_block): + """ + Create a new Virtual Private Cloud. + + :type cidr_block: str + :param cidr_block: A valid CIDR block + + :rtype: The newly created VPC + :return: A :class:`boto.vpc.vpc.VPC` object + """ + params = {'CidrBlock' : cidr_block} + return self.get_object('CreateVpc', params, VPC) + + def delete_vpc(self, vpc_id): + """ + Delete a Virtual Private Cloud. + + :type vpc_id: str + :param vpc_id: The ID of the vpc to be deleted. + + :rtype: bool + :return: True if successful + """ + params = {'VpcId': vpc_id} + return self.get_status('DeleteVpc', params) + + # Customer Gateways + + def get_all_customer_gateways(self, customer_gateway_ids=None, filters=None): + """ + Retrieve information about your CustomerGateways. You can filter results to + return information only about those CustomerGateways that match your search + parameters. Otherwise, all CustomerGateways associated with your account + are returned. + + :type customer_gateway_ids: list + :param customer_gateway_ids: A list of strings with the desired CustomerGateway ID's + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, the state of the CustomerGateway + (pending,available,deleting,deleted) + - *type*, the type of customer gateway (ipsec.1) + - *ipAddress* the IP address of customer gateway's + internet-routable external inteface + + :rtype: list + :return: A list of :class:`boto.vpc.customergateway.CustomerGateway` + """ + params = {} + if customer_gateway_ids: + self.build_list_params(params, customer_gateway_ids, 'CustomerGatewayId') + if filters: + i = 1 + for filter in filters: + params[('Filter.%d.Key' % i)] = filter[0] + params[('Filter.%d.Value.1')] = filter[1] + i += 1 + return self.get_list('DescribeCustomerGateways', params, [('item', CustomerGateway)]) + + def create_customer_gateway(self, type, ip_address, bgp_asn): + """ + Create a new Customer Gateway + + :type type: str + :param type: Type of VPN Connection. 
Only valid valid currently is 'ipsec.1' + + :type ip_address: str + :param ip_address: Internet-routable IP address for customer's gateway. + Must be a static address. + + :type bgp_asn: str + :param bgp_asn: Customer gateway's Border Gateway Protocol (BGP) + Autonomous System Number (ASN) + + :rtype: The newly created CustomerGateway + :return: A :class:`boto.vpc.customergateway.CustomerGateway` object + """ + params = {'Type' : type, + 'IpAddress' : ip_address, + 'BgpAsn' : bgp_asn} + return self.get_object('CreateCustomerGateway', params, CustomerGateway) + + def delete_customer_gateway(self, customer_gateway_id): + """ + Delete a Customer Gateway. + + :type customer_gateway_id: str + :param customer_gateway_id: The ID of the customer_gateway to be deleted. + + :rtype: bool + :return: True if successful + """ + params = {'CustomerGatewayId': customer_gateway_id} + return self.get_status('DeleteCustomerGateway', params) + + # VPN Gateways + + def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None): + """ + Retrieve information about your VpnGateways. You can filter results to + return information only about those VpnGateways that match your search + parameters. Otherwise, all VpnGateways associated with your account + are returned. + + :type vpn_gateway_ids: list + :param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, the state of the VpnGateway + (pending,available,deleting,deleted) + - *type*, the type of customer gateway (ipsec.1) + - *availabilityZone*, the Availability zone the + VPN gateway is in. + + :rtype: list + :return: A list of :class:`boto.vpc.customergateway.VpnGateway` + """ + params = {} + if vpn_gateway_ids: + self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId') + if filters: + i = 1 + for filter in filters: + params[('Filter.%d.Key' % i)] = filter[0] + params[('Filter.%d.Value.1')] = filter[1] + i += 1 + return self.get_list('DescribeVpnGateways', params, [('item', VpnGateway)]) + + def create_vpn_gateway(self, type, availability_zone=None): + """ + Create a new Vpn Gateway + + :type type: str + :param type: Type of VPN Connection. Only valid valid currently is 'ipsec.1' + + :type availability_zone: str + :param availability_zone: The Availability Zone where you want the VPN gateway. + + :rtype: The newly created VpnGateway + :return: A :class:`boto.vpc.vpngateway.VpnGateway` object + """ + params = {'Type' : type} + if availability_zone: + params['AvailabilityZone'] = availability_zone + return self.get_object('CreateVpnGateway', params, VpnGateway) + + def delete_vpn_gateway(self, vpn_gateway_id): + """ + Delete a Vpn Gateway. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to be deleted. + + :rtype: bool + :return: True if successful + """ + params = {'VpnGatewayId': vpn_gateway_id} + return self.get_status('DeleteVpnGateway', params) + + def attach_vpn_gateway(self, vpn_gateway_id, vpc_id): + """ + Attaches a VPN gateway to a VPC. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to attach + + :type vpc_id: str + :param vpc_id: The ID of the VPC you want to attach the gateway to. 
+ + :rtype: An attachment + :return: a :class:`boto.vpc.vpngateway.Attachment` + """ + params = {'VpnGatewayId': vpn_gateway_id, + 'VpcId' : vpc_id} + return self.get_object('AttachVpnGateway', params, Attachment) + + # Subnets + + def get_all_subnets(self, subnet_ids=None, filters=None): + """ + Retrieve information about your Subnets. You can filter results to + return information only about those Subnets that match your search + parameters. Otherwise, all Subnets associated with your account + are returned. + + :type subnet_ids: list + :param subnet_ids: A list of strings with the desired Subnet ID's + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, the state of the Subnet + (pending,available) + - *vpdId*, the ID of teh VPC the subnet is in. + - *cidrBlock*, CIDR block of the subnet + - *availabilityZone*, the Availability Zone + the subnet is in. + + + :rtype: list + :return: A list of :class:`boto.vpc.subnet.Subnet` + """ + params = {} + if subnet_ids: + self.build_list_params(params, subnet_ids, 'SubnetId') + if filters: + i = 1 + for filter in filters: + params[('Filter.%d.Key' % i)] = filter[0] + params[('Filter.%d.Value.1')] = filter[1] + i += 1 + return self.get_list('DescribeSubnets', params, [('item', Subnet)]) + + def create_subnet(self, vpc_id, cidr_block, availability_zone=None): + """ + Create a new Subnet + + :type vpc_id: str + :param vpc_id: The ID of the VPC where you want to create the subnet. + + :type cidr_block: str + :param cidr_block: The CIDR block you want the subnet to cover. + + :type availability_zone: str + :param availability_zone: The AZ you want the subnet in + + :rtype: The newly created Subnet + :return: A :class:`boto.vpc.customergateway.Subnet` object + """ + params = {'VpcId' : vpc_id, + 'CidrBlock' : cidr_block} + if availability_zone: + params['AvailabilityZone'] = availability_zone + return self.get_object('CreateSubnet', params, Subnet) + + def delete_subnet(self, subnet_id): + """ + Delete a subnet. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to be deleted. + + :rtype: bool + :return: True if successful + """ + params = {'SubnetId': subnet_id} + return self.get_status('DeleteSubnet', params) + + + # DHCP Options + + def get_all_dhcp_options(self, dhcp_options_ids=None): + """ + Retrieve information about your DhcpOptions. + + :type dhcp_options_ids: list + :param dhcp_options_ids: A list of strings with the desired DhcpOption ID's + + :rtype: list + :return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions` + """ + params = {} + if dhcp_options_ids: + self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId') + return self.get_list('DescribeDhcpOptions', params, [('item', DhcpOptions)]) + + def create_dhcp_options(self, vpc_id, cidr_block, availability_zone=None): + """ + Create a new DhcpOption + + :type vpc_id: str + :param vpc_id: The ID of the VPC where you want to create the subnet. + + :type cidr_block: str + :param cidr_block: The CIDR block you want the subnet to cover. 
+ + :type availability_zone: str + :param availability_zone: The AZ you want the subnet in + + :rtype: The newly created DhcpOption + :return: A :class:`boto.vpc.customergateway.DhcpOption` object + """ + params = {'VpcId' : vpc_id, + 'CidrBlock' : cidr_block} + if availability_zone: + params['AvailabilityZone'] = availability_zone + return self.get_object('CreateDhcpOption', params, DhcpOptions) + + def delete_dhcp_options(self, dhcp_options_id): + """ + Delete a DHCP Options + + :type dhcp_options_id: str + :param dhcp_options_id: The ID of the DHCP Options to be deleted. + + :rtype: bool + :return: True if successful + """ + params = {'DhcpOptionsId': dhcp_options_id} + return self.get_status('DeleteDhcpOptions', params) + + def associate_dhcp_options(self, dhcp_options_id, vpc_id): + """ + Associate a set of Dhcp Options with a VPC. + + :type dhcp_options_id: str + :param dhcp_options_id: The ID of the Dhcp Options + + :type vpc_id: str + :param vpc_id: The ID of the VPC. + + :rtype: bool + :return: True if successful + """ + params = {'DhcpOptionsId': dhcp_options_id, + 'VpcId' : vpc_id} + return self.get_status('AssociateDhcpOptions', params) + + # VPN Connection + + def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None): + """ + Retrieve information about your VPN_CONNECTIONs. You can filter results to + return information only about those VPN_CONNECTIONs that match your search + parameters. Otherwise, all VPN_CONNECTIONs associated with your account + are returned. + + :type vpn_connection_ids: list + :param vpn_connection_ids: A list of strings with the desired VPN_CONNECTION ID's + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. + Possible filter keys are: + + - *state*, the state of the VPN_CONNECTION + pending,available,deleting,deleted + - *type*, the type of connection, currently 'ipsec.1' + - *customerGatewayId*, the ID of the customer gateway + associated with the VPN + - *vpnGatewayId*, the ID of the VPN gateway associated + with the VPN connection + + :rtype: list + :return: A list of :class:`boto.vpn_connection.vpnconnection.VpnConnection` + """ + params = {} + if vpn_connection_ids: + self.build_list_params(params, vpn_connection_ids, 'Vpn_ConnectionId') + if filters: + i = 1 + for filter in filters: + params[('Filter.%d.Key' % i)] = filter[0] + params[('Filter.%d.Value.1')] = filter[1] + i += 1 + return self.get_list('DescribeVpnConnections', params, [('item', VpnConnection)]) + + def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id): + """ + Create a new VPN Connection. + + :type type: str + :param type: The type of VPN Connection. Currently only 'ipsec.1' + is supported + + :type customer_gateway_id: str + :param customer_gateway_id: The ID of the customer gateway. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the VPN gateway. + + :rtype: The newly created VpnConnection + :return: A :class:`boto.vpc.vpnconnection.VpnConnection` object + """ + params = {'Type' : type, + 'CustomerGatewayId' : customer_gateway_id, + 'VpnGatewayId' : vpn_gateway_id} + return self.get_object('CreateVpnConnection', params, VpnConnection) + + def delete_vpn_connection(self, vpn_connection_id): + """ + Delete a VPN Connection. + + :type vpn_connection_id: str + :param vpn_connection_id: The ID of the vpn_connection to be deleted. 
+ + :rtype: bool + :return: True if successful + """ + params = {'VpnConnectionId': vpn_connection_id} + return self.get_status('DeleteVpnConnection', params) + + diff --git a/vendor/boto/boto/vpc/customergateway.py b/vendor/boto/boto/vpc/customergateway.py new file mode 100644 index 000000000000..c50a616d5e95 --- /dev/null +++ b/vendor/boto/boto/vpc/customergateway.py @@ -0,0 +1,54 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Customer Gateway +""" + +from boto.ec2.ec2object import EC2Object + +class CustomerGateway(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.type = None + self.state = None + self.ip_address = None + self.bgp_asn = None + + def __repr__(self): + return 'CustomerGateway:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'customerGatewayId': + self.id = value + elif name == 'ipAddress': + self.ip_address = value + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'bgpAsn': + self.bgp_asn = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/vpc/dhcpoptions.py b/vendor/boto/boto/vpc/dhcpoptions.py new file mode 100644 index 000000000000..4fce7dc90fa3 --- /dev/null +++ b/vendor/boto/boto/vpc/dhcpoptions.py @@ -0,0 +1,69 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a DHCP Options set +""" + +from boto.ec2.ec2object import EC2Object + +class DhcpValueSet(list): + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'value': + self.append(value) + +class DhcpConfigSet(dict): + + def startElement(self, name, attrs, connection): + if name == 'valueSet': + if not self.has_key(self._name): + self[self._name] = DhcpValueSet() + return self[self._name] + + def endElement(self, name, value, connection): + if name == 'key': + self._name = value + +class DhcpOptions(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.options = None + + def __repr__(self): + return 'DhcpOptions:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'dhcpConfigurationSet': + self.options = DhcpConfigSet() + return self.options + + def endElement(self, name, value, connection): + if name == 'dhcpOptionsId': + self.id = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/vpc/subnet.py b/vendor/boto/boto/vpc/subnet.py new file mode 100644 index 000000000000..de8a959e074e --- /dev/null +++ b/vendor/boto/boto/vpc/subnet.py @@ -0,0 +1,54 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a Subnet +""" + +from boto.ec2.ec2object import EC2Object + +class Subnet(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.state = None + self.cidr_block = None + self.available_ip_address_count = 0 + self.availability_zone = None + + def __repr__(self): + return 'Subnet:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'subnetId': + self.id = value + elif name == 'state': + self.state = value + elif name == 'cidrBlock': + self.cidr_block = value + elif name == 'availableIpAddressCount': + self.available_ip_address_count = int(value) + elif name == 'availabilityZone': + self.availability_zone = value + else: + setattr(self, name, value) + diff --git a/vendor/boto/boto/vpc/vpc.py b/vendor/boto/boto/vpc/vpc.py new file mode 100644 index 000000000000..152cff3ecec1 --- /dev/null +++ b/vendor/boto/boto/vpc/vpc.py @@ -0,0 +1,54 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Virtual Private Cloud. 
+""" + +from boto.ec2.ec2object import EC2Object + +class VPC(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.dhcp_options_id = None + self.state = None + self.cidr_block = None + + def __repr__(self): + return 'VPC:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.id = value + elif name == 'dhcpOptionsId': + self.dhcp_options_id = value + elif name == 'state': + self.state = value + elif name == 'cidrBlock': + self.cidr_block = value + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_vpc(self.id) + diff --git a/vendor/boto/boto/vpc/vpnconnection.py b/vendor/boto/boto/vpc/vpnconnection.py new file mode 100644 index 000000000000..c02789b533b8 --- /dev/null +++ b/vendor/boto/boto/vpc/vpnconnection.py @@ -0,0 +1,60 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a VPN Connectionn +""" + +from boto.ec2.ec2object import EC2Object + +class VpnConnection(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.state = None + self.customer_gateway_configuration = None + self.type = None + self.customer_gateway_id = None + self.vpn_gateway_id = None + + def __repr__(self): + return 'VpnConnection:%s' % self.id + + def endElement(self, name, value, connection): + if name == 'vpnConnectionId': + self.id = value + elif name == 'state': + self.state = value + elif name == 'CustomerGatewayConfiguration': + self.customer_gateway_configuration = value + elif name == 'type': + self.type = value + elif name == 'customerGatewayId': + self.customer_gateway_id = value + elif name == 'vpnGatewayId': + self.vpn_gateway_id = value + else: + setattr(self, name, value) + + def delete(self): + return self.connection.delete_vpn_connection(self.id) + diff --git a/vendor/boto/boto/vpc/vpngateway.py b/vendor/boto/boto/vpc/vpngateway.py new file mode 100644 index 000000000000..0fa0a9efd675 --- /dev/null +++ b/vendor/boto/boto/vpc/vpngateway.py @@ -0,0 +1,80 @@ +# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+ +""" +Represents a Vpn Gateway +""" + +from boto.ec2.ec2object import EC2Object + +class Attachment(object): + + def __init__(self, connection=None): + self.vpc_id = None + self.state = None + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + if name == 'vpcId': + self.vpc_id = value + elif name == 'state': + self.state = value + else: + setattr(self, name, value) + +class VpnGateway(EC2Object): + + def __init__(self, connection=None): + EC2Object.__init__(self, connection) + self.id = None + self.type = None + self.state = None + self.availability_zone = None + self.attachments = [] + + def __repr__(self): + return 'VpnGateway:%s' % self.id + + def startElement(self, name, attrs, connection): + if name == 'item': + att = Attachment() + self.attachments.append(att) + return att + + def endElement(self, name, value, connection): + if name == 'vpnGatewayId': + self.id = value + elif name == 'type': + self.type = value + elif name == 'state': + self.state = value + elif name == 'availabilityZone': + self.availability_zone = value + elif name == 'attachments': + pass + else: + setattr(self, name, value) + + def attach(self, vpc_id): + return self.connection.attach_vpn_gateway(self.id, vpc_id) + diff --git a/vendor/boto/cq.py b/vendor/boto/cq.py new file mode 100755 index 000000000000..241515697ec1 --- /dev/null +++ b/vendor/boto/cq.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +import getopt, sys +from boto.sqs.connection import SQSConnection +from boto.exception import SQSError + +def usage(): + print 'cq.py [-c] [-q queue_name] [-o output_file] [-t timeout]' + +def main(): + try: + opts, args = getopt.getopt(sys.argv[1:], 'hcq:o:t:', + ['help', 'clear', 'queue', + 'output', 'timeout']) + except: + usage() + sys.exit(2) + queue_name = '' + output_file = '' + timeout = 30 + clear = False + for o, a in opts: + if o in ('-h', '--help'): + usage() + sys.exit() + if o in ('-q', '--queue'): + queue_name = a + if o in ('-o', '--output'): + output_file = a + if o in ('-c', '--clear'): + clear = True + if o in ('-t', '--timeout'): + timeout = int(a) + c = SQSConnection() + if queue_name: + try: + rs = [c.create_queue(queue_name)] + except SQSError, e: + print 'An Error Occurred:' + print '%s: %s' % (e.status, e.reason) + print e.body + sys.exit() + else: + try: + rs = c.get_all_queues() + except SQSError, e: + print 'An Error Occurred:' + print '%s: %s' % (e.status, e.reason) + print e.body + sys.exit() + for q in rs: + if clear: + n = q.clear() + print 'clearing %d messages from %s' % (n, q.id) + elif output_file: + q.dump(output_file) + else: + print q.id, q.count(vtimeout=timeout) + +if __name__ == "__main__": + main() + diff --git a/vendor/boto/docs/Makefile b/vendor/boto/docs/Makefile new file mode 100644 index 000000000000..5fd1f9202836 --- /dev/null +++ b/vendor/boto/docs/Makefile @@ -0,0 +1,89 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf $(BUILDDIR)/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." 
+ +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/boto.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/boto.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." diff --git a/vendor/boto/docs/make.bat b/vendor/boto/docs/make.bat new file mode 100644 index 000000000000..d6b0b7b6a5a3 --- /dev/null +++ b/vendor/boto/docs/make.bat @@ -0,0 +1,113 @@ +@ECHO OFF + +REM Command file for Sphinx documentation + +set SPHINXBUILD=sphinx-build +set BUILDDIR=build +set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source +if NOT "%PAPER%" == "" ( + set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% +) + +if "%1" == "" goto help + +if "%1" == "help" ( + :help + echo.Please use `make ^` where ^ is one of + echo. html to make standalone HTML files + echo. dirhtml to make HTML files named index.html in directories + echo. pickle to make pickle files + echo. json to make JSON files + echo. htmlhelp to make HTML files and a HTML help project + echo. qthelp to make HTML files and a qthelp project + echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter + echo. changes to make an overview over all changed/added/deprecated items + echo. linkcheck to check all external links for integrity + echo. doctest to run all doctests embedded in the documentation if enabled + goto end +) + +if "%1" == "clean" ( + for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i + del /q /s %BUILDDIR%\* + goto end +) + +if "%1" == "html" ( + %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/html. + goto end +) + +if "%1" == "dirhtml" ( + %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml + echo. + echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. + goto end +) + +if "%1" == "pickle" ( + %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle + echo. + echo.Build finished; now you can process the pickle files. + goto end +) + +if "%1" == "json" ( + %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json + echo. + echo.Build finished; now you can process the JSON files. + goto end +) + +if "%1" == "htmlhelp" ( + %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp + echo. + echo.Build finished; now you can run HTML Help Workshop with the ^ +.hhp project file in %BUILDDIR%/htmlhelp. + goto end +) + +if "%1" == "qthelp" ( + %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp + echo. 
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^ +.qhcp project file in %BUILDDIR%/qthelp, like this: + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\boto.qhcp + echo.To view the help file: + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\boto.ghc + goto end +) + +if "%1" == "latex" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + echo. + echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "changes" ( + %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes + echo. + echo.The overview file is in %BUILDDIR%/changes. + goto end +) + +if "%1" == "linkcheck" ( + %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck + echo. + echo.Link check complete; look for any errors in the above output ^ +or in %BUILDDIR%/linkcheck/output.txt. + goto end +) + +if "%1" == "doctest" ( + %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest + echo. + echo.Testing of doctests in the sources finished, look at the ^ +results in %BUILDDIR%/doctest/output.txt. + goto end +) + +:end diff --git a/vendor/boto/docs/source/_templates/layout.html b/vendor/boto/docs/source/_templates/layout.html new file mode 100644 index 000000000000..cdf85bbf0a39 --- /dev/null +++ b/vendor/boto/docs/source/_templates/layout.html @@ -0,0 +1,3 @@ +{% extends '!layout.html' %} + +{% block sidebarsearch %}{{ super() }}{% endblock %} diff --git a/vendor/boto/docs/source/autoscale_tut.rst b/vendor/boto/docs/source/autoscale_tut.rst new file mode 100644 index 000000000000..9f9d39940d20 --- /dev/null +++ b/vendor/boto/docs/source/autoscale_tut.rst @@ -0,0 +1,140 @@ +.. _autoscale_tut: + +============================================= +An Introduction to boto's Autoscale interface +============================================= + +This tutorial focuses on the boto interface to the Autoscale service. This +assumes you are familiar with boto's EC2 interface and concepts. + +Autoscale Concepts +------------------ + +The AWS Autoscale service is comprised of three core concepts: + + #. *Autoscale Group (AG):* An AG can be viewed as a collection of criteria for + maintaining or scaling a set of EC2 instances over one or more availability + zones. An AG is limited to a single region. + #. *Launch Configuration (LC):* An LC is the set of information needed by the + AG to launch new instances - this can encompass image ids, startup data, + security groups and keys. Only one LC is attached to an AG. + #. *Triggers*: A trigger is essentially a set of rules for determining when to + scale an AG up or down. These rules can encompass a set of metrics such as + average CPU usage across instances, or incoming requests, a threshold for + when an action will take place, as well as parameters to control how long + to wait after a threshold is crossed. + +Creating a Connection +--------------------- +The first step in accessing autoscaling is to create a connection to the service. +There are two ways to do this in boto. The first is: + +>>> from boto.ec2.autoscale import AutoScaleConnection +>>> conn = AutoScaleConnection('', '') + +Alternatively, you can use the shortcut: + +>>> conn = boto.connect_autoscale() + +A Note About Regions and Endpoints +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Like EC2 the Autoscale service has a different endpoint for each region. By +default the US endpoint is used. To choose a specific region, instantiate the +AutoScaleConnection object with that region's endpoint. 
+ +>>> ec2 = boto.connect_autoscale(host='eu-west-1.autoscaling.amazonaws.com') + +Alternatively, edit your boto.cfg with the default Autoscale endpoint to use:: + + [Boto] + autoscale_endpoint = eu-west-1.autoscaling.amazonaws.com + +Getting Existing AutoScale Groups +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To retrieve existing autoscale groups: + +>>> conn.get_all_groups() + +You will get back a list of AutoScale group objects, one for each AG you have. + +Creating Autoscaling Groups +--------------------------- +An Autoscaling group has a number of parameters associated with it. + + #. *Name*: The name of the AG. + #. *Availability Zones*: The list of availability zones it is defined over. + #. *Minimum Size*: Minimum number of instances running at one time. + #. *Maximum Size*: Maximum number of instances running at one time. + #. *Launch Configuration (LC)*: A set of instructions on how to launch an instance. + #. *Load Balancer*: An optional ELB load balancer to use. See the ELB tutorial + for information on how to create a load balancer. + +For the purposes of this tutorial, let's assume we want to create one autoscale +group over the us-east-1a and us-east-1b availability zones. We want to have +two instances in each availability zone, thus a minimum size of 4. For now we +won't worry about scaling up or down - we'll introduce that later when we talk +about triggers. Thus we'll set a maximum size of 4 as well. We'll also associate +the AG with a load balancer which we assume we've already created, called 'my_lb'. + +Our LC tells us how to start an instance. This will at least include the image +id to use, security_group, and key information. We assume the image id, key +name and security groups have already been defined elsewhere - see the EC2 +tutorial for information on how to create these. + +>>> from boto.ec2.autoscale import LaunchConfiguration +>>> from boto.ec2.autoscale import AutoScalingGroup +>>> lc = LaunchConfiguration(name='my-launch_config', image_id='my-ami', + key_name='my_key_name', + security_groups=['my_security_groups']) +>>> conn.create_launch_configuration(lc) + +We now have created a launch configuration called 'my-launch-config'. We are now +ready to associate it with our new autoscale group. + +>>> ag = AutoScalingGroup(group_name='my_group', load_balancers=['my-lb'], + availability_zones=['us-east-1a', 'us-east-1b'], + launch_config=lc, min_size=4, max_size=4) +>>> conn.create_auto_scaling_group(ag) + +We now have a new autoscaling group defined! At this point instances should be +starting to launch. To view activity on an autoscale group: + +>>> ag.get_activities() + [Activity:Launching a new EC2 instance status:Successful progress:100, + ...] + +or alternatively: + +>>> conn.get_all_activities(ag) + +This autoscale group is fairly useful in that it will maintain the minimum size without +breaching the maximum size defined. That means if one instance crashes, the autoscale +group will use the launch configuration to start a new one in an attempt to maintain +its minimum defined size. It knows instance health using the health check defined on +its associated load balancer. + +Scaling a Group Up or Down +^^^^^^^^^^^^^^^^^^^^^^^^^^ +It might be more useful to also define means to scale a group up or down +depending on certain criteria. For example, if the average CPU utilization of +all your instances goes above 60%, you may want to scale up a number of +instances to deal with demand - likewise you might want to scale down if usage +drops. 
These criteria are defined in *triggers*. + +For example, let's modify our above group to have a maxsize of 8 and define means +of scaling up based on CPU utilization. We'll say we should scale up if the average +CPU usage goes above 80% and scale down if it goes below 40%. + +>>> from boto.ec2.autoscale import Trigger +>>> tr = Trigger(name='my_trigger', autoscale_group=ag, + measure_name='CPUUtilization', statistic='Average', + unit='Percent', + dimensions=[('AutoScalingGroupName', ag.name)], + period=60, lower_threshold=40, + lower_breach_scale_increment='-5', + upper_threshold=80, + upper_breach_scale_increment='10', + breach_duration=360) +>> conn.create_trigger(tr) + diff --git a/vendor/boto/docs/source/boto_theme/static/boto.css_t b/vendor/boto/docs/source/boto_theme/static/boto.css_t new file mode 100644 index 000000000000..932e5183dba6 --- /dev/null +++ b/vendor/boto/docs/source/boto_theme/static/boto.css_t @@ -0,0 +1,239 @@ +/** + * Sphinx stylesheet -- default theme + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif; + font-size: 100%; + background-color: #111111; + color: #555555; + margin: 0; + padding: 0; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 300px; +} + +hr{ + border: 1px solid #B1B4B6; +} + +div.document { + background-color: #fafafa; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 1em 30px 30px 30px; + font-size: 0.9em; +} + +div.footer { + color: #555; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #444444; +} + +div.related { + background-color: #6F6555; /*#6BA81E;*/ + line-height: 36px; + color: #CCCCCC; + text-shadow: 0px 1px 0 #444444; + font-size: 1.1em; +} + +div.related a { + color: #D9C5A7; +} + +div.related .right { + font-size: 0.9em; +} + +div.sphinxsidebar { + font-size: 0.9em; + line-height: 1.5em; + width: 300px +} + +div.sphinxsidebarwrapper{ + padding: 20px 0; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif; + color: #222222; + font-size: 1.2em; + font-weight: bold; + margin: 0; + padding: 5px 10px; + text-shadow: 1px 1px 0 white +} + +div.sphinxsidebar h3 a { + color: #444444; +} + +div.sphinxsidebar p { + color: #888888; + padding: 5px 20px; + margin: 0.5em 0px; +} + +div.sphinxsidebar p.topless { +} + +div.sphinxsidebar ul { + margin: 10px 10px 10px 20px; + padding: 0; + color: #000000; +} + +div.sphinxsidebar a { + color: #444444; +} + +div.sphinxsidebar a:hover { + color: #E32E00; +} + +div.sphinxsidebar input { + border: 1px solid #cccccc; + font-family: sans-serif; + font-size: 1.1em; + padding: 0.15em 0.3em; +} + +div.sphinxsidebar input[type=text]{ + margin-left: 20px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #005B81; + text-decoration: none; +} + +a:hover { + color: #E32E00; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif; + font-weight: bold; + color: #069; + margin: 30px 0px 10px 0px; + padding: 5px 0 5px 0px; + text-shadow: 0px 1px 0 white; + border-bottom: 1px solid #C8D5E3; +} + +div.body h1 { margin-top: 0; 
font-size: 165%; } +div.body h2 { font-size: 135%; } +div.body h3 { font-size: 120%; } +div.body h4 { font-size: 110%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + background-color: #eeeeee; + border: 1px solid #cccccc; +} + +div.seealso { + background-color: #ffffcc; + border: 1px solid #ffff66; +} + +div.topic { + background-color: #fafafa; + border-width: 0; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #ff6666; +} + + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 10px; + background-color: #fafafa; + color: #222222; + line-height: 1.5em; + font-size: 1.1em; + margin: 1.5em 0 1.5em 0; + -webkit-box-shadow: 0px 0px 4px #d8d8d8; + -moz-box-shadow: 0px 0px 4px #d8d8d8; + box-shadow: 0px 0px 4px #d8d8d8; +} + +tt { + color: #222222; + padding: 1px 2px; + font-size: 1.2em; + font-family: monospace; +} + +#table-of-contents ul { + padding-left: 2em; +} + +div.sphinxsidebarwrapper div a {margin: 0.7em;} \ No newline at end of file diff --git a/vendor/boto/docs/source/boto_theme/static/pygments.css b/vendor/boto/docs/source/boto_theme/static/pygments.css new file mode 100644 index 000000000000..1f2d2b61871e --- /dev/null +++ b/vendor/boto/docs/source/boto_theme/static/pygments.css @@ -0,0 +1,61 @@ +.hll { background-color: #ffffcc } +.c { color: #408090; font-style: italic } /* Comment */ +.err { border: 1px solid #FF0000 } /* Error */ +.k { color: #007020; font-weight: bold } /* Keyword */ +.o { color: #666666 } /* Operator */ +.cm { color: #408090; font-style: italic } /* Comment.Multiline */ +.cp { color: #007020 } /* Comment.Preproc */ +.c1 { color: #408090; font-style: italic } /* Comment.Single */ +.cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ +.gd { color: #A00000 } /* Generic.Deleted */ +.ge { font-style: italic } /* Generic.Emph */ +.gr { color: #FF0000 } /* Generic.Error */ +.gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.gi { color: #00A000 } /* Generic.Inserted */ +.go { color: #303030 } /* Generic.Output */ +.gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.gs { font-weight: bold } /* Generic.Strong */ +.gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.gt { color: #0040D0 } /* Generic.Traceback */ +.kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.kp { color: #007020 } /* Keyword.Pseudo */ +.kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.kt { color: #902000 } /* Keyword.Type */ +.m { color: #208050 } /* Literal.Number */ +.s { color: #4070a0 } /* Literal.String */ +.na { color: #4070a0 } /* Name.Attribute */ +.nb { color: #007020 } /* Name.Builtin */ +.nc { color: #0e84b5; font-weight: bold } /* Name.Class */ +.no { color: #60add5 } /* Name.Constant */ +.nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.ne { color: #007020 } /* Name.Exception */ +.nf { color: #06287e } /* 
Name.Function */ +.nl { color: #002070; font-weight: bold } /* Name.Label */ +.nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ +.nt { color: #062873; font-weight: bold } /* Name.Tag */ +.nv { color: #bb60d5 } /* Name.Variable */ +.ow { color: #007020; font-weight: bold } /* Operator.Word */ +.w { color: #bbbbbb } /* Text.Whitespace */ +.mf { color: #208050 } /* Literal.Number.Float */ +.mh { color: #208050 } /* Literal.Number.Hex */ +.mi { color: #208050 } /* Literal.Number.Integer */ +.mo { color: #208050 } /* Literal.Number.Oct */ +.sb { color: #4070a0 } /* Literal.String.Backtick */ +.sc { color: #4070a0 } /* Literal.String.Char */ +.sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.s2 { color: #4070a0 } /* Literal.String.Double */ +.se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.sh { color: #4070a0 } /* Literal.String.Heredoc */ +.si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.sx { color: #c65d09 } /* Literal.String.Other */ +.sr { color: #235388 } /* Literal.String.Regex */ +.s1 { color: #4070a0 } /* Literal.String.Single */ +.ss { color: #517918 } /* Literal.String.Symbol */ +.bp { color: #007020 } /* Name.Builtin.Pseudo */ +.vc { color: #bb60d5 } /* Name.Variable.Class */ +.vg { color: #bb60d5 } /* Name.Variable.Global */ +.vi { color: #bb60d5 } /* Name.Variable.Instance */ +.il { color: #208050 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/vendor/boto/docs/source/boto_theme/theme.conf b/vendor/boto/docs/source/boto_theme/theme.conf new file mode 100644 index 000000000000..7d09085abb25 --- /dev/null +++ b/vendor/boto/docs/source/boto_theme/theme.conf @@ -0,0 +1,3 @@ +[theme] +inherit = basic +stylesheet = boto.css \ No newline at end of file diff --git a/vendor/boto/docs/source/conf.py b/vendor/boto/docs/source/conf.py new file mode 100644 index 000000000000..57b1221b0a8c --- /dev/null +++ b/vendor/boto/docs/source/conf.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- + +import sys, os + +extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo'] +templates_path = ['_templates'] +source_suffix = '.rst' +master_doc = 'index' +project = u'boto' +copyright = u'2009,2010, Mitch Garnaat' +version = '1.9' +exclude_trees = [] +pygments_style = 'sphinx' +html_theme = 'boto_theme' +html_theme_path = ["."] +html_static_path = ['_static'] +htmlhelp_basename = 'botodoc' +latex_documents = [ + ('index', 'boto.tex', u'boto Documentation', + u'Mitch Garnaat', 'manual'), +] +intersphinx_mapping = {'http://docs.python.org/': None} + +try: + release = os.environ.get('SVN_REVISION', 'HEAD') + print release +except Exception, e: + print e + +html_title = "boto v%s (r%s)" % (version, release) diff --git a/vendor/boto/docs/source/documentation.rst b/vendor/boto/docs/source/documentation.rst new file mode 100644 index 000000000000..d4999d99daa1 --- /dev/null +++ b/vendor/boto/docs/source/documentation.rst @@ -0,0 +1,59 @@ +.. _documentation: + +======================= +About the Documentation +======================= + +boto's documentation uses the Sphinx__ documentation system, which in turn is +based on docutils__. The basic idea is that lightly-formatted plain-text +documentation is transformed into HTML, PDF, and any other output format. + +__ http://sphinx.pocoo.org/ +__ http://docutils.sf.net/ + +To actually build the documentation locally, you'll currently need to install +Sphinx -- ``easy_install Sphinx`` should do the trick. 
+ +Then, building the html is easy; just ``make html`` from the ``docs`` directory. + +To get started contributing, you'll want to read the `ReStructuredText +Primer`__. After that, you'll want to read about the `Sphinx-specific markup`__ +that's used to manage metadata, indexing, and cross-references. + +__ http://sphinx.pocoo.org/rest.html +__ http://sphinx.pocoo.org/markup/ + +The main thing to keep in mind as you write and edit docs is that the more +semantic markup you can add the better. So:: + + Import ``boto`` to your script... + +Isn't nearly as helpful as:: + + Add :mod:`boto` to your script... + +This is because Sphinx will generate a proper link for the latter, which greatly +helps readers. There's basically no limit to the amount of useful markup you can +add. + + +The fabfile +----------- + +There is a Fabric__ file that can be used to build and deploy the documentation +to a webserver that you ssh access to. + +__ http://fabfile.org + +To build and deploy:: + + cd docs/ + fab deploy:remote_path='/var/www/folder/whatever' --hosts=user@host + +This will get the latest code from subversion, add the revision number to the +docs conf.py file, call ``make html`` to build the documentation, then it will +tarball it up and scp up to the host you specified and untarball it in the +folder you specified creating a symbolic link from the untarballed versioned +folder to ``{remote_path}/boto-docs``. + + diff --git a/vendor/boto/docs/source/ec2_tut.rst b/vendor/boto/docs/source/ec2_tut.rst new file mode 100644 index 000000000000..6326243ce2b3 --- /dev/null +++ b/vendor/boto/docs/source/ec2_tut.rst @@ -0,0 +1,420 @@ +.. _ec2_tut: + +======================================= +An Introduction to boto's EC2 interface +======================================= + +This tutorial focuses on the boto interface to the Elastic Compute Cloud +from Amazon Web Services. This tutorial assumes that you have already +downloaded and installed boto. + +Creating a Connection +--------------------- +The first step in accessing EC2 is to create a connection to the service. +There are two ways to do this in boto. The first is: + +>>> from boto.ec2.connection import EC2Connection +>>> conn = EC2Connection('', '') + +At this point the variable conn will point to an EC2Connection object. In +this example, the AWS access key and AWS secret key are passed in to the +method explicitely. Alternatively, you can set the environment variables: + +AWS_ACCESS_KEY_ID - Your AWS Access Key ID +AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key + +and then call the constructor without any arguments, like this: + +>>> conn = EC2Connection() + +There is also a shortcut function in the boto package, called connect_ec2 +that may provide a slightly easier means of creating a connection: + +>>> import boto +>>> conn = boto.connect_ec2() + +In either case, conn will point to an EC2Connection object which we will +use throughout the remainder of this tutorial. + +A Note About Regions +-------------------- +The 2008-12-01 version of the EC2 API introduced the idea of Regions. +A Region is geographically distinct and is completely isolated from +other EC2 Regions. At the time of the launch of the 2008-12-01 API +there were two available regions, us-east-1 and eu-west-1. Each +Region has it's own service endpoint and therefore would require +it's own EC2Connection object in boto. 
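+
+If you already know the name and endpoint of the region you want, you can
+also build a RegionInfo object yourself and hand it to the connection
+directly. The following is only a sketch of that idea (the endpoint string
+here matches the eu-west-1 endpoint returned by the calls shown below, and
+the later examples describe the more convenient region helpers):
+
+>>> from boto.ec2.connection import EC2Connection
+>>> from boto.ec2.regioninfo import RegionInfo
+>>> eu_west = RegionInfo(name='eu-west-1', endpoint='eu-west-1.ec2.amazonaws.com')
+>>> conn_eu = EC2Connection(region=eu_west)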
+ +The default behavior in boto, as shown above, is to connect you with +the us-east-1 region which is exactly the same as the behavior prior +to the introduction of Regions. + +However, if you would like to connect to a region other than us-east-1, +there are a couple of ways to accomplish that. The first way, is to +as EC2 to provide a list of currently supported regions. You can do +that using the regions function in the boto.ec2 module: + +>>> import boto.ec2 +>>> regions = boto.ec2.regions() +>>> regions +[RegionInfo:eu-west-1, RegionInfo:us-east-1] +>>> + +As you can see, a list of available regions is returned. Each region +is represented by a RegionInfo object. A RegionInfo object has two +attributes; a name and an endpoint. + +>>> eu = regions[0] +>>> eu.name +u'eu-west-1' +>>> eu.endpoint +u'eu-west-1.ec2.amazonaws.com' +>>> + +You can easily create a connection to a region by using the connect +method of the RegionInfo object: + +>>> conn_eu = eu.connect() +>>> conn_eu + +>>> + +The variable conn_eu is now bound to an EC2Connection object connected +to the endpoint of the eu-west-1 region and all operations performed via +that connection and all objects created by that connection will be scoped +to the eu-west-1 region. You can always tell which region a connection +is associated with by accessing it's region attribute: + +>>> conn_eu.region +RegionInfo:eu-west-1 +>>> + +Supporting EC2 objects such as SecurityGroups, KeyPairs, Addresses, +Volumes, Images and SnapShots are local to a particular region. So +don't expect to find the security groups you created in the us-east-1 +region to be available in the eu-west-1 region. + +Some objects in boto, such as SecurityGroup, have a new method called +copy_to_region which will attempt to create a copy of the object in +another region. For example: + +>>> regions +[RegionInfo:eu-west-1, RegionInfo:us-east-1] +>>> conn_us = regions[1].connect() +>>> groups = conn_us.get_all_security_groups() +>>> groups +[SecurityGroup:alfresco, SecurityGroup:apache, SecurityGroup:vnc, +SecurityGroup:appserver2, SecurityGroup:FTP, SecurityGroup:webserver, +SecurityGroup:default, SecurityGroup:test-1228851996] +>>> us_group = groups[0] +>>> us_group +SecurityGroup:alfresco +>>> us_group.rules +[IPPermissions:tcp(22-22), IPPermissions:tcp(80-80), IPPermissions:tcp(1445-1445)] +>>> eu_group = us_group.copy_to_region(eu) +>>> eu_group.rules +[IPPermissions:tcp(22-22), IPPermissions:tcp(80-80), IPPermissions:tcp(1445-1445)] + +In the above example, we chose one of the security groups available +in the us-east-1 region (the group alfresco) and copied that security +group to the eu-west-1 region. All of the rules associated with the +original security group will be copied as well. + +If you would like your default region to be something other than +us-east-1, you can override that default in your boto config file +(either ~/.boto for personal settings or /etc/boto.cfg for system-wide +settings). For example: + +[Boto] +ec2_region_name = eu-west-1 +ec2_region_endpoint = eu-west-1.ec2.amazonaws.com + +The above lines added to either boto config file would set the default +region to be eu-west-1. + +Images & Instances +------------------ + +An Image object represents an Amazon Machine Image (AMI) which is an +encrypted machine image stored in Amazon S3. It contains all of the +information necessary to boot instances of your software in EC2. 
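+
+If you already know the id of the AMI you are interested in, you can fetch
+that single Image directly instead of listing everything. This is just a
+quick sketch using the get_image method of the connection; the AMI id is one
+of those from the listing shown below:
+
+>>> image = conn.get_image('ami-20b65349')
+>>> image.location
+u'ec2-public-images/fedora-core4-base.manifest.xml'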
+ +To get a listing of all available Images: + +>>> images = conn.get_all_images() +>>> images +[Image:ami-20b65349, Image:ami-22b6534b, Image:ami-23b6534a, Image:ami-25b6534c, Image:ami-26b6534f, Image:ami-2bb65342, Image:ami-78b15411, Image:ami-a4aa4fcd, Image:ami-c3b550aa, Image:ami-e4b6538d, Image:ami-f1b05598] +>>> for image in images: +... print image.location +ec2-public-images/fedora-core4-base.manifest.xml +ec2-public-images/fedora-core4-mysql.manifest.xml +ec2-public-images/fedora-core4-apache.manifest.xml +ec2-public-images/fedora-core4-apache-mysql.manifest.xml +ec2-public-images/developer-image.manifest.xml +ec2-public-images/getting-started.manifest.xml +marcins_cool_public_images/fedora-core-6.manifest.xml +khaz_fc6_win2003/image.manifest +aes-images/django.manifest +marcins_cool_public_images/ubuntu-6.10.manifest.xml +ckk_public_ec2_images/centos-base-4.4.manifest.xml + +The most useful thing you can do with an Image is to actually run it, so let's +run a new instance of the base Fedora image: + +>>> image = images[0] +>>> image.location +ec2-public-images/fedora-core4-base.manifest.xml +>>> reservation = image.run() + +This will begin the boot process for a new EC2 instance. The run method +returns a Reservation object which represents a collection of instances +that are all started at the same time. In this case, we only started one +but you can check the instances attribute of the Reservation object to see +all of the instances associated with this reservation: + +>>> reservation.instances +[Instance:i-6761850e] +>>> instance = reservation.instances[0] +>>> instance.state +u'pending' +>>> + +So, we have an instance booting up that is still in the pending state. We +can call the update method on the instance to get a refreshed view of it's +state: + +>>> instance.update() +>>> instance.state +u'pending' +>>> # wait a few minutes +>>> instance.update() +>>> instance.state +u'running' + +So, now our instance is running. The time it takes to boot a new instance +varies based on a number of different factors but usually it takes less than +five minutes. + +Now the instance is up and running you can find out its DNS name like this: + +>>> instance.dns_name +u'ec2-72-44-40-153.z-2.compute-1.amazonaws.com' + +This provides the public DNS name for your instance. Since the 2007--3-22 +release of the EC2 service, the default addressing scheme for instances +uses NAT-addresses which means your instance has both a public IP address and a +non-routable private IP address. You can access each of these addresses +like this: + +>>> instance.public_dns_name +u'ec2-72-44-40-153.z-2.compute-1.amazonaws.com' +>>> instance.private_dns_name +u'domU-12-31-35-00-42-33.z-2.compute-1.internal' + +Even though your instance has a public DNS name, you won't be able to +access it yet because you need to set up some security rules which are +described later in this tutorial. + +Since you are now being charged for that instance we just created, you will +probably want to know how to terminate the instance, as well. The simplest +way is to use the stop method of the Instance object: + +>>> instance.stop() +>>> instance.update() +>>> instance.state +u'shutting-down' +>>> # wait a minute +>>> instance.update() +>>> instance.state +u'terminated' +>>> + +When we created our new instance, we didn't pass any args to the run method +so we got all of the default values. The full set of possible parameters +to the run method are: + +min_count - The minimum number of instances to launch. 
+max_count - The maximum number of instances to launch. +keypair - Keypair to launch instances with (either a KeyPair object or a string with the name of the desired keypair. +security_groups - A list of security groups to associate with the instance. This can either be a list of SecurityGroup objects or a list of strings with the names of the desired security groups. +user_data - Data to be made available to the launched instances. This should be base64 encoded according to the EC2 documentation. + +So, if I wanted to create two instances of the base image and launch them +with my keypair, called gsg-keypair, I would to this: + +>>> reservation.image.run(2,2,'gsg-keypair') +>>> reservation.instances +[Instance:i-5f618536, Instance:i-5e618537] +>>> for i in reservation.instances: +... print i.status +u'pending' +u'pending' +>>> + +Later, when you are finished with the instances you can either stop each +individually or you can call the stop_all method on the Reservation object: + +>>> reservation.stop_all() +>>> + +If you just want to get a list of all of your running instances, use +the get_all_instances method of the connection object. Note that the +list returned is actually a list of Reservation objects (which contain +the Instances) and that the list may include recently terminated instances +for a small period of time subsequent to their termination. + +>>> instances = conn.get_all_instances() +>>> instances +[Reservation:r-a76085ce, Reservation:r-a66085cf, Reservation:r-8c6085e5] +>>> r = instances[0] +>>> for inst in r.instances: +... print inst.state +u'terminated' +>>> + +A recent addition to the EC2 api's is to allow other EC2 users to launch +your images. There are a couple of ways of accessing this capability in +boto but I'll show you the simplest way here. First of all, you need to +know the Amazon ID for the user in question. The Amazon Id is a twelve +digit number that appears on your Account Activity page at AWS. It looks +like this: + +1234-5678-9012 + +To use this number in API calls, you need to remove the dashes so in our +example the user ID would be 12345678912. To allow the user associated +with this ID to launch one of your images, let's assume that the variable +image represents the Image you want to share. So: + +>>> image.get_launch_permissions() +{} +>>> + +The get_launch_permissions method returns a dictionary object two possible +entries; user_ids or groups. In our case we haven't yet given anyone +permission to launch our image so the dictionary is empty. To add our +EC2 user: + +>>> image.set_launch_permissions(['123456789012']) +True +>>> image.get_launch_permissions() +{'user_ids': [u'123456789012']} +>>> + +We have now added the desired user to the launch permissions for the Image +so that user will now be able to access and launch our Image. You can add +multiple users at one time by adding them all to the list you pass in as +a parameter to the method. To revoke the user's launch permissions: + +>>> image.remove_launch_permissions(['123456789012']) +True +>>> image.get_launch_permissions() +{} +>>> + +It is possible to pass a list of group names to the set_launch_permissions +method, as well. The only group available at the moment is the group "all" +which would allow any valid EC2 user to launch your image. + +Finally, you can completely reset the launch permissions for an Image with: + +>>> image.reset_launch_permissions() +True +>>> + +This will remove all users and groups from the launch permission list and +makes the Image private, again. 
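+
+As a quick illustration of the group-based form mentioned above, you could
+make an image launchable by any EC2 user by granting the "all" group and then
+lock it down again. This is only a sketch - the group_names keyword is an
+assumption here, and the output shown is what you would expect rather than a
+captured session:
+
+>>> image.set_launch_permissions(group_names=['all'])
+True
+>>> image.reset_launch_permissions()
+True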
+ +Security Groups +---------------- + +Amazon defines a security group as: + +"A security group is a named collection of access rules. These access rules + specify which ingress, i.e. incoming, network traffic should be delivered + to your instance." + +To get a listing of all currently defined security groups: + +>>> rs = conn.get_all_security_groups() +>>> print rs +[SecurityGroup:appserver, SecurityGroup:default, SecurityGroup:vnc, SecurityGroup:webserver] +>>> + +Each security group can have an arbitrary number of rules which represent +different network ports which are being enabled. To find the rules for a +particular security group, use the rules attribute: + +>>> sg = rs[1] +>>> sg.name +u'default' +>>> sg.rules +[IPPermissions:tcp(0-65535), + IPPermissions:udp(0-65535), + IPPermissions:icmp(-1--1), + IPPermissions:tcp(22-22), + IPPermissions:tcp(80-80)] +>>> + +In addition to listing the available security groups you can also create +a new security group. I'll follow through the "Three Tier Web Service" +example included in the EC2 Developer's Guide for an example of how to +create security groups and add rules to them. + +First, let's create a group for our Apache web servers that allows HTTP +access to the world: + +>>> web = conn.create_security_group('apache', 'Our Apache Group') +>>> web +SecurityGroup:apache +>>> web.authorize('tcp', 80, 80, '0.0.0.0/0') +True +>>> + +The first argument is the ip protocol which can be one of; tcp, udp or icmp. +The second argument is the FromPort or the beginning port in the range, the +third argument is the ToPort or the ending port in the range and the last +argument is the CIDR IP range to authorize access to. + +Next we create another group for the app servers: + +>>> app = conn.create_security_group('appserver', 'The application tier') +>>> + +We then want to grant access between the web server group and the app +server group. So, rather than specifying an IP address as we did in the +last example, this time we will specify another SecurityGroup object. + +>>> app.authorize(src_group=web) +True +>>> + +Now, to verify that the web group now has access to the app servers, we want to +temporarily allow SSH access to the web servers from our computer. Let's +say that our IP address is 192.168.1.130 as it is in the EC2 Developer +Guide. To enable that access: + +>>> web.authorize(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip='192.168.1.130/32') +True +>>> + +Now that this access is authorized, we could ssh into an instance running in +the web group and then try to telnet to specific ports on servers in the +appserver group, as shown in the EC2 Developer's Guide. When this testing is +complete, we would want to revoke SSH access to the web server group, like this: + +>>> web.rules +[IPPermissions:tcp(80-80), + IPPermissions:tcp(22-22)] +>>> web.revoke('tcp', 22, 22, cidr_ip='192.168.1.130/32') +True +>>> web.rules +[IPPermissions:tcp(80-80)] +>>> + + + + + + + diff --git a/vendor/boto/docs/source/elb_tut.rst b/vendor/boto/docs/source/elb_tut.rst new file mode 100644 index 000000000000..b8735781f93e --- /dev/null +++ b/vendor/boto/docs/source/elb_tut.rst @@ -0,0 +1,202 @@ +.. _elb_tut: + +========================================================== +An Introduction to boto's Elastic Load Balancing interface +========================================================== + +This tutorial focuses on the boto interface for Elastic Load Balancing +from Amazon Web Services. 
This tutorial assumes that you have already +downloaded and installed boto, and are familiar with the boto ec2 interface. + +Elastic Load Balancing Concepts +------------------------------- +Elastic Load Balancing (ELB) is intimately connected with Amazon's Elastic +Compute Cloud (EC2) service. Using the ELB service allows you to create a load +balancer - a DNS endpoint and set of ports that distributes incoming requests +to a set of ec2 instances. The advantages of using a load balancer is that it +allows you to truly scale up or down a set of backend instances without +disrupting service. Before the ELB service you had to do this manually by +launching an EC2 instance and installing load balancer software on it (nginx, +haproxy, perlbal, etc.) to distribute traffic to other EC2 instances. + +Recall that the ec2 service is split into Regions and Availability Zones (AZ). +At the time of writing, there are two Regions - US and Europe, and each region +is divided into a number of AZs (for example, us-east-1a, us-east-1b, etc.). +You can think of AZs as data centers - each runs off a different set of ISP +backbones and power providers. ELB load balancers can span multiple AZs but +cannot span multiple regions. That means that if you'd like to create a set of +instances spanning both the US and Europe Regions you'd have to create two load +balancers and have some sort of other means of distributing requests between +the two loadbalancers. An example of this could be using GeoIP techniques to +choose the correct load balancer, or perhaps DNS round robin. Keep in mind also +that traffic is distributed equally over all AZs the ELB balancer spans. This +means you should have an equal number of instances in each AZ if you want to +equally distribute load amongst all your instances. + +Creating a Connection +--------------------- +The first step in accessing ELB is to create a connection to the service. +There are two ways to do this in boto. The first is: + +>>> from boto.ec2.elb import ELBConnection +>>> conn = ELBConnection('', '') + +There is also a shortcut function in the boto package, called connect_elb +that may provide a slightly easier means of creating a connection: + +>>> import boto +>>> conn = boto.connect_elb() + +In either case, conn will point to an ELBConnection object which we will +use throughout the remainder of this tutorial. + +A Note About Regions and Endpoints +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Like EC2 the ELB service has a different endpoint for each region. By default +the US endpoint is used. To choose a specific region, instantiate the +ELBConnection object with that region's endpoint. + +>>> ec2 = boto.connect_elb(host='eu-west-1.elasticloadbalancing.amazonaws.com') + +Alternatively, edit your boto.cfg with the default ELB endpoint to use:: + + [Boto] + elb_endpoint = eu-west-1.elasticloadbalancing.amazonaws.com + +Getting Existing Load Balancers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To retrieve any exiting load balancers: + +>>> conn.get_all_load_balancers() + +You will get back a list of LoadBalancer objects. + +Creating a Load Balancer +------------------------ +To create a load balancer you need the following: + #. The specific **ports and protocols** you want to load balancer over, and what port + you want to connect to all instances. + #. A **health check** - the ELB concept of a *heart beat* or *ping*. ELB will use this health + check to see whether your instances are up or down. If they go down, the load balancer + will no longer send requests to them. + #. 
A **list of Availability Zones** you'd like to create your load balancer over. + +Ports and Protocols +^^^^^^^^^^^^^^^^^^^ +An incoming connection to your load balancer will come on one or more ports - +for example 80 (HTTP) and 443 (HTTPS). Each can be using a protocol - +currently, the supported protocols are TCP and HTTP. We also need to tell the +load balancer which port to route connects *to* on each instance. For example, +to create a load balancer for a website that accepts connections on 80 and 443, +and that routes connections to port 8080 and 8443 on each instance, you would +specify that the load balancer ports and protocols are: + + * 80, 8080, HTTP + * 443, 8443, TCP + +This says that the load balancer will listen on two ports - 80 and 443. +Connections on 80 will use an HTTP load balancer to forward connections to port +8080 on instances. Likewise, the load balancer will listen on 443 to forward +connections to 8443 on each instance using the TCP balancer. We need to +use TCP for the HTTPS port because it is encrypted at the application +layer. Of course, we could specify the load balancer use TCP for port 80, +however specifying HTTP allows you to let ELB handle some work for you - +for example HTTP header parsing. + + +Configuring a Health Check +^^^^^^^^^^^^^^^^^^^^^^^^^^ +A health check allows ELB to determine which instances are alive and able to +respond to requests. A health check is essentially a tuple consisting of: + + * *target*: What to check on an instance. For a TCP check this is comprised of:: + + TCP:PORT_TO_CHECK + + Which attempts to open a connection on PORT_TO_CHECK. If the connection opens + successfully, that specific instance is deemed healthy, otherwise it is marked + temporarily as unhealthy. For HTTP, the situation is slightly different:: + + HTTP:PORT_TO_CHECK/RESOURCE + + This means that the health check will connect to the resource /RESOURCE on + PORT_TO_CHECK. If an HTTP 200 status is returned the instance is deemed healthy. + * *interval*: How often the check is made. This is given in seconds and defaults to 30. + The valid range of intervals goes from 5 seconds to 600 seconds. + * *timeout*: The number of seconds the load balancer will wait for a check to return a + result. + * *UnhealthyThreshold*: The number of consecutive failed checks to deem the instance + as being dead. The default is 5, and the range of valid values lies from 2 to 10. + +The following example creates a health check called *instance_health* that simply checks +instances every 20 seconds on port 80 over HTTP at the resource /health for 200 successes. + +>>> import boto +>>> from boto.ec2.elb import HealthCheck +>>> conn = boto.connect_elb() +>>> hc = HealthCheck('instance_health', interval=20, target='HTTP:8080/health') + +Putting It All Together +^^^^^^^^^^^^^^^^^^^^^^^ + +Finally, let's create a load balancer in the US region that listens on ports 80 and 443 +and distributes requests to instances on 8080 and 8443 over HTTP and TCP. We want the +load balancer to span the availability zones *us-east-1a* and *us-east-1b*: + +>>> lb = conn.create_load_balancer('my_lb', ['us-east-1a', 'us-east-1b'], + [(80, 8080, 'http'), (443, 8443, 'tcp')]) +>>> lb.configure_health_check(hc) + +The load balancer has been created. To see where you can actually connect to it, do: + +>>> print lb.dns_name +my_elb-123456789.us-east-1.elb.amazonaws.com + +You can then CNAME map a better name, i.e. www.MYWEBSITE.com to the above address. 
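+
+If you need a handle on the same load balancer again in a later session, one
+simple approach (just a sketch - it filters the full listing by name rather
+than relying on any server-side filtering) is:
+
+>>> balancers = conn.get_all_load_balancers()
+>>> lb = [b for b in balancers if b.name == 'my_lb'][0]
+>>> lb.dns_name
+u'my_elb-123456789.us-east-1.elb.amazonaws.com'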
+ +Adding Instances To a Load Balancer +----------------------------------- + +Now that the load balancer has been created, there are two ways to add instances to it: + + #. Manually, adding each instance in turn. + #. Mapping an autoscale group to the load balancer. Please see the Autoscale + tutorial for information on how to do this. + +Manually Adding and Removing Instances +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Assuming you have a list of instance ids, you can add them to the load balancer + +>>> instance_ids = ['i-4f8cf126', 'i-0bb7ca62'] +>>> lb.register_instances(instance_ids) + +Keep in mind that these instances should be in Security Groups that match the +internal ports of the load balancer you just created (for this example, they +should allow incoming connections on 8080 and 8443). + +To remove instances: + +>>> lb.degregister_instances(instance_ids) + +Modifying Availability Zones for a Load Balancer +------------------------------------------------ + +If you wanted to disable one or more zones from an existing load balancer: + +>>> lb.disable_zones(['us-east-1a']) + +You can then terminate each instance in the disabled zone and then deregister then from your load +balancer. + +To enable zones: + +>>> lb.enable_zones(['us-east-1c']) + +Deleting a Load Balancer +------------------------ + +>>> lb.delete() + + diff --git a/vendor/boto/docs/source/index.rst b/vendor/boto/docs/source/index.rst new file mode 100644 index 000000000000..24b6ba058de2 --- /dev/null +++ b/vendor/boto/docs/source/index.rst @@ -0,0 +1,52 @@ +.. _index: + +=============================================== +boto: A Python interface to Amazon Web Services +=============================================== + +An integrated interface to current and future infrastructural services +offered by Amazon Web Services. + +Currently, this includes: + +- Simple Storage Service (S3) +- Simple Queue Service (SQS) +- Elastic Compute Cloud (EC2) + + * Elastic Load Balancer (ELB) + * CloudWatch + * AutoScale + +- Mechanical Turk +- SimpleDB (SDB) - See SimpleDbPage for details +- CloudFront +- Virtual Private Cloud (VPC) + +Follow project updates on Twitter (http://twitter.com/pythonboto). + +Follow Mitch on Twitter (http://twitter.com/garnaat). + + +Documentation Contents +---------------------- + +.. toctree:: + :maxdepth: 2 + + sqs_tut + s3_tut + ec2_tut + elb_tut + autoscale_tut + vpc_tut + ref/index + documentation + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/vendor/boto/docs/source/ref/boto.rst b/vendor/boto/docs/source/ref/boto.rst new file mode 100644 index 000000000000..5a241b34601d --- /dev/null +++ b/vendor/boto/docs/source/ref/boto.rst @@ -0,0 +1,47 @@ +.. _ref-boto: + +==== +boto +==== + +boto +---- + +.. automodule:: boto + :members: + :undoc-members: + +boto.connection +--------------- + +.. automodule:: boto.connection + :members: + :undoc-members: + +boto.exception +-------------- + +.. automodule:: boto.exception + :members: + :undoc-members: + +boto.handler +------------ + +.. automodule:: boto.handler + :members: + :undoc-members: + +boto.resultset +-------------- + +.. automodule:: boto.resultset + :members: + :undoc-members: + +boto.utils +---------- + +.. 
automodule:: boto.utils
+   :members:
+   :undoc-members:
diff --git a/vendor/boto/docs/source/ref/cloudfront.rst b/vendor/boto/docs/source/ref/cloudfront.rst
new file mode 100644
index 000000000000..5cb80beb2304
--- /dev/null
+++ b/vendor/boto/docs/source/ref/cloudfront.rst
@@ -0,0 +1,108 @@
+.. ref-cloudfront
+
+==========
+cloudfront
+==========
+
+A Crash Course in CloudFront in Boto
+------------------------------------
+
+This new boto module provides an interface to Amazon's content delivery service, CloudFront.
+
+Caveats:
+
+This module is not well tested. Paging of distributions is not yet
+supported. CNAME support is completely untested. Use with caution.
+Feedback and bug reports are greatly appreciated.
+
+The following shows the main features of the cloudfront module from an interactive shell:
+
+Create a CloudFront connection:
+
+>>> from boto.cloudfront import CloudFrontConnection
+>>> c = CloudFrontConnection()
+
+Create a new :class:`boto.cloudfront.distribution.Distribution`:
+
+>>> distro = c.create_distribution(origin='mybucket.s3.amazonaws.com', enabled=False, comment='My new distribution')
+>>> distro.domain_name
+u'd2oxf3980lnb8l.cloudfront.net'
+>>> distro.id
+u'ECH69MOIW7613'
+>>> distro.status
+u'InProgress'
+>>> distro.config.comment
+u'My new distribution'
+>>> distro.config.origin
+u'mybucket.s3.amazonaws.com'
+>>> distro.config.caller_reference
+u'31b8d9cf-a623-4a28-b062-a91856fac6d0'
+>>> distro.config.enabled
+False
+
+Note that a new caller reference is created automatically, using
+uuid.uuid4(). The :class:`boto.cloudfront.distribution.Distribution`, :class:`boto.cloudfront.distribution.DistributionConfig` and
+:class:`boto.cloudfront.distribution.DistributionSummary` objects are defined in the :mod:`boto.cloudfront.distribution`
+module.
+
+To get a listing of all current distributions:
+
+>>> rs = c.get_all_distributions()
+>>> rs
+[,
+ ]
+
+This returns a list of :class:`boto.cloudfront.distribution.DistributionSummary` objects. Note that paging
+is not yet supported! To get a :class:`boto.cloudfront.distribution.Distribution` from a
+:class:`boto.cloudfront.distribution.DistributionSummary` object:
+
+>>> ds = rs[1]
+>>> distro = ds.get_distribution()
+>>> distro.domain_name
+u'd2oxf3980lnb8l.cloudfront.net'
+
+To change a property of a distribution object:
+
+>>> distro.comment
+u'My new distribution'
+>>> distro.update(comment='This is a much better comment')
+>>> distro.comment
+'This is a much better comment'
+
+You can also enable/disable a distribution using the following
+convenience methods:
+
+>>> distro.enable()   # just calls distro.update(enabled=True)
+
+or
+
+>>> distro.disable()  # just calls distro.update(enabled=False)
+
+The only attributes that can be updated for a Distribution are
+comment, enabled and cnames.
+
+To delete a :class:`boto.cloudfront.distribution.Distribution`:
+
+>>> distro.delete()
+
+
+boto.cloudfront
+---------------
+
+.. automodule:: boto.cloudfront
+   :members:
+   :undoc-members:
+
+boto.cloudfront.distribution
+----------------------------
+
+.. automodule:: boto.cloudfront.distribution
+   :members:
+   :undoc-members:
+
+boto.cloudfront.exception
+-------------------------
+
+.. automodule:: boto.cloudfront.exception
+   :members:
+   :undoc-members:
\ No newline at end of file
diff --git a/vendor/boto/docs/source/ref/contrib.rst b/vendor/boto/docs/source/ref/contrib.rst
new file mode 100644
index 000000000000..9262a0dc8192
--- /dev/null
+++ b/vendor/boto/docs/source/ref/contrib.rst
@@ -0,0 +1,32 @@
+..
ref-contrib + +======= +contrib +======= + +boto.contrib +------------ + +.. automodule:: boto.contrib + :members: + :undoc-members: + +boto.contrib.m2helpers +---------------------- + +.. note:: + + This module requires installation of M2Crypto__ in your Python path. + + __ http://sandbox.rulemaker.net/ngps/m2/ + +.. automodule:: boto.contrib.m2helpers + :members: + :undoc-members: + +boto.contrib.ymlmessage +----------------------- + +.. automodule:: boto.contrib.ymlmessage + :members: + :undoc-members: \ No newline at end of file diff --git a/vendor/boto/docs/source/ref/ec2.rst b/vendor/boto/docs/source/ref/ec2.rst new file mode 100644 index 000000000000..e6215d7ab387 --- /dev/null +++ b/vendor/boto/docs/source/ref/ec2.rst @@ -0,0 +1,223 @@ +.. ref-ec2 + +=== +EC2 +=== + +boto.ec2 +-------- + +.. automodule:: boto.ec2 + :members: + :undoc-members: + +boto.ec2.address +---------------- + +.. automodule:: boto.ec2.address + :members: + :undoc-members: + +boto.ec2.autoscale +------------------ + +.. automodule:: boto.ec2.autoscale + :members: + :undoc-members: + +boto.ec2.autoscale.activity +--------------------------- + +.. automodule:: boto.ec2.autoscale.activity + :members: + :undoc-members: + +boto.ec2.autoscale.group +------------------------ + +.. automodule:: boto.ec2.autoscale.group + :members: + :undoc-members: + + +boto.ec2.autoscale.instance +--------------------------- + +.. automodule:: boto.ec2.autoscale.instance + :members: + :undoc-members: + +boto.ec2.autoscale.launchconfig +------------------------------- + +.. automodule:: boto.ec2.autoscale.launchconfig + :members: + :undoc-members: + +boto.ec2.autoscale.request +-------------------------- + +.. automodule:: boto.ec2.autoscale.request + :members: + :undoc-members: + +boto.ec2.autoscale.trigger +-------------------------- + +.. automodule:: boto.ec2.autoscale.trigger + :members: + :undoc-members: + +boto.ec2.buyreservation +----------------------- + +.. automodule:: boto.ec2.buyreservation + :members: + :undoc-members: + +boto.ec2.cloudwatch +------------------- + +.. automodule:: boto.ec2.cloudwatch + :members: + :undoc-members: + +boto.ec2.cloudwatch.datapoint +----------------------------- + +.. automodule:: boto.ec2.cloudwatch.datapoint + :members: + :undoc-members: + +boto.ec2.cloudwatch.metric +-------------------------- + +.. automodule:: boto.ec2.cloudwatch.metric + :members: + :undoc-members: + +boto.ec2.connection +------------------- + +.. automodule:: boto.ec2.connection + :members: + :undoc-members: + +boto.ec2.ec2object +------------------ + +.. automodule:: boto.ec2.ec2object + :members: + :undoc-members: + +boto.ec2.elb +------------ + +.. automodule:: boto.ec2.elb + :members: + :undoc-members: + +boto.ec2.elb.healthcheck +------------------------ + +.. automodule:: boto.ec2.elb.healthcheck + :members: + :undoc-members: + +boto.ec2.elb.instancestate +-------------------------- + +.. automodule:: boto.ec2.elb.instancestate + :members: + :undoc-members: + +boto.ec2.elb.listelement +------------------------ + +.. automodule:: boto.ec2.elb.listelement + :members: + :undoc-members: + +boto.ec2.elb.listener +--------------------- + +.. automodule:: boto.ec2.elb.listener + :members: + :undoc-members: + +boto.ec2.elb.loadbalancer +------------------------- + +.. automodule:: boto.ec2.elb.loadbalancer + :members: + :undoc-members: + +boto.ec2.image +-------------- + +.. automodule:: boto.ec2.image + :members: + :undoc-members: + +boto.ec2.instance +----------------- + +.. 
automodule:: boto.ec2.instance + :members: + :undoc-members: + +boto.ec2.instanceinfo +--------------------- + +.. automodule:: boto.ec2.instanceinfo + :members: + :undoc-members: + +boto.ec2.keypair +---------------- + +.. automodule:: boto.ec2.keypair + :members: + :undoc-members: + +boto.ec2.regioninfo +------------------- + +.. automodule:: boto.ec2.regioninfo + :members: + :undoc-members: + +boto.ec2.reservedinstance +------------------------- + +.. automodule:: boto.ec2.reservedinstance + :members: + :undoc-members: + +boto.ec2.securitygroup +---------------------- + +.. automodule:: boto.ec2.securitygroup + :members: + :undoc-members: + +boto.ec2.snapshot +----------------- + +.. automodule:: boto.ec2.snapshot + :members: + :undoc-members: + +boto.ec2.volume +--------------- + +.. automodule:: boto.ec2.volume + :members: + :undoc-members: + +boto.ec2.zone +------------- + +.. automodule:: boto.ec2.zone + :members: + :undoc-members: \ No newline at end of file diff --git a/vendor/boto/docs/source/ref/fps.rst b/vendor/boto/docs/source/ref/fps.rst new file mode 100644 index 000000000000..c160eee0594f --- /dev/null +++ b/vendor/boto/docs/source/ref/fps.rst @@ -0,0 +1,19 @@ +.. ref-fps + +=== +fps +=== + +boto.fps +-------- + +.. automodule:: boto.fps + :members: + :undoc-members: + +boto.fps.connection +------------------- + +.. automodule:: boto.fps.connection + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/index.rst b/vendor/boto/docs/source/ref/index.rst new file mode 100644 index 000000000000..ca1c93029c5f --- /dev/null +++ b/vendor/boto/docs/source/ref/index.rst @@ -0,0 +1,25 @@ +.. _ref-index: + +============= +API Reference +============= + +.. toctree:: + :maxdepth: 4 + + boto + cloudfront + contrib + ec2 + fps + manage + mapreduce + mashups + mturk + pyami + rds + s3 + sdb + services + sqs + vpc \ No newline at end of file diff --git a/vendor/boto/docs/source/ref/manage.rst b/vendor/boto/docs/source/ref/manage.rst new file mode 100644 index 000000000000..a175d88b9927 --- /dev/null +++ b/vendor/boto/docs/source/ref/manage.rst @@ -0,0 +1,47 @@ +.. ref-manage + +====== +manage +====== + +boto.manage +----------- + +.. automodule:: boto.manage + :members: + :undoc-members: + +boto.manage.cmdshell +-------------------- + +.. automodule:: boto.manage.cmdshell + :members: + :undoc-members: + +boto.manage.propget +------------------- + +.. automodule:: boto.manage.propget + :members: + :undoc-members: + +boto.manage.server +------------------ + +.. automodule:: boto.manage.server + :members: + :undoc-members: + +boto.manage.task +---------------- + +.. automodule:: boto.manage.task + :members: + :undoc-members: + +boto.manage.volume +------------------ + +.. automodule:: boto.manage.volume + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/mapreduce.rst b/vendor/boto/docs/source/ref/mapreduce.rst new file mode 100644 index 000000000000..97aa56cc3b70 --- /dev/null +++ b/vendor/boto/docs/source/ref/mapreduce.rst @@ -0,0 +1,38 @@ +.. ref-mapreduce + +========= +mapreduce +========= + +.. note:: + + I am not sure why pdb_delete, pdb_revert, pdb_describe, and pdb_upload are not available for import. + + +boto.mapreduce +-------------- + +.. automodule:: boto.mapreduce + :members: + :undoc-members: + +boto.mapreduce.lqs +------------------ + +.. automodule:: boto.mapreduce.lqs + :members: + :undoc-members: + +boto.mapreduce.partitiondb +-------------------------- + +.. 
automodule:: boto.mapreduce.partitiondb + :members: + :undoc-members: + +boto.mapreduce.queuetools +------------------------- + +.. automodule:: boto.mapreduce.queuetools + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/mashups.rst b/vendor/boto/docs/source/ref/mashups.rst new file mode 100644 index 000000000000..5eca84675ad0 --- /dev/null +++ b/vendor/boto/docs/source/ref/mashups.rst @@ -0,0 +1,40 @@ +.. ref-mashups + +======= +mashups +======= + +boto.mashups +------------ + +.. automodule:: boto.mashups + :members: + :undoc-members: + +boto.mashups.interactive +------------------------ + +.. automodule:: boto.mashups.interactive + :members: + :undoc-members: + +boto.mashups.iobject +-------------------- + +.. automodule:: boto.mashups.iobject + :members: + :undoc-members: + +boto.mashups.order +------------------ + +.. automodule:: boto.mashups.order + :members: + :undoc-members: + +boto.mashups.server +------------------- + +.. automodule:: boto.mashups.server + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/mturk.rst b/vendor/boto/docs/source/ref/mturk.rst new file mode 100644 index 000000000000..1c8429b32ced --- /dev/null +++ b/vendor/boto/docs/source/ref/mturk.rst @@ -0,0 +1,47 @@ +.. ref-mturk + +===== +mturk +===== + +boto.mturk +------------ + +.. automodule:: boto.mturk + :members: + :undoc-members: + +boto.mturk.connection +--------------------- + +.. automodule:: boto.mturk.connection + :members: + :undoc-members: + +boto.mturk.notification +----------------------- + +.. automodule:: boto.mturk.notification + :members: + :undoc-members: + +boto.mturk.price +---------------- + +.. automodule:: boto.mturk.price + :members: + :undoc-members: + +boto.mturk.qualification +------------------------ + +.. automodule:: boto.mturk.qualification + :members: + :undoc-members: + +boto.mturk.question +------------------- + +.. automodule:: boto.mturk.question + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/pyami.rst b/vendor/boto/docs/source/ref/pyami.rst new file mode 100644 index 000000000000..e573b34dca7f --- /dev/null +++ b/vendor/boto/docs/source/ref/pyami.rst @@ -0,0 +1,103 @@ +.. ref-pyami + +===== +pyami +===== + +boto.pyami +-------------- + +.. automodule:: boto.pyami + :members: + :undoc-members: + +boto.pyami.bootstrap +-------------------- + +.. automodule:: boto.pyami.bootstrap + :members: + :undoc-members: + +boto.pyami.config +----------------- + +.. automodule:: boto.pyami.config + :members: + :undoc-members: + +boto.pyami.copybot +------------------ + +.. automodule:: boto.pyami.copybot + :members: + :undoc-members: + +boto.pyami.installers +--------------------- + +.. automodule:: boto.pyami.installers + :members: + :undoc-members: + +boto.pyami.installers.ubuntu +---------------------------- + +.. automodule:: boto.pyami.installers.ubuntu + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.apache +----------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.apache + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.ebs +-------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.ebs + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.installer +-------------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.installer + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.mysql +---------------------------------- + +.. 
automodule:: boto.pyami.installers.ubuntu.mysql + :members: + :undoc-members: + +boto.pyami.installers.ubuntu.trac +--------------------------------- + +.. automodule:: boto.pyami.installers.ubuntu.trac + :members: + :undoc-members: + +boto.pyami.launch_ami +--------------------- + +.. automodule:: boto.pyami.launch_ami + :members: + :undoc-members: + +boto.pyami.scriptbase +--------------------- + +.. automodule:: boto.pyami.scriptbase + :members: + :undoc-members: + +boto.pyami.startup +------------------ + +.. automodule:: boto.pyami.startup + :members: + :undoc-members: \ No newline at end of file diff --git a/vendor/boto/docs/source/ref/rds.rst b/vendor/boto/docs/source/ref/rds.rst new file mode 100644 index 000000000000..7f02d33254eb --- /dev/null +++ b/vendor/boto/docs/source/ref/rds.rst @@ -0,0 +1,47 @@ +.. ref-rds + +=== +RDS +=== + +boto.rds +-------- + +.. automodule:: boto.rds + :members: + :undoc-members: + +boto.rds.dbinstance +------------------- + +.. automodule:: boto.rds.dbinstance + :members: + :undoc-members: + +boto.rds.dbsecuritygroup +------------------------ + +.. automodule:: boto.rds.dbsecuritygroup + :members: + :undoc-members: + +boto.rds.dbsnapshot +------------------- + +.. automodule:: boto.rds.dbsnapshot + :members: + :undoc-members: + +boto.rds.event +-------------- + +.. automodule:: boto.rds.event + :members: + :undoc-members: + +boto.rds.parametergroup +----------------------- + +.. automodule:: boto.rds.parametergroup + :members: + :undoc-members: \ No newline at end of file diff --git a/vendor/boto/docs/source/ref/s3.rst b/vendor/boto/docs/source/ref/s3.rst new file mode 100644 index 000000000000..e9b0b39f3d35 --- /dev/null +++ b/vendor/boto/docs/source/ref/s3.rst @@ -0,0 +1,54 @@ +.. ref-s3: + +=== +S3 +=== + +boto.s3.acl +----------- + +.. automodule:: boto.s3.acl + :members: + :undoc-members: + +boto.s3.bucket +-------------- + +.. automodule:: boto.s3.bucket + :members: + :undoc-members: + +boto.s3.bucketlistresultset +--------------------------- + +.. automodule:: boto.s3.bucketlistresultset + :members: + :undoc-members: + +boto.s3.connection +------------------ + +.. automodule:: boto.s3.connection + :members: + :undoc-members: + +boto.s3.key +----------- + +.. automodule:: boto.s3.key + :members: + :undoc-members: + +boto.s3.prefix +-------------- + +.. automodule:: boto.s3.prefix + :members: + :undoc-members: + +boto.s3.user +------------ + +.. automodule:: boto.s3.user + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/sdb.rst b/vendor/boto/docs/source/ref/sdb.rst new file mode 100644 index 000000000000..8b96d00fff44 --- /dev/null +++ b/vendor/boto/docs/source/ref/sdb.rst @@ -0,0 +1,144 @@ +.. ref-sdb + +=== +sdb +=== + +boto.sdb +-------- + +.. automodule:: boto.sdb + :members: + :undoc-members: + +boto.sdb.connection +------------------- + +.. automodule:: boto.sdb.connection + :members: + :undoc-members: + +boto.sdb.db +----------- + +.. automodule:: boto.sdb.db + :members: + :undoc-members: + +boto.sdb.db.blob +---------------- + +.. automodule:: boto.sdb.db.blob + :members: + :undoc-members: + +boto.sdb.db.key +--------------- + +.. automodule:: boto.sdb.db.key + :members: + :undoc-members: + +boto.sdb.db.manager +------------------- + +.. automodule:: boto.sdb.db.manager + :members: + :undoc-members: + +boto.sdb.db.manager.pgmanager +----------------------------- + +.. note:: + + This module requires psycopg2__ to be installed in the Python path. + + __ http://initd.org/ + +.. 
automodule:: boto.sdb.db.manager.pgmanager + :members: + :undoc-members: + +boto.sdb.db.manager.sdbmanager +------------------------------ + +.. automodule:: boto.sdb.db.manager.sdbmanager + :members: + :undoc-members: + +boto.sdb.db.manager.xmlmanager +------------------------------ + +.. automodule:: boto.sdb.db.manager.xmlmanager + :members: + :undoc-members: + +boto.sdb.db.model +----------------- + +.. automodule:: boto.sdb.db.model + :members: + :undoc-members: + +boto.sdb.db.property +-------------------- + +.. automodule:: boto.sdb.db.property + :members: + :undoc-members: + +boto.sdb.db.query +----------------- + +.. automodule:: boto.sdb.db.query + :members: + :undoc-members: + +boto.sdb.domain +--------------- + +.. automodule:: boto.sdb.domain + :members: + :undoc-members: + +boto.sdb.item +------------- + +.. automodule:: boto.sdb.item + :members: + :undoc-members: + +boto.sdb.persist +---------------- + +.. automodule:: boto.sdb.persist + :members: + :undoc-members: + +boto.sdb.persist.checker +------------------------ + +.. automodule:: boto.sdb.persist.checker + :members: + :undoc-members: + +boto.sdb.persist.object +----------------------- + +.. automodule:: boto.sdb.persist.object + :members: + :undoc-members: + +boto.sdb.persist.property +------------------------- + +.. automodule:: boto.sdb.persist.property + :members: + :undoc-members: + +boto.sdb.queryresultset +----------------------- + +.. automodule:: boto.sdb.queryresultset + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/services.rst b/vendor/boto/docs/source/ref/services.rst new file mode 100644 index 000000000000..aa73dcc27428 --- /dev/null +++ b/vendor/boto/docs/source/ref/services.rst @@ -0,0 +1,61 @@ +.. ref-services + +======== +services +======== + +boto.services +------------- + +.. automodule:: boto.services + :members: + :undoc-members: + +boto.services.bs +---------------- + +.. automodule:: boto.services.bs + :members: + :undoc-members: + +boto.services.message +--------------------- + +.. automodule:: boto.services.message + :members: + :undoc-members: + +boto.services.result +-------------------- + +.. automodule:: boto.services.result + :members: + :undoc-members: + +boto.services.service +--------------------- + +.. automodule:: boto.services.service + :members: + :undoc-members: + +boto.services.servicedef +------------------------ + +.. automodule:: boto.services.servicedef + :members: + :undoc-members: + +boto.services.sonofmmm +---------------------- + +.. automodule:: boto.services.sonofmmm + :members: + :undoc-members: + +boto.services.submit +-------------------- + +.. automodule:: boto.services.submit + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/sqs.rst b/vendor/boto/docs/source/ref/sqs.rst new file mode 100644 index 000000000000..1d7b84e9a1a3 --- /dev/null +++ b/vendor/boto/docs/source/ref/sqs.rst @@ -0,0 +1,54 @@ +.. _ref-sqs: + +==== +SQS +==== + +boto.sqs +-------- + +.. automodule:: boto.sqs + :members: + :undoc-members: + +boto.sqs.attributes +------------------- + +.. automodule:: boto.sqs.attributes + :members: + :undoc-members: + +boto.sqs.connection +------------------- + +.. automodule:: boto.sqs.connection + :members: + :undoc-members: + +boto.sqs.jsonmessage +-------------------- + +.. automodule:: boto.sqs.jsonmessage + :members: + :undoc-members: + +boto.sqs.message +---------------- + +.. automodule:: boto.sqs.message + :members: + :undoc-members: + +boto.sqs.queue +-------------- + +.. 
automodule:: boto.sqs.queue + :members: + :undoc-members: + +boto.sqs.regioninfo +------------------- + +.. automodule:: boto.sqs.regioninfo + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/ref/vpc.rst b/vendor/boto/docs/source/ref/vpc.rst new file mode 100644 index 000000000000..dfa4c9131346 --- /dev/null +++ b/vendor/boto/docs/source/ref/vpc.rst @@ -0,0 +1,54 @@ +.. _ref-vpc: + +==== +VPC +==== + +boto.vpc +-------- + +.. automodule:: boto.vpc + :members: + :undoc-members: + +boto.vpc.customergateway +------------------------ + +.. automodule:: boto.vpc.customergateway + :members: + :undoc-members: + +boto.vpc.dhcpoptions +-------------------- + +.. automodule:: boto.vpc.dhcpoptions + :members: + :undoc-members: + +boto.vpc.subnet +--------------- + +.. automodule:: boto.vpc.subnet + :members: + :undoc-members: + +boto.vpc.vpc +------------ + +.. automodule:: boto.vpc.vpc + :members: + :undoc-members: + +boto.vpc.vpnconnection +---------------------- + +.. automodule:: boto.vpc.vpnconnection + :members: + :undoc-members: + +boto.vpc.vpngateway +------------------- + +.. automodule:: boto.vpc.vpngateway + :members: + :undoc-members: diff --git a/vendor/boto/docs/source/s3_tut.rst b/vendor/boto/docs/source/s3_tut.rst new file mode 100644 index 000000000000..460706c5bbed --- /dev/null +++ b/vendor/boto/docs/source/s3_tut.rst @@ -0,0 +1,213 @@ +.. _s3_tut: + +====================================== +An Introduction to boto's S3 interface +====================================== + +This tutorial focuses on the boto interface to the Simple Storage Service +from Amazon Web Services. This tutorial assumes that you have already +downloaded and installed boto. + +Creating a Connection +--------------------- +The first step in accessing S3 is to create a connection to the service. +There are two ways to do this in boto. The first is: + +>>> from boto.s3.connection import S3Connection +>>> conn = S3Connection('', '') + +At this point the variable conn will point to an S3Connection object. In +this example, the AWS access key and AWS secret key are passed in to the +method explicitely. Alternatively, you can set the environment variables: + +AWS_ACCESS_KEY_ID - Your AWS Access Key ID +AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key + +and then call the constructor without any arguments, like this: + +>>> conn = S3Connection() + +There is also a shortcut function in the boto package, called connect_s3 +that may provide a slightly easier means of creating a connection: + +>>> import boto +>>> conn = boto.connect_s3() + +In either case, conn will point to an S3Connection object which we will +use throughout the remainder of this tutorial. + +Creating a Bucket +----------------- + +Once you have a connection established with S3, you will probably want to +create a bucket. A bucket is a container used to store key/value pairs +in S3. A bucket can hold un unlimited about of data so you could potentially +have just one bucket in S3 for all of your information. Or, you could create +separate buckets for different types of data. You can figure all of that out +later, first let's just create a bucket. That can be accomplished like this: + +>>> bucket = conn.create_bucket('mybucket') +Traceback (most recent call last): + File "", line 1, in ? + File "boto/connection.py", line 285, in create_bucket + raise S3CreateError(response.status, response.reason) +boto.exception.S3CreateError: S3Error[409]: Conflict + +Whoa. What happended there? 
Well, the thing you have to know about +buckets is that they are kind of like domain names. It's one flat name +space that everyone who uses S3 shares. So, someone has already create +a bucket called "mybucket" in S3 and that means no one else can grab that +bucket name. So, you have to come up with a name that hasn't been taken yet. +For example, something that uses a unique string as a prefix. Your +AWS_ACCESS_KEY (NOT YOUR SECRET KEY!) could work but I'll leave it to +your imagination to come up with something. I'll just assume that you +found an acceptable name. + +The create_bucket method will create the requested bucket if it does not +exist or will return the existing bucket if it does exist. + +Storing Data +---------------- + +Once you have a bucket, presumably you will want to store some data +in it. S3 doesn't care what kind of information you store in your objects +or what format you use to store it. All you need is a key that is unique +within your bucket. + +The Key object is used in boto to keep track of data stored in S3. To store +new data in S3, start by creating a new Key object: + +>>> from boto.s3.key import Key +>>> k = Key(bucket) +>>> k.key = 'foobar' +>>> k.set_contents_from_string('This is a test of S3') + +The net effect of these statements is to create a new object in S3 with a +key of "foobar" and a value of "This is a test of S3". To validate that +this worked, quit out of the interpreter and start it up again. Then: + +>>> import boto +>>> c = boto.connect_s3() +>>> b = c.create_bucket('mybucket') # substitute your bucket name here +>>> from boto.s3.key import Key +>>> k = Key(b) +>>> k.key = 'foobar' +>>> k.get_contents_as_string() +'This is a test of S3' + +So, we can definitely store and retrieve strings. A more interesting +example may be to store the contents of a local file in S3 and then retrieve +the contents to another local file. + +>>> k = Key(b) +>>> k.key = 'myfile' +>>> k.set_contents_from_filename('foo.jpg') +>>> k.get_contents_to_filename('bar.jpg') + +There are a couple of things to note about this. When you send data to +S3 from a file or filename, boto will attempt to determine the correct +mime type for that file and send it as a Content-Type header. The boto +package uses the standard mimetypes package in Python to do the mime type +guessing. The other thing to note is that boto does stream the content +to and from S3 so you should be able to send and receive large files without +any problem. + +Listing All Available Buckets +----------------------------- +In addition to accessing specific buckets via the create_bucket method +you can also get a list of all available buckets that you have created. + +>>> rs = conn.get_all_buckets() + +This returns a ResultSet object (see the SQS Tutorial for more info on +ResultSet objects). The ResultSet can be used as a sequence or list type +object to retrieve Bucket objects. + +>>> len(rs) +11 +>>> for b in rs: +... print b.name +... ++>>> b = rs[0] + +Setting / Getting the Access Control List for Buckets and Keys +-------------------------------------------------------------- +The S3 service provides the ability to control access to buckets and keys +within s3 via the Access Control List (ACL) associated with each object in +S3. There are two ways to set the ACL for an object: + +1. Create a custom ACL that grants specific rights to specific users. 
At the + moment, the users that are specified within grants have to be registered + users of Amazon Web Services so this isn't as useful or as general as it + could be. + +2. Use a "canned" access control policy. There are four canned policies + defined: + a. private: Owner gets FULL_CONTROL. No one else has any access rights. + b. public-read: Owner gets FULL_CONTROL and the anonymous principal is granted READ access. + c. public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access. + d. authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access. + +Currently, boto only supports the second method using canned access control +policies. A future version may allow setting of arbitrary ACLs if there +is sufficient demand. + +To set the ACL for a bucket, use the set_acl method of the Bucket object. +The argument passed to this method must be one of the four permissible +canned policies named in the list CannedACLStrings contained in acl.py. +For example, to make a bucket readable by anyone: + +>>> b.set_acl('public-read') + +You can also set the ACL for Key objects, either by passing an additional +argument to the above method: + +>>> b.set_acl('public-read', 'foobar') + +where 'foobar' is the key of some object within the bucket b, or you can +call the set_acl method of the Key object: + +>>> k.set_acl('public-read') + +You can also retrieve the current ACL for a Bucket or Key object using the +get_acl method. This method parses the AccessControlPolicy response sent +by S3 and creates a set of Python objects that represent the ACL. + +>>> acp = b.get_acl() +>>> acp + +>>> acp.acl + +>>> acp.acl.grants +[] +>>> for grant in acp.acl.grants: +... print grant.permission, grant.grantee +... +FULL_CONTROL + +The Python objects representing the ACL can be found in the acl.py module +of boto. + +Setting/Getting Metadata Values on Key Objects +---------------------------------------------- +S3 allows arbitrary user metadata to be assigned to objects within a bucket. +To take advantage of this S3 feature, you should use the set_metadata and +get_metadata methods of the Key object to set and retrieve metadata associated +with an S3 object. For example: + +>>> k = Key(b) +>>> k.key = 'has_metadata' +>>> k.set_metadata('meta1', 'This is the first metadata value') +>>> k.set_metadata('meta2', 'This is the second metadata value') +>>> k.set_contents_from_filename('foo.txt') + +This code associates two metadata key/value pairs with the Key k. To retrieve +those values later: + +>>> k = b.get_key('has_metadata') +>>> k.get_metadata('meta1') +'This is the first metadata value' +>>> k.get_metadata('meta2') +'This is the second metadata value' +>>> diff --git a/vendor/boto/docs/source/sqs_tut.rst b/vendor/boto/docs/source/sqs_tut.rst new file mode 100644 index 000000000000..d05cc53c2cff --- /dev/null +++ b/vendor/boto/docs/source/sqs_tut.rst @@ -0,0 +1,230 @@ +.. _sqs_tut: + +======================================= +An Introduction to boto's SQS interface +======================================= + +This tutorial focuses on the boto interface to the Simple Queue Service +from Amazon Web Services. This tutorial assumes that you have already +downloaded and installed boto. + +Creating a Connection +--------------------- +The first step in accessing SQS is to create a connection to the service. +There are two ways to do this in boto.
The first is: + +>>> from boto.sqs.connection import SQSConnection +>>> conn = SQSConnection('', '') + +At this point the variable conn will point to an SQSConnection object. In +this example, the AWS access key and AWS secret key are passed in to the +method explicitely. Alternatively, you can set the environment variables: + +AWS_ACCESS_KEY_ID - Your AWS Access Key ID +AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key + +and then call the constructor without any arguments, like this: + +>>> conn = SQSConnection() + +There is also a shortcut function in the boto package, called connect_sqs +that may provide a slightly easier means of creating a connection: + +>>> import boto +>>> conn = boto.connect_sqs() + +In either case, conn will point to an SQSConnection object which we will +use throughout the remainder of this tutorial. + +Creating a Queue +---------------- + +Once you have a connection established with SQS, you will probably want to +create a queue. That can be accomplished like this: + +>>> q = conn.create_queue('myqueue') + +The create_queue method will create the requested queue if it does not +exist or will return the existing queue if it does exist. There is an +optional parameter to create_queue called visibility_timeout. This basically +controls how long a message will remain invisible to other queue readers +once it has been read (see SQS documentation for more detailed explanation). +If this is not explicitly specified the queue will be created with whatever +default value SQS provides (currently 30 seconds). If you would like to +specify another value, you could do so like this: + +>>> q = conn.create_queue('myqueue', 120) + +This would establish a default visibility timeout for this queue of 120 +seconds. As you will see later on, this default value for the queue can +also be overridden each time a message is read from the queue. If you want +to check what the default visibility timeout is for a queue: + +>>> q.get_timeout() +30 +>>> + +Writing Messages +---------------- + +Once you have a queue, presumably you will want to write some messages +to it. SQS doesn't care what kind of information you store in your messages +or what format you use to store it. As long as the amount of data per +message is less than or equal to 256Kb, it's happy. + +However, you may have a lot of specific requirements around the format of +that data. For example, you may want to store one big string or you might +want to store something that looks more like RFC822 messages or you might want +to store a binary payload such as pickled Python objects. + +The way boto deals with this is to define a simple Message object that +treats the message data as one big string which you can set and get. If that +Message object meets your needs, you're good to go. However, if you need to +incorporate different behavior in your message or handle different types of +data you can create your own Message class. You just need to register that +class with the queue so that it knows that when you read a message from the +queue that it should create one of your message objects rather than the +default boto Message object. To register your message class, you would: + +>>> q.set_message_class(MyMessage) + +where MyMessage is the class definition for your message class. Your +message class should subclass the boto Message because there is a small +bit of Python magic happening in the __setattr__ method of the boto Message +class. + +For this tutorial, let's just assume that we are using the boto Message +class. 
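(Editorial aside, not part of the original boto tutorial: if you did need your own class, a minimal sketch could look like the one below. The name JSONMessage and the set_payload/get_payload helpers are invented here purely for illustration; boto also ships a ready-made boto.sqs.jsonmessage module if JSON payloads are all you need.)

    import json
    from boto.sqs.message import Message

    class JSONMessage(Message):
        """Hypothetical subclass that stores a Python dict as JSON text."""

        def set_payload(self, obj):
            # Serialize the dict and hand it to the normal set_body machinery.
            self.set_body(json.dumps(obj))

        def get_payload(self):
            # Parse the JSON text back into a Python dict.
            return json.loads(self.get_body())

You would register and use it just like the stock class:

>>> q.set_message_class(JSONMessage)
>>> m = JSONMessage()
>>> m.set_payload({'task': 'resize', 'id': 42})
>>> status = q.write(m)

The rest of this tutorial sticks with the stock Message class.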
So, first we need to create a Message object: + +>>> from boto.sqs.message import Message +>>> m = Message() +>>> m.set_body('This is my first message.') +>>> status = q.write(m) + +The write method returns a True if everything went well. If the write +didn't succeed it will either return a False (meaning SQS simply chose +not to write the message for some reason) or an exception if there was +some sort of problem with the request. + +Reading Messages +---------------- + +So, now we have a message in our queue. How would we go about reading it? +Here's one way: + +>>> rs = q.get_messages() +>>> len(rs) +1 +>>> m = rs[0] +>>> m.get_body() +u'This is my first message' + +The get_messages method also returns a ResultSet object as described +above. In addition to the special attributes that we already talked +about the ResultSet object also contains any results returned by the +request. To get at the results you can treat the ResultSet as a +sequence object (e.g. a list). We can check the length (how many results) +and access particular items within the list using the slice notation +familiar to Python programmers. + +At this point, we have read the message from the queue and SQS will make +sure that this message remains invisible to other readers of the queue +until the visibility timeout period for the queue expires. If I delete +the message before the timeout period expires then no one will ever see +the message again. However, if I don't delete it (maybe because I crashed +or failed in some way, for example) it will magically reappear in my queue +for someone else to read. If you aren't happy with the default visibility +timeout defined for the queue, you can override it when you read a message: + +>>> q.get_messages(visibility_timeout=60) + +This means that regardless of what the default visibility timeout is for +the queue, this message will remain invisible to other readers for 60 +seconds. + +The get_messages method can also return more than a single message. By +passing a num_messages parameter (defaults to 1) you can control the maximum +number of messages that will be returned by the method. To show this +feature off, first let's load up a few more messages. + +>>> for i in range(1, 11): +... m = Message('This is message %d' % i) +... q.write(m) +... +>>> rs = q.get_messages(10) +>>> len(rs) +10 + +Don't be alarmed if the length of the result set returned by the get_messages +call is less than 10. Sometimes it takes some time for new messages to become +visible in the queue. Give it a minute or two and they will all show up. + +If you want a slightly simpler way to read messages from a queue, you +can use the read method. It will either return the message read or +it will return None if no messages were available. You can also pass +a visibility_timeout parameter to read, if you desire: + +>>> m = q.read(60) +>>> m.get_body() +u'This is my first message' + +Deleting Messages and Queues +---------------------------- + +Note that the first message we put in the queue is still there, even though +we have read it a number of times. That's because we never deleted it. To +remove a message from a queue: + +>>> q.delete_message(m) +[] + +If I want to delete the entire queue, I would use: + +>>> conn.delete_queue(q) + +However, this won't succeed unless the queue is empty. + +Listing All Available Queues +---------------------------- +In addition to accessing specific queues via the create_queue method +you can also get a list of all available queues that you have created. 
+ +>>> rs = conn.get_all_queues() + +This returns a ResultSet object, as described above. The ResultSet +can be used as a sequence or list type object to retrieve Queue objects. + +>>> len(rs) +11 +>>> for q in rs: +... print q.id +... ++>>> q = rs[0] + +Other Stuff +----------- + +That covers the basic operations of creating queues, writing messages, +reading messages, deleting messages, and deleting queues. There are a +few utility methods in boto that might be useful as well. For example, +to count the number of messages in a queue: + +>>> q.count() +10 + +This can be handy, but this command, as well as the other two utility methods +I'll describe in a minute, is inefficient and should be used with caution +on queues with lots of messages (e.g. many hundreds or more). Similarly, +you can clear (delete) all messages in a queue with: + +>>> q.clear() + +Be REAL careful with that one! Finally, if you want to dump all of the +messages in a queue to a local file: + +>>> q.dump('messages.txt', sep='\n------------------\n') + +This will read all of the messages in the queue and write the bodies of +each of the messages to the file messages.txt. The optional sep argument +is a separator that will be printed between each message body in the file. diff --git a/vendor/boto/docs/source/vpc_tut.rst b/vendor/boto/docs/source/vpc_tut.rst new file mode 100644 index 000000000000..0040866f89d0 --- /dev/null +++ b/vendor/boto/docs/source/vpc_tut.rst @@ -0,0 +1,88 @@ +.. _vpc_tut: + +======================================= +An Introduction to boto's VPC interface +======================================= + +This tutorial is based on the examples in the Amazon Virtual Private +Cloud Getting Started Guide (http://docs.amazonwebservices.com/AmazonVPC/latest/GettingStartedGuide/). +In each example, it tries to show the boto requests that correspond to +the AWS command line tools. + +Creating a VPC connection +------------------------- +First, we need to create a new VPC connection: + +>>> from boto.vpc import VPCConnection +>>> c = VPCConnection() + +To create a VPC +--------------- +Now that we have a VPC connection, we can create our first VPC. + +>>> vpc = c.create_vpc('10.0.0.0/24') +>>> vpc +VPC:vpc-6b1fe402 +>>> vpc.id +u'vpc-6b1fe402' +>>> vpc.state +u'pending' +>>> vpc.cidr_block +u'10.0.0.0/24' +>>> vpc.dhcp_options_id +u'default' +>>> + +To create a subnet +------------------ +The next step is to create a subnet to associate with your VPC. + +>>> subnet = c.create_subnet(vpc.id, '10.0.0.0/25') +>>> subnet.id +u'subnet-6a1fe403' +>>> subnet.state +u'pending' +>>> subnet.cidr_block +u'10.0.0.0/25' +>>> subnet.available_ip_address_count +123 +>>> subnet.availability_zone +u'us-east-1b' +>>> + +To create a customer gateway +---------------------------- +Next, we create a customer gateway.
+ +>>> cg = c.create_customer_gateway('ipsec.1', '12.1.2.3', 65534) +>>> cg.id +u'cgw-b6a247df' +>>> cg.type +u'ipsec.1' +>>> cg.state +u'available' +>>> cg.ip_address +u'12.1.2.3' +>>> cg.bgp_asn +u'65534' +>>> + +To create a VPN gateway +----------------------- + +>>> vg = c.create_vpn_gateway('ipsec.1') +>>> vg.id +u'vgw-44ad482d' +>>> vg.type +u'ipsec.1' +>>> vg.state +u'pending' +>>> vg.availability_zone +u'us-east-1b' +>>> + +Attaching a VPN Gateway to a VPC +-------------------------------- + +>>> vg.attach(vpc.id) +>>> diff --git a/vendor/boto/pylintrc b/vendor/boto/pylintrc new file mode 100644 index 000000000000..44ed07796f5f --- /dev/null +++ b/vendor/boto/pylintrc @@ -0,0 +1,305 @@ +# lint Python modules using external checkers. +# +# This is the main checker controlling the other ones and the reports +# generation. It is itself both a raw checker and an astng checker in order +# to: +# * handle message activation / deactivation at the module level +# * handle some basic but necessary stats'data (number of classes, methods...) +# +[MASTER] + + +# Specify a configuration file. +#rcfile= + +# Profiled execution. +profile=no + +# Add to the black list. It should be a base name, not a +# path. You may set this option multiple times. +ignore=.svn + +# Pickle collected data for later comparisons. +persistent=yes + +# Set the cache size for astng objects. +cache-size=500 + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + + +[MESSAGES CONTROL] + +# Enable only checker(s) with the given id(s). This option conflict with the +# disable-checker option +#enable-checker= + +# Enable all checker(s) except those with the given id(s). This option conflict +# with the disable-checker option +#disable-checker= + +# Enable all messages in the listed categories. +#enable-msg-cat= + +# Disable all messages in the listed categories. +#disable-msg-cat= + +# Enable the message(s) with the given id(s). +#enable-msg= + +# Disable the message(s) with the given id(s). +# disable-msg=C0323,W0142,C0301,C0103,C0111,E0213,C0302,C0203,W0703,R0201 +disable-msg=C0111,C0103,W0703,W0702 + +[REPORTS] + +# set the output format. Available formats are text, parseable, colorized and +# html +output-format=colorized + +# Include message's id in output +include-ids=yes + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no + +# Tells wether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note).You have access to the variables errors warning, statement which +# respectivly contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (R0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Add a comment according to your evaluation note. This is used by the global +# evaluation report (R0004). +comment=no + +# Enable the report(s) with the given id(s). +#enable-report= + +# Disable the report(s) with the given id(s). 
+#disable-report= + +# checks for +# * unused variables / imports +# * undefined variables +# * redefinition of variable from builtins or from an outer scope +# * use of variable before assigment +# +[VARIABLES] + +# Tells wether we should check for unused import in __init__ files. +init-import=yes + +# A regular expression matching names used for dummy variables (i.e. not used). +dummy-variables-rgx=_|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + + +# try to find bugs in the code using type inference +# +[TYPECHECK] + +# Tells wether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# When zope mode is activated, consider the acquired-members option to ignore +# access to some undefined attributes. +zope=no + +# List of members which are usually get through zope's acquisition mecanism and +# so shouldn't trigger E0201 when accessed (need zope=yes to be considered). +acquired-members=REQUEST,acl_users,aq_parent + + +# checks for : +# * doc strings +# * modules / classes / functions / methods / arguments / variables name +# * number of arguments, local variables, branches, returns and statements in +# functions, methods +# * required module attributes +# * dangerous default values as arguments +# * redefinition of function / method / class +# * uses of the global statement +# +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# Regular expression which should only match functions or classes name which do +# not require a docstring +no-docstring-rgx=__.*__ + +# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ + +# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z1-9_]*)|(__.*__))$ + +# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct method names +method-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# List of builtins function names that should not be used, separated by a comma +bad-functions=apply,input + + +# checks for sign of poor/misdesign: +# * number of methods, attributes, local variables... 
+# * size, complexity of functions, methods +# +[DESIGN] + +# Maximum number of arguments for function / method +max-args=12 + +# Maximum number of locals for function / method body +max-locals=30 + +# Maximum number of return / yield for function / method body +max-returns=12 + +# Maximum number of branch for function / method body +max-branchs=30 + +# Maximum number of statements in function / method body +max-statements=60 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=20 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=0 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + + +# checks for +# * external modules dependencies +# * relative / wildcard imports +# * cyclic imports +# * uses of deprecated modules +# +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,string,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report R0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report R0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report R0402 must +# not be disabled) +int-import-graph= + + +# checks for : +# * methods without self as first argument +# * overridden methods signature +# * access only to existant members via self +# * attributes not defined in the __init__ method +# * supported interfaces implementation +# * unreachable code +# +[CLASSES] + +# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. +# ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + + +# checks for similarities and duplicated code. This computation may be +# memory / CPU intensive, so you should disable it if you experiments some +# problems. +# +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=5 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + + +# checks for: +# * warning notes in the code like FIXME, XXX +# * PEP 263: source code with non ascii character but no encoding declaration +# +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO,BUG: + + +# checks for : +# * unauthorized constructions +# * strict indentation +# * line length +# * use of <> instead of != +# +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# Maximum number of lines in a module +max-module-lines=1000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). 
+indent-string=' ' + + +[MESSAGES CONTROL] +disable-msg=C0301,C0111,C0103,R0201,W0702,C0324 diff --git a/vendor/boto/setup.py b/vendor/boto/setup.py new file mode 100644 index 000000000000..bbfbf4a1ab47 --- /dev/null +++ b/vendor/boto/setup.py @@ -0,0 +1,56 @@ +#!/usr/bin/python + +# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +try: + from setuptools import setup +except ImportError: + from distutils.core import setup + +from boto import Version + +setup(name = "boto", + version = Version, + description = "Amazon Web Services Library", + long_description="Python interface to Amazon's Web Services.", + author = "Mitch Garnaat", + author_email = "mitch@garnaat.com", + scripts = ["bin/sdbadmin", "bin/elbadmin", "bin/cfadmin", + "bin/s3put", "bin/fetch_file", "bin/launch_instance", + "bin/list_instances", "bin/taskadmin", "bin/kill_instance", + "bin/bundle_image", "bin/pyami_sendmail"], + url = "http://code.google.com/p/boto/", + packages = [ 'boto', 'boto.sqs', 'boto.s3', + 'boto.ec2', 'boto.ec2.cloudwatch', 'boto.ec2.autoscale', 'boto.ec2.elb', + 'boto.sdb', 'boto.sdb.persist', 'boto.sdb.db', 'boto.sdb.db.manager', + 'boto.mturk', 'boto.pyami', 'boto.mashups', 'boto.contrib', 'boto.manage', + 'boto.services', 'boto.tests', 'boto.cloudfront', 'boto.rds', 'boto.vpc', + 'boto.fps', 'boto.emr'], + license = 'MIT', + platforms = 'Posix; MacOS X; Windows', + classifiers = [ 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Topic :: Internet', + ], + ) diff --git a/vendor/carrot/__init__.py b/vendor/carrot/__init__.py new file mode 100644 index 000000000000..d40099b670c0 --- /dev/null +++ b/vendor/carrot/__init__.py @@ -0,0 +1,7 @@ +"""AMQP Messaging Framework for Python""" +VERSION = (0, 10, 2) +__version__ = ".".join(map(str, VERSION)) +__author__ = "Ask Solem" +__contact__ = "askh@opera.com" +__homepage__ = "http://github.com/ask/carrot/" +__docformat__ = "restructuredtext" diff --git a/vendor/carrot/backends/__init__.py b/vendor/carrot/backends/__init__.py new file mode 100644 index 000000000000..1f1ebfb6cb66 --- /dev/null +++ b/vendor/carrot/backends/__init__.py @@ -0,0 +1,54 @@ +""" + +Working with Backends. 
+ +""" +import sys + +from carrot.utils import rpartition + +DEFAULT_BACKEND = "carrot.backends.pyamqplib.Backend" + +BACKEND_ALIASES = { + "amqp": "carrot.backends.pyamqplib.Backend", + "amqplib": "carrot.backends.pyamqplib.Backend", + "stomp": "carrot.backends.pystomp.Backend", + "stompy": "carrot.backends.pystomp.Backend", + "memory": "carrot.backends.queue.Backend", + "mem": "carrot.backends.queue.Backend", + "pika": "carrot.backends.pikachu.AsyncoreBackend", + "pikachu": "carrot.backends.pikachu.AsyncoreBackend", + "syncpika": "carrot.backends.pikachu.SyncBackend", +} + +_backend_cache = {} + + +def resolve_backend(backend=None): + backend = BACKEND_ALIASES.get(backend, backend) + backend_module_name, _, backend_cls_name = rpartition(backend, ".") + return backend_module_name, backend_cls_name + + +def _get_backend_cls(backend=None): + backend_module_name, backend_cls_name = resolve_backend(backend) + __import__(backend_module_name) + backend_module = sys.modules[backend_module_name] + return getattr(backend_module, backend_cls_name) + + +def get_backend_cls(backend=None): + """Get backend class by name. + + The backend string is the full path to a backend class, e.g.:: + + "carrot.backends.pyamqplib.Backend" + + If the name does not include "``.``" (is not fully qualified), + the alias table will be consulted. + + """ + backend = backend or DEFAULT_BACKEND + if backend not in _backend_cache: + _backend_cache[backend] = _get_backend_cls(backend) + return _backend_cache[backend] diff --git a/vendor/carrot/backends/base.py b/vendor/carrot/backends/base.py new file mode 100644 index 000000000000..7f3cc92fd1e3 --- /dev/null +++ b/vendor/carrot/backends/base.py @@ -0,0 +1,185 @@ +""" + +Backend base classes. + +""" +from carrot import serialization + +ACKNOWLEDGED_STATES = frozenset(["ACK", "REJECTED", "REQUEUED"]) + + +class MessageStateError(Exception): + """The message has already been acknowledged.""" + + +class BaseMessage(object): + """Base class for received messages.""" + _state = None + + MessageStateError = MessageStateError + + def __init__(self, backend, **kwargs): + self.backend = backend + self.body = kwargs.get("body") + self.delivery_tag = kwargs.get("delivery_tag") + self.content_type = kwargs.get("content_type") + self.content_encoding = kwargs.get("content_encoding") + self.delivery_info = kwargs.get("delivery_info", {}) + self._decoded_cache = None + self._state = "RECEIVED" + + def decode(self): + """Deserialize the message body, returning the original + python structure sent by the publisher.""" + return serialization.decode(self.body, self.content_type, + self.content_encoding) + + @property + def payload(self): + """The decoded message.""" + if not self._decoded_cache: + self._decoded_cache = self.decode() + return self._decoded_cache + + def ack(self): + """Acknowledge this message as being processed., + This will remove the message from the queue. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. + + """ + if self.acknowledged: + raise self.MessageStateError( + "Message already acknowledged with state: %s" % self._state) + self.backend.ack(self.delivery_tag) + self._state = "ACK" + + def reject(self): + """Reject this message. + + The message will be discarded by the server. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. 
+ + """ + if self.acknowledged: + raise self.MessageStateError( + "Message already acknowledged with state: %s" % self._state) + self.backend.reject(self.delivery_tag) + self._state = "REJECTED" + + def requeue(self): + """Reject this message and put it back on the queue. + + You must not use this method as a means of selecting messages + to process. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. + + """ + if self.acknowledged: + raise self.MessageStateError( + "Message already acknowledged with state: %s" % self._state) + self.backend.requeue(self.delivery_tag) + self._state = "REQUEUED" + + @property + def acknowledged(self): + return self._state in ACKNOWLEDGED_STATES + + +class BaseBackend(object): + """Base class for backends.""" + default_port = None + extra_options = None + + def __init__(self, connection, **kwargs): + self.connection = connection + self.extra_options = kwargs.get("extra_options") + + def queue_declare(self, *args, **kwargs): + """Declare a queue by name.""" + pass + + def queue_delete(self, *args, **kwargs): + """Delete a queue by name.""" + pass + + def exchange_declare(self, *args, **kwargs): + """Declare an exchange by name.""" + pass + + def queue_bind(self, *args, **kwargs): + """Bind a queue to an exchange.""" + pass + + def get(self, *args, **kwargs): + """Pop a message off the queue.""" + pass + + def declare_consumer(self, *args, **kwargs): + pass + + def consume(self, *args, **kwargs): + """Iterate over the declared consumers.""" + pass + + def cancel(self, *args, **kwargs): + """Cancel the consumer.""" + pass + + def ack(self, delivery_tag): + """Acknowledge the message.""" + pass + + def queue_purge(self, queue, **kwargs): + """Discard all messages in the queue. This will delete the messages + and results in an empty queue.""" + return 0 + + def reject(self, delivery_tag): + """Reject the message.""" + pass + + def requeue(self, delivery_tag): + """Requeue the message.""" + pass + + def purge(self, queue, **kwargs): + """Discard all messages in the queue.""" + pass + + def message_to_python(self, raw_message): + """Convert received message body to a python datastructure.""" + return raw_message + + def prepare_message(self, message_data, delivery_mode, **kwargs): + """Prepare message for sending.""" + return message_data + + def publish(self, message, exchange, routing_key, **kwargs): + """Publish a message.""" + pass + + def close(self): + """Close the backend.""" + pass + + def establish_connection(self): + """Establish a connection to the backend.""" + pass + + def close_connection(self, connection): + """Close the connection.""" + pass + + def flow(self, active): + """Enable/disable flow from peer.""" + pass + + def qos(self, prefetch_size, prefetch_count, apply_global=False): + """Request specific Quality of Service.""" + pass diff --git a/vendor/carrot/backends/pikachu.py b/vendor/carrot/backends/pikachu.py new file mode 100644 index 000000000000..d47e1c01a6aa --- /dev/null +++ b/vendor/carrot/backends/pikachu.py @@ -0,0 +1,209 @@ +import asyncore +import weakref +import functools +import itertools + +import pika + +from carrot.backends.base import BaseMessage, BaseBackend + +DEFAULT_PORT = 5672 + + +class Message(BaseMessage): + + def __init__(self, backend, amqp_message, **kwargs): + channel, method, header, body = amqp_message + self._channel = channel + self._method = method + self._header = header + self.backend = backend + + kwargs.update({"body": body, + "delivery_tag": method.delivery_tag, 
+ "content_type": header.content_type, + "content_encoding": header.content_encoding, + "delivery_info": dict( + consumer_tag=method.consumer_tag, + routing_key=method.routing_key, + delivery_tag=method.delivery_tag, + exchange=method.exchange)}) + + super(Message, self).__init__(backend, **kwargs) + + +class SyncBackend(BaseBackend): + default_port = DEFAULT_PORT + _connection_cls = pika.BlockingConnection + + Message = Message + + def __init__(self, connection, **kwargs): + self.connection = connection + self.default_port = kwargs.get("default_port", self.default_port) + self._channel_ref = None + + @property + def _channel(self): + return callable(self._channel_ref) and self._channel_ref() + + @property + def channel(self): + """If no channel exists, a new one is requested.""" + if not self._channel: + self._channel_ref = weakref.ref(self.connection.get_channel()) + return self._channel + + def establish_connection(self): + """Establish connection to the AMQP broker.""" + conninfo = self.connection + if not conninfo.port: + conninfo.port = self.default_port + credentials = pika.PlainCredentials(conninfo.userid, + conninfo.password) + return self._connection_cls(pika.ConnectionParameters( + conninfo.hostname, + port=conninfo.port, + virtual_host=conninfo.virtual_host, + credentials=credentials)) + + def close_connection(self, connection): + """Close the AMQP broker connection.""" + connection.close() + + def queue_exists(self, queue): + return False # FIXME + + def queue_delete(self, queue, if_unused=False, if_empty=False): + """Delete queue by name.""" + return self.channel.queue_delete(queue=queue, if_unused=if_unused, + if_empty=if_empty) + + def queue_purge(self, queue, **kwargs): + """Discard all messages in the queue. This will delete the messages + and results in an empty queue.""" + return self.channel.queue_purge(queue=queue) + + def queue_declare(self, queue, durable, exclusive, auto_delete, + warn_if_exists=False): + """Declare a named queue.""" + + return self.channel.queue_declare(queue=queue, + durable=durable, + exclusive=exclusive, + auto_delete=auto_delete) + + def exchange_declare(self, exchange, type, durable, auto_delete): + """Declare an named exchange.""" + return self.channel.exchange_declare(exchange=exchange, + type=type, + durable=durable, + auto_delete=auto_delete) + + def queue_bind(self, queue, exchange, routing_key, arguments=None): + """Bind queue to an exchange using a routing key.""" + return self.channel.queue_bind(queue=queue, + exchange=exchange, + routing_key=routing_key, + arguments=arguments) + + def message_to_python(self, raw_message): + """Convert encoded message body back to a Python value.""" + return self.Message(backend=self, amqp_message=raw_message) + + def get(self, queue, no_ack=False): + """Receive a message from a declared queue by name. + + :returns: A :class:`Message` object if a message was received, + ``None`` otherwise. If ``None`` was returned, it probably means + there was no messages waiting on the queue. 
+ + """ + raw_message = self.channel.basic_get(queue, no_ack=no_ack) + if not raw_message: + return None + return self.message_to_python(raw_message) + + def declare_consumer(self, queue, no_ack, callback, consumer_tag, + nowait=False): + """Declare a consumer.""" + + @functools.wraps(callback) + def _callback_decode(channel, method, header, body): + return callback((channel, method, header, body)) + + return self.channel.basic_consume(_callback_decode, + queue=queue, + no_ack=no_ack, + consumer_tag=consumer_tag) + + def consume(self, limit=None): + """Returns an iterator that waits for one message at a time.""" + for total_message_count in itertools.count(): + if limit and total_message_count >= limit: + raise StopIteration + self.connection.connection.drain_events() + yield True + + def cancel(self, consumer_tag): + """Cancel a channel by consumer tag.""" + if not self._channel: + return + self.channel.basic_cancel(consumer_tag) + + def close(self): + """Close the channel if open.""" + if self._channel and not self._channel.handler.channel_close: + self._channel.close() + self._channel_ref = None + + def ack(self, delivery_tag): + """Acknowledge a message by delivery tag.""" + return self.channel.basic_ack(delivery_tag) + + def reject(self, delivery_tag): + """Reject a message by deliver tag.""" + return self.channel.basic_reject(delivery_tag, requeue=False) + + def requeue(self, delivery_tag): + """Reject and requeue a message by delivery tag.""" + return self.channel.basic_reject(delivery_tag, requeue=True) + + def prepare_message(self, message_data, delivery_mode, priority=None, + content_type=None, content_encoding=None): + """Encapsulate data into a AMQP message.""" + properties = pika.BasicProperties(priority=priority, + content_type=content_type, + content_encoding=content_encoding, + delivery_mode=delivery_mode) + return message_data, properties + + def publish(self, message, exchange, routing_key, mandatory=None, + immediate=None, headers=None): + """Publish a message to a named exchange.""" + body, properties = message + + if headers: + properties.headers = headers + + ret = self.channel.basic_publish(body=body, + properties=properties, + exchange=exchange, + routing_key=routing_key, + mandatory=mandatory, + immediate=immediate) + if mandatory or immediate: + self.close() + + def qos(self, prefetch_size, prefetch_count, apply_global=False): + """Request specific Quality of Service.""" + self.channel.basic_qos(prefetch_size, prefetch_count, + apply_global) + + def flow(self, active): + """Enable/disable flow from peer.""" + self.channel.flow(active) + + +class AsyncoreBackend(SyncBackend): + _connection_cls = pika.AsyncoreConnection diff --git a/vendor/carrot/backends/pyamqplib.py b/vendor/carrot/backends/pyamqplib.py new file mode 100644 index 000000000000..0e97f08e34ca --- /dev/null +++ b/vendor/carrot/backends/pyamqplib.py @@ -0,0 +1,328 @@ +""" + +`amqplib`_ backend for carrot. + +.. 
_`amqplib`: http://barryp.org/software/py-amqplib/ + +""" +from amqplib import client_0_8 as amqp +from amqplib.client_0_8.exceptions import AMQPChannelException +from amqplib.client_0_8.serialization import AMQPReader, AMQPWriter +from carrot.backends.base import BaseMessage, BaseBackend +from itertools import count +import warnings +import weakref + +DEFAULT_PORT = 5672 + + +class Connection(amqp.Connection): + + def drain_events(self, allowed_methods=None): + """Wait for an event on any channel.""" + return self.wait_multi(self.channels.values()) + + def wait_multi(self, channels, allowed_methods=None): + """Wait for an event on a channel.""" + chanmap = dict((chan.channel_id, chan) for chan in channels) + chanid, method_sig, args, content = self._wait_multiple( + chanmap.keys(), allowed_methods) + + channel = chanmap[chanid] + + if content \ + and channel.auto_decode \ + and hasattr(content, 'content_encoding'): + try: + content.body = content.body.decode(content.content_encoding) + except Exception: + pass + + amqp_method = channel._METHOD_MAP.get(method_sig, None) + + if amqp_method is None: + raise Exception('Unknown AMQP method (%d, %d)' % method_sig) + + if content is None: + return amqp_method(channel, args) + else: + return amqp_method(channel, args, content) + + def _wait_multiple(self, channel_ids, allowed_methods): + for channel_id in channel_ids: + method_queue = self.channels[channel_id].method_queue + for queued_method in method_queue: + method_sig = queued_method[0] + if (allowed_methods is None) \ + or (method_sig in allowed_methods) \ + or (method_sig == (20, 40)): + method_queue.remove(queued_method) + method_sig, args, content = queued_method + return channel_id, method_sig, args, content + + # Nothing queued, need to wait for a method from the peer + while True: + channel, method_sig, args, content = \ + self.method_reader.read_method() + + if (channel in channel_ids) \ + and ((allowed_methods is None) \ + or (method_sig in allowed_methods) \ + or (method_sig == (20, 40))): + return channel, method_sig, args, content + + # Not the channel and/or method we were looking for. Queue + # this method for later + self.channels[channel].method_queue.append((method_sig, + args, + content)) + + # + # If we just queued up a method for channel 0 (the Connection + # itself) it's probably a close method in reaction to some + # error, so deal with it right away. + # + if channel == 0: + self.wait() + + +class QueueAlreadyExistsWarning(UserWarning): + """A queue with that name already exists, so a recently changed + ``routing_key`` or other settings might be ignored unless you + rename the queue or restart the broker.""" + + +class Message(BaseMessage): + """A message received by the broker. + + Usually you don't insantiate message objects yourself, but receive + them using a :class:`carrot.messaging.Consumer`. + + :param backend: see :attr:`backend`. + :param amqp_message: see :attr:`_amqp_message`. + + + .. attribute:: body + + The message body. + + .. attribute:: delivery_tag + + The message delivery tag, uniquely identifying this message. + + .. attribute:: backend + + The message backend used. + A subclass of :class:`carrot.backends.base.BaseBackend`. + + .. attribute:: _amqp_message + + A :class:`amqplib.client_0_8.basic_message.Message` instance. + This is a private attribute and should not be accessed by + production code. 
+ + """ + + def __init__(self, backend, amqp_message, **kwargs): + self._amqp_message = amqp_message + self.backend = backend + + for attr_name in ("body", + "delivery_tag", + "content_type", + "content_encoding", + "delivery_info"): + kwargs[attr_name] = getattr(amqp_message, attr_name, None) + + super(Message, self).__init__(backend, **kwargs) + + +class Backend(BaseBackend): + """amqplib backend + + :param connection: see :attr:`connection`. + + + .. attribute:: connection + + A :class:`carrot.connection.BrokerConnection` instance. An established + connection to the broker. + + """ + default_port = DEFAULT_PORT + + Message = Message + + def __init__(self, connection, **kwargs): + self.connection = connection + self.default_port = kwargs.get("default_port", self.default_port) + self._channel_ref = None + + @property + def _channel(self): + return callable(self._channel_ref) and self._channel_ref() + + @property + def channel(self): + """If no channel exists, a new one is requested.""" + if not self._channel: + self._channel_ref = weakref.ref(self.connection.get_channel()) + return self._channel + + def establish_connection(self): + """Establish connection to the AMQP broker.""" + conninfo = self.connection + if not conninfo.port: + conninfo.port = self.default_port + return Connection(host=conninfo.host, + userid=conninfo.userid, + password=conninfo.password, + virtual_host=conninfo.virtual_host, + insist=conninfo.insist, + ssl=conninfo.ssl, + connect_timeout=conninfo.connect_timeout) + + def close_connection(self, connection): + """Close the AMQP broker connection.""" + connection.close() + + def queue_exists(self, queue): + """Check if a queue has been declared. + + :rtype bool: + + """ + try: + self.channel.queue_declare(queue=queue, passive=True) + except AMQPChannelException, e: + if e.amqp_reply_code == 404: + return False + raise e + else: + return True + + def queue_delete(self, queue, if_unused=False, if_empty=False): + """Delete queue by name.""" + return self.channel.queue_delete(queue, if_unused, if_empty) + + def queue_purge(self, queue, **kwargs): + """Discard all messages in the queue. This will delete the messages + and results in an empty queue.""" + return self.channel.queue_purge(queue=queue) + + def queue_declare(self, queue, durable, exclusive, auto_delete, + warn_if_exists=False): + """Declare a named queue.""" + + if warn_if_exists and self.queue_exists(queue): + warnings.warn(QueueAlreadyExistsWarning( + QueueAlreadyExistsWarning.__doc__)) + + return self.channel.queue_declare(queue=queue, + durable=durable, + exclusive=exclusive, + auto_delete=auto_delete) + + def exchange_declare(self, exchange, type, durable, auto_delete): + """Declare an named exchange.""" + return self.channel.exchange_declare(exchange=exchange, + type=type, + durable=durable, + auto_delete=auto_delete) + + def queue_bind(self, queue, exchange, routing_key, arguments=None): + """Bind queue to an exchange using a routing key.""" + return self.channel.queue_bind(queue=queue, + exchange=exchange, + routing_key=routing_key, + arguments=arguments) + + def message_to_python(self, raw_message): + """Convert encoded message body back to a Python value.""" + return self.Message(backend=self, amqp_message=raw_message) + + def get(self, queue, no_ack=False): + """Receive a message from a declared queue by name. + + :returns: A :class:`Message` object if a message was received, + ``None`` otherwise. If ``None`` was returned, it probably means + there was no messages waiting on the queue. 
+ + """ + raw_message = self.channel.basic_get(queue, no_ack=no_ack) + if not raw_message: + return None + return self.message_to_python(raw_message) + + def declare_consumer(self, queue, no_ack, callback, consumer_tag, + nowait=False): + """Declare a consumer.""" + return self.channel.basic_consume(queue=queue, + no_ack=no_ack, + callback=callback, + consumer_tag=consumer_tag, + nowait=nowait) + + def consume(self, limit=None): + """Returns an iterator that waits for one message at a time.""" + for total_message_count in count(): + if limit and total_message_count >= limit: + raise StopIteration + self.channel.wait() + yield True + + def cancel(self, consumer_tag): + """Cancel a channel by consumer tag.""" + if not self.channel.connection: + return + self.channel.basic_cancel(consumer_tag) + + def close(self): + """Close the channel if open.""" + if self._channel and self._channel.is_open: + self._channel.close() + self._channel_ref = None + + def ack(self, delivery_tag): + """Acknowledge a message by delivery tag.""" + return self.channel.basic_ack(delivery_tag) + + def reject(self, delivery_tag): + """Reject a message by deliver tag.""" + return self.channel.basic_reject(delivery_tag, requeue=False) + + def requeue(self, delivery_tag): + """Reject and requeue a message by delivery tag.""" + return self.channel.basic_reject(delivery_tag, requeue=True) + + def prepare_message(self, message_data, delivery_mode, priority=None, + content_type=None, content_encoding=None): + """Encapsulate data into a AMQP message.""" + message = amqp.Message(message_data, priority=priority, + content_type=content_type, + content_encoding=content_encoding) + message.properties["delivery_mode"] = delivery_mode + return message + + def publish(self, message, exchange, routing_key, mandatory=None, + immediate=None, headers=None): + """Publish a message to a named exchange.""" + + if headers: + message.properties["headers"] = headers + + ret = self.channel.basic_publish(message, exchange=exchange, + routing_key=routing_key, + mandatory=mandatory, + immediate=immediate) + if mandatory or immediate: + self.close() + + def qos(self, prefetch_size, prefetch_count, apply_global=False): + """Request specific Quality of Service.""" + self.channel.basic_qos(prefetch_size, prefetch_count, + apply_global) + + def flow(self, active): + """Enable/disable flow from peer.""" + self.channel.flow(active) diff --git a/vendor/carrot/backends/pystomp.py b/vendor/carrot/backends/pystomp.py new file mode 100644 index 000000000000..b3156db84dfa --- /dev/null +++ b/vendor/carrot/backends/pystomp.py @@ -0,0 +1,192 @@ +from stompy import Client +from stompy import Empty as QueueEmpty +from carrot.backends.base import BaseMessage, BaseBackend +from itertools import count +import socket + +DEFAULT_PORT = 61613 + + +class Message(BaseMessage): + """A message received by the STOMP broker. + + Usually you don't insantiate message objects yourself, but receive + them using a :class:`carrot.messaging.Consumer`. + + :param backend: see :attr:`backend`. + :param frame: see :attr:`_frame`. + + .. attribute:: body + + The message body. + + .. attribute:: delivery_tag + + The message delivery tag, uniquely identifying this message. + + .. attribute:: backend + + The message backend used. + A subclass of :class:`carrot.backends.base.BaseBackend`. + + .. attribute:: _frame + + The frame received by the STOMP client. This is considered a private + variable and should never be used in production code. 
+ + """ + + def __init__(self, backend, frame, **kwargs): + self._frame = frame + self.backend = backend + + kwargs["body"] = frame.body + kwargs["delivery_tag"] = frame.headers["message-id"] + kwargs["content_type"] = frame.headers.get("content-type") + kwargs["content_encoding"] = frame.headers.get("content-encoding") + kwargs["priority"] = frame.headers.get("priority") + + super(Message, self).__init__(backend, **kwargs) + + def ack(self): + """Acknowledge this message as being processed., + This will remove the message from the queue. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. + + """ + if self.acknowledged: + raise self.MessageStateError( + "Message already acknowledged with state: %s" % self._state) + self.backend.ack(self._frame) + self._state = "ACK" + + def reject(self): + raise NotImplementedError( + "The STOMP backend does not implement basic.reject") + + def requeue(self): + raise NotImplementedError( + "The STOMP backend does not implement requeue") + + +class Backend(BaseBackend): + Stomp = Client + Message = Message + default_port = DEFAULT_PORT + + def __init__(self, connection, **kwargs): + self.connection = connection + self.default_port = kwargs.get("default_port", self.default_port) + self._channel = None + self._consumers = {} # open consumers by consumer tag + self._callbacks = {} + + def establish_connection(self): + conninfo = self.connection + if not conninfo.port: + conninfo.port = self.default_port + stomp = self.Stomp(conninfo.hostname, conninfo.port) + stomp.connect() + return stomp + + def close_connection(self, connection): + try: + connection.disconnect() + except socket.error: + pass + + def queue_exists(self, queue): + return True + + def queue_purge(self, queue, **kwargs): + for purge_count in count(0): + try: + frame = self.channel.get_nowait() + except QueueEmpty: + return purge_count + else: + self.channel.ack(frame) + + def declare_consumer(self, queue, no_ack, callback, consumer_tag, + **kwargs): + ack = no_ack and "auto" or "client" + self.channel.subscribe(queue, ack=ack) + self._consumers[consumer_tag] = queue + self._callbacks[queue] = callback + + def consume(self, limit=None): + """Returns an iterator that waits for one message at a time.""" + for total_message_count in count(): + if limit and total_message_count >= limit: + raise StopIteration + while True: + frame = self.channel.get() + if frame: + break + queue = frame.headers.get("destination") + + if not queue or queue not in self._callbacks: + continue + + self._callbacks[queue](frame) + + yield True + + def queue_declare(self, queue, *args, **kwargs): + self.channel.subscribe(queue, ack="client") + + def get(self, queue, no_ack=False): + try: + frame = self.channel.get_nowait() + except QueueEmpty: + return None + else: + return self.message_to_python(frame) + + def ack(self, frame): + self.channel.ack(frame) + + def message_to_python(self, raw_message): + """Convert encoded message body back to a Python value.""" + return self.Message(backend=self, frame=raw_message) + + def prepare_message(self, message_data, delivery_mode, priority=0, + content_type=None, content_encoding=None): + persistent = "false" + if delivery_mode == 2: + persistent = "true" + priority = priority or 0 + return {"body": message_data, + "persistent": persistent, + "priority": priority, + "content-encoding": content_encoding, + "content-type": content_type} + + def publish(self, message, exchange, routing_key, **kwargs): + message["destination"] = exchange + 
self.channel.stomp.send(message) + + def cancel(self, consumer_tag): + if not self._channel or consumer_tag not in self._consumers: + return + queue = self._consumers.pop(consumer_tag) + self.channel.unsubscribe(queue) + + def close(self): + for consumer_tag in self._consumers.keys(): + self.cancel(consumer_tag) + if self._channel: + try: + self._channel.disconnect() + except socket.error: + pass + + @property + def channel(self): + if not self._channel: + # Sorry, but the python-stomp library needs one connection + # for each channel. + self._channel = self.establish_connection() + return self._channel diff --git a/vendor/carrot/backends/queue.py b/vendor/carrot/backends/queue.py new file mode 100644 index 000000000000..4a6ad1799bc0 --- /dev/null +++ b/vendor/carrot/backends/queue.py @@ -0,0 +1,76 @@ +""" + + Backend for unit-tests, using the Python :mod:`Queue` module. + +""" +from Queue import Queue +from carrot.backends.base import BaseMessage, BaseBackend +import time +import itertools + +mqueue = Queue() + + +class Message(BaseMessage): + """Message received from the backend. + + See :class:`carrot.backends.base.BaseMessage`. + + """ + + +class Backend(BaseBackend): + """Backend using the Python :mod:`Queue` library. Usually only + used while executing unit tests. + + Please not that this backend does not support queues, exchanges + or routing keys, so *all messages will be sent to all consumers*. + + """ + + Message = Message + + def get(self, *args, **kwargs): + """Get the next waiting message from the queue. + + :returns: A :class:`Message` instance, or ``None`` if there is + no messages waiting. + + """ + if not mqueue.qsize(): + return None + message_data, content_type, content_encoding = mqueue.get() + return self.Message(backend=self, body=message_data, + content_type=content_type, + content_encoding=content_encoding) + + def declare_consumer(self, queue, no_ack, callback, consumer_tag, + nowait=False): + """Declare a consumer.""" + self.callback = callback + + def consume(self, limit=None): + """Go into consume mode.""" + for total_message_count in itertools.count(): + if limit and total_message_count >= limit: + raise StopIteration + + message = self.get() + if message: + self.callback(message.decode(), message) + yield True + else: + time.sleep(0.1) + + def purge(self, queue, **kwargs): + """Discard all messages in the queue.""" + mqueue = Queue() + + def prepare_message(self, message_data, delivery_mode, + content_type, content_encoding, **kwargs): + """Prepare message for sending.""" + return (message_data, content_type, content_encoding) + + def publish(self, message, exchange, routing_key, **kwargs): + """Publish a message to the queue.""" + mqueue.put(message) diff --git a/vendor/carrot/connection.py b/vendor/carrot/connection.py new file mode 100644 index 000000000000..e392ec1d7fa4 --- /dev/null +++ b/vendor/carrot/connection.py @@ -0,0 +1,229 @@ +""" + +Getting a connection to the AMQP server. + +""" +from amqplib.client_0_8.connection import AMQPConnectionException +from carrot.backends import get_backend_cls +import warnings +import socket + +DEFAULT_CONNECT_TIMEOUT = 5 # seconds +SETTING_PREFIX = "BROKER" +COMPAT_SETTING_PREFIX = "AMQP" +ARG_TO_DJANGO_SETTING = { + "hostname": "HOST", + "userid": "USER", + "password": "PASSWORD", + "virtual_host": "VHOST", + "port": "PORT", +} +SETTING_DEPRECATED_FMT = "Setting %s has been renamed to %s and is " \ + "scheduled for removal in version 1.0." 
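# Editorial usage sketch (not part of upstream carrot): the in-memory test
# backend from carrot/backends/queue.py above can be exercised without a
# running broker. Only APIs visible in this patch are used; the hostname and
# credentials below are placeholder values.
#
#     from carrot.connection import BrokerConnection
#
#     conn = BrokerConnection(hostname="localhost", userid="guest",
#                             password="guest", backend_cls="memory")
#     backend = conn.create_backend()          # resolves the "memory" alias
#     msg = backend.prepare_message("hello", delivery_mode=2,
#                                   content_type="text/plain",
#                                   content_encoding="utf-8")
#     backend.publish(msg, exchange="test", routing_key="test")
#     print backend.get().body                 # -> "hello"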
+ + +class BrokerConnection(object): + """A network/socket connection to an AMQP message broker. + + :param hostname: see :attr:`hostname`. + :param userid: see :attr:`userid`. + :param password: see :attr:`password`. + + :keyword virtual_host: see :attr:`virtual_host`. + :keyword port: see :attr:`port`. + :keyword insist: see :attr:`insist`. + :keyword connect_timeout: see :attr:`connect_timeout`. + :keyword ssl: see :attr:`ssl`. + + .. attribute:: hostname + + The hostname to the AMQP server + + .. attribute:: userid + + A valid username used to authenticate to the server. + + .. attribute:: password + + The password used to authenticate to the server. + + .. attribute:: virtual_host + + The name of the virtual host to work with. This virtual host must + exist on the server, and the user must have access to it. Consult + your brokers manual for help with creating, and mapping + users to virtual hosts. + Default is ``"/"``. + + .. attribute:: port + + The port of the AMQP server. Default is ``5672`` (amqp). + + .. attribute:: insist + + Insist on connecting to a server. In a configuration with multiple + load-sharing servers, the insist option tells the server that the + client is insisting on a connection to the specified server. + Default is ``False``. + + .. attribute:: connect_timeout + + The timeout in seconds before we give up connecting to the server. + The default is no timeout. + + .. attribute:: ssl + + Use SSL to connect to the server. + The default is ``False``. + + .. attribute:: backend_cls + + The messaging backend class used. Defaults to the ``pyamqplib`` + backend. + + """ + virtual_host = "/" + port = None + insist = False + connect_timeout = DEFAULT_CONNECT_TIMEOUT + ssl = False + _closed = True + backend_cls = None + + ConnectionException = AMQPConnectionException + + @property + def host(self): + """The host as a hostname/port pair separated by colon.""" + return ":".join([self.hostname, str(self.port)]) + + def __init__(self, hostname=None, userid=None, password=None, + virtual_host=None, port=None, **kwargs): + self.hostname = hostname + self.userid = userid + self.password = password + self.virtual_host = virtual_host or self.virtual_host + self.port = port or self.port + self.insist = kwargs.get("insist", self.insist) + self.connect_timeout = kwargs.get("connect_timeout", + self.connect_timeout) + self.ssl = kwargs.get("ssl", self.ssl) + self.backend_cls = kwargs.get("backend_cls", None) + self._closed = None + self._connection = None + + @property + def connection(self): + if self._closed == True: + return + if not self._connection: + self._connection = self._establish_connection() + self._closed = False + return self._connection + + def __enter__(self): + return self + + def __exit__(self, e_type, e_value, e_trace): + if e_type: + raise e_type(e_value) + self.close() + + def _establish_connection(self): + return self.create_backend().establish_connection() + + def get_backend_cls(self): + """Get the currently used backend class.""" + backend_cls = self.backend_cls + if not backend_cls or isinstance(backend_cls, basestring): + backend_cls = get_backend_cls(backend_cls) + return backend_cls + + def create_backend(self): + """Create a new instance of the current backend in + :attr:`backend_cls`.""" + backend_cls = self.get_backend_cls() + return backend_cls(connection=self) + + def get_channel(self): + """Request a new AMQP channel.""" + return self.connection.channel() + + def connect(self): + """Establish a connection to the AMQP server.""" + self._closed = False 
+ return self.connection + + def close(self): + """Close the currently open connection.""" + try: + if self._connection: + backend = self.create_backend() + backend.close_connection(self._connection) + except socket.error: + pass + self._closed = True + +# For backwards compatability. +AMQPConnection = BrokerConnection + + +def get_django_conninfo(): + # FIXME can't wait to remove this mess in 1.0 [askh] + ci = {} + from django.conf import settings as django_settings + + ci["backend_cls"] = getattr(django_settings, "CARROT_BACKEND", None) + + for arg_name, setting_name in ARG_TO_DJANGO_SETTING.items(): + setting = "%s_%s" % (SETTING_PREFIX, setting_name) + compat_setting = "%s_%s" % (COMPAT_SETTING_PREFIX, setting_name) + if hasattr(django_settings, setting): + ci[arg_name] = getattr(django_settings, setting, None) + elif hasattr(django_settings, compat_setting): + ci[arg_name] = getattr(django_settings, compat_setting, None) + warnings.warn(DeprecationWarning(SETTING_DEPRECATED_FMT % ( + compat_setting, setting))) + + if "hostname" not in ci: + if hasattr(django_settings, "AMQP_SERVER"): + ci["hostname"] = django_settings.AMQP_SERVER + warnings.warn(DeprecationWarning( + "AMQP_SERVER has been renamed to BROKER_HOST and is" + "scheduled for removal in version 1.0.")) + + return ci + + +class DjangoBrokerConnection(BrokerConnection): + """A version of :class:`BrokerConnection` that takes configuration + from the Django ``settings.py`` module. + + :keyword hostname: The hostname of the AMQP server to connect to, + if not provided this is taken from ``settings.BROKER_HOST``. + + :keyword userid: The username of the user to authenticate to the server + as. If not provided this is taken from ``settings.BROKER_USER``. + + :keyword password: The users password. If not provided this is taken + from ``settings.BROKER_PASSWORD``. + + :keyword virtual_host: The name of the virtual host to work with. + This virtual host must exist on the server, and the user must + have access to it. Consult your brokers manual for help with + creating, and mapping users to virtual hosts. If not provided + this is taken from ``settings.BROKER_VHOST``. + + :keyword port: The port the AMQP server is running on. If not provided + this is taken from ``settings.BROKER_PORT``, or if that is not set, + the default is ``5672`` (amqp). + + """ + + + def __init__(self, *args, **kwargs): + kwargs = dict(get_django_conninfo(), **kwargs) + super(DjangoBrokerConnection, self).__init__(*args, **kwargs) + +# For backwards compatability. +DjangoAMQPConnection = DjangoBrokerConnection diff --git a/vendor/carrot/messaging.py b/vendor/carrot/messaging.py new file mode 100644 index 000000000000..d82a85110c18 --- /dev/null +++ b/vendor/carrot/messaging.py @@ -0,0 +1,981 @@ +""" + +Sending/Receiving Messages. + +""" +from itertools import count +from carrot.utils import gen_unique_id +import warnings + +from carrot import serialization + + +class Consumer(object): + """Message consumer. + + :param connection: see :attr:`connection`. + :param queue: see :attr:`queue`. + :param exchange: see :attr:`exchange`. + :param routing_key: see :attr:`routing_key`. + + :keyword durable: see :attr:`durable`. + :keyword auto_delete: see :attr:`auto_delete`. + :keyword exclusive: see :attr:`exclusive`. + :keyword exchange_type: see :attr:`exchange_type`. + :keyword auto_ack: see :attr:`auto_ack`. + :keyword no_ack: see :attr:`no_ack`. + :keyword auto_declare: see :attr:`auto_declare`. + + + .. attribute:: connection + + The connection to the broker. 
+ A :class:`carrot.connection.BrokerConnection` instance. + + .. attribute:: queue + + Name of the queue. + + .. attribute:: exchange + + Name of the exchange the queue binds to. + + .. attribute:: routing_key + + The routing key (if any). The interpretation of the routing key + depends on the value of the :attr:`exchange_type` attribute: + + * direct exchange + + Matches if the routing key property of the message and + the :attr:`routing_key` attribute are identical. + + * fanout exchange + + Always matches, even if the binding does not have a key. + + * topic exchange + + Matches the routing key property of the message by a primitive + pattern matching scheme. The message routing key then consists + of words separated by dots (``"."``, like domain names), and + two special characters are available; star (``"*"``) and hash + (``"#"``). The star matches any word, and the hash matches + zero or more words. For example ``"*.stock.#"`` matches the + routing keys ``"usd.stock"`` and ``"eur.stock.db"`` but not + ``"stock.nasdaq"``. + + .. attribute:: durable + + Durable exchanges remain active when a server restarts. Non-durable + exchanges (transient exchanges) are purged when a server restarts. + Default is ``True``. + + .. attribute:: auto_delete + + If set, the exchange is deleted when all queues have finished + using it. Default is ``False``. + + .. attribute:: exclusive + + Exclusive queues may only be consumed from by the current connection. + When :attr:`exclusive` is on, this also implies :attr:`auto_delete`. + Default is ``False``. + + .. attribute:: exchange_type + + AMQP defines four default exchange types (routing algorithms) that + covers most of the common messaging use cases. An AMQP broker can + also define additional exchange types, so see your message brokers + manual for more information about available exchange types. + + * Direct + + Direct match between the routing key in the message, and the + routing criteria used when a queue is bound to this exchange. + + * Topic + + Wildcard match between the routing key and the routing pattern + specified in the binding. The routing key is treated as zero + or more words delimited by ``"."`` and supports special + wildcard characters. ``"*"`` matches a single word and ``"#"`` + matches zero or more words. + + * Fanout + + Queues are bound to this exchange with no arguments. Hence any + message sent to this exchange will be forwarded to all queues + bound to this exchange. + + * Headers + + Queues are bound to this exchange with a table of arguments + containing headers and values (optional). A special argument + named "x-match" determines the matching algorithm, where + ``"all"`` implies an ``AND`` (all pairs must match) and + ``"any"`` implies ``OR`` (at least one pair must match). + + Use the :attr:`routing_key`` is used to specify the arguments, + the same when sending messages. + + This description of AMQP exchange types was shamelessly stolen + from the blog post `AMQP in 10 minutes: Part 4`_ by + Rajith Attapattu. Recommended reading. + + .. _`AMQP in 10 minutes: Part 4`: + http://bit.ly/amqp-exchange-types + + .. attribute:: callbacks + + List of registered callbacks to trigger when a message is received + by :meth:`wait`, :meth:`process_next` or :meth:`iterqueue`. + + .. attribute:: warn_if_exists + + Emit a warning if the queue has already been declared. 
If a queue + already exists, and you try to redeclare the queue with new settings, + the new settings will be silently ignored, so this can be + useful if you've recently changed the :attr:`routing_key` attribute + or other settings. + + .. attribute:: auto_ack + + Acknowledgement is handled automatically once messages are received. + This means that the :meth:`carrot.backends.base.BaseMessage.ack` and + :meth:`carrot.backends.base.BaseMessage.reject` methods + on the message object are no longer valid. + By default :attr:`auto_ack` is set to ``False``, and the receiver is + required to manually handle acknowledgment. + + .. attribute:: no_ack + + Disable acknowledgement on the server-side. This is different from + :attr:`auto_ack` in that acknowledgement is turned off altogether. + This functionality increases performance but at the cost of + reliability. Messages can get lost if a client dies before it can + deliver them to the application. + + .. attribute auto_declare + + If this is ``True`` the following will be automatically declared: + + * The queue if :attr:`queue` is set. + * The exchange if :attr:`exchange` is set. + * The :attr:`queue` will be bound to the :attr:`exchange`. + + This is the default behaviour. + + + :raises `amqplib.client_0_8.channel.AMQPChannelException`: if the queue is + exclusive and the queue already exists and is owned by another + connection. + + + Example Usage + + >>> consumer = Consumer(connection=DjangoBrokerConnection(), + ... queue="foo", exchange="foo", routing_key="foo") + >>> def process_message(message_data, message): + ... print("Got message %s: %s" % ( + ... message.delivery_tag, message_data)) + >>> consumer.register_callback(process_message) + >>> consumer.wait() # Go into receive loop + + """ + queue = "" + exchange = "" + routing_key = "" + durable = True + exclusive = False + auto_delete = False + exchange_type = "direct" + channel_open = False + warn_if_exists = False + auto_declare = True + auto_ack = False + no_ack = False + _closed = True + + def __init__(self, connection, queue=None, exchange=None, + routing_key=None, **kwargs): + self.connection = connection + self.backend = kwargs.get("backend", None) + if not self.backend: + self.backend = self.connection.create_backend() + self.queue = queue or self.queue + + # Binding. + self.queue = queue or self.queue + self.exchange = exchange or self.exchange + self.routing_key = routing_key or self.routing_key + self.callbacks = [] + + # Options + self.durable = kwargs.get("durable", self.durable) + self.exclusive = kwargs.get("exclusive", self.exclusive) + self.auto_delete = kwargs.get("auto_delete", self.auto_delete) + self.exchange_type = kwargs.get("exchange_type", self.exchange_type) + self.warn_if_exists = kwargs.get("warn_if_exists", + self.warn_if_exists) + self.auto_ack = kwargs.get("auto_ack", self.auto_ack) + self.auto_declare = kwargs.get("auto_declare", self.auto_declare) + + # exclusive implies auto-delete. + if self.exclusive: + self.auto_delete = True + + self.consumer_tag = self._generate_consumer_tag() + + if self.auto_declare: + self.declare() + + def __enter__(self): + return self + + def __exit__(self, e_type, e_value, e_trace): + if e_type: + raise e_type(e_value) + self.close() + + def __iter__(self): + """iter(Consumer) -> Consumer.iterqueue(infinite=True)""" + return self.iterqueue(infinite=True) + + def _generate_consumer_tag(self): + """Generate a unique consumer tag. 
+ + :rtype string: + + """ + return "%s.%s-%s" % ( + self.__class__.__module__, + self.__class__.__name__, + gen_unique_id()) + + def declare(self): + """Declares the queue, the exchange and binds the queue to + the exchange.""" + arguments = None + routing_key = self.routing_key + if self.exchange_type == "headers": + arguments, routing_key = routing_key, "" + + if self.queue: + self.backend.queue_declare(queue=self.queue, durable=self.durable, + exclusive=self.exclusive, + auto_delete=self.auto_delete, + warn_if_exists=self.warn_if_exists) + if self.exchange: + self.backend.exchange_declare(exchange=self.exchange, + type=self.exchange_type, + durable=self.durable, + auto_delete=self.auto_delete) + if self.queue: + self.backend.queue_bind(queue=self.queue, + exchange=self.exchange, + routing_key=routing_key, + arguments=arguments) + self._closed = False + return self + + def _receive_callback(self, raw_message): + """Internal method used when a message is received in consume mode.""" + message = self.backend.message_to_python(raw_message) + + if self.auto_ack and not message.acknowledged: + message.ack() + self.receive(message.payload, message) + + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Receive the next message waiting on the queue. + + :returns: A :class:`carrot.backends.base.BaseMessage` instance, + or ``None`` if there's no messages to be received. + + :keyword enable_callbacks: Enable callbacks. The message will be + processed with all registered callbacks. Default is disabled. + :keyword auto_ack: Override the default :attr:`auto_ack` setting. + :keyword no_ack: Override the default :attr:`no_ack` setting. + + """ + no_ack = no_ack or self.no_ack + auto_ack = auto_ack or self.auto_ack + message = self.backend.get(self.queue, no_ack=no_ack) + if message: + if auto_ack and not message.acknowledged: + message.ack() + if enable_callbacks: + self.receive(message.payload, message) + return message + + def process_next(self): + """**DEPRECATED** Use :meth:`fetch` like this instead: + + >>> message = self.fetch(enable_callbacks=True) + + """ + warnings.warn(DeprecationWarning( + "Consumer.process_next has been deprecated in favor of \ + Consumer.fetch(enable_callbacks=True)")) + return self.fetch(enable_callbacks=True) + + def receive(self, message_data, message): + """This method is called when a new message is received by + running :meth:`wait`, :meth:`process_next` or :meth:`iterqueue`. + + When a message is received, it passes the message on to the + callbacks listed in the :attr:`callbacks` attribute. + You can register callbacks using :meth:`register_callback`. + + :param message_data: The deserialized message data. + + :param message: The :class:`carrot.backends.base.BaseMessage` instance. + + :raises NotImplementedError: If no callbacks has been registered. + + """ + if not self.callbacks: + raise NotImplementedError("No consumer callbacks registered") + for callback in self.callbacks: + callback(message_data, message) + + def register_callback(self, callback): + """Register a callback function to be triggered by :meth:`receive`. + + The ``callback`` function must take two arguments: + + * message_data + + The deserialized message data + + * message + + The :class:`carrot.backends.base.BaseMessage` instance. + """ + self.callbacks.append(callback) + + def discard_all(self, filterfunc=None): + """Discard all waiting messages. + + :param filterfunc: A filter function to only discard the messages this + filter returns. 
+ + :returns: the number of messages discarded. + + *WARNING*: All incoming messages will be ignored and not processed. + + Example using filter: + + >>> def waiting_feeds_only(message): + ... try: + ... message_data = message.decode() + ... except: # Should probably be more specific. + ... pass + ... + ... if message_data.get("type") == "feed": + ... return True + ... else: + ... return False + """ + if not filterfunc: + return self.backend.queue_purge(self.queue) + + if self.no_ack or self.auto_ack: + raise Exception("discard_all: Can't use filter with auto/no-ack.") + + discarded_count = 0 + while True: + message = self.fetch() + if message is None: + return discarded_count + + if filterfunc(message): + message.ack() + discarded_count += 1 + + def iterconsume(self, limit=None, no_ack=None): + """Iterator processing new messages as they arrive. + Every new message will be passed to the callbacks, and the iterator + returns ``True``. The iterator is infinite unless the ``limit`` + argument is specified or someone closes the consumer. + + :meth:`iterconsume` uses transient requests for messages on the + server, while :meth:`iterequeue` uses synchronous access. In most + cases you want :meth:`iterconsume`, but if your environment does not + support this behaviour you can resort to using :meth:`iterqueue` + instead. + + Also, :meth:`iterconsume` does not return the message + at each step, something which :meth:`iterqueue` does. + + :keyword limit: Maximum number of messages to process. + + :raises StopIteration: if limit is set and the message limit has been + reached. + + """ + no_ack = no_ack or self.no_ack + self.backend.declare_consumer(queue=self.queue, no_ack=no_ack, + callback=self._receive_callback, + consumer_tag=self.consumer_tag, + nowait=True) + self.channel_open = True + return self.backend.consume(limit=limit) + + def wait(self, limit=None): + """Go into consume mode. + + Mostly for testing purposes and simple programs, you probably + want :meth:`iterconsume` or :meth:`iterqueue` instead. + + This runs an infinite loop, processing all incoming messages + using :meth:`receive` to apply the message to all registered + callbacks. + + """ + it = self.iterconsume(limit) + while True: + it.next() + + def iterqueue(self, limit=None, infinite=False): + """Infinite iterator yielding pending messages, by using + synchronous direct access to the queue (``basic_get``). + + :meth:`iterqueue` is used where synchronous functionality is more + important than performance. If you can, use :meth:`iterconsume` + instead. + + :keyword limit: If set, the iterator stops when it has processed + this number of messages in total. + + :keyword infinite: Don't raise :exc:`StopIteration` if there is no + messages waiting, but return ``None`` instead. If infinite you + obviously shouldn't consume the whole iterator at once without + using a ``limit``. + + :raises StopIteration: If there is no messages waiting, and the + iterator is not infinite. 
+ + """ + for items_since_start in count(): + item = self.fetch() + if (not infinite and item is None) or \ + (limit and items_since_start >= limit): + raise StopIteration + yield item + + def cancel(self): + """Cancel a running :meth:`iterconsume` session.""" + if self.channel_open: + try: + self.backend.cancel(self.consumer_tag) + except KeyError: + pass + + def close(self): + """Close the channel to the queue.""" + self.cancel() + self.backend.close() + self._closed = True + + def flow(self, active): + """This method asks the peer to pause or restart the flow of + content data. + + This is a simple flow-control mechanism that a + peer can use to avoid oveflowing its queues or otherwise + finding itself receiving more messages than it can process. + Note that this method is not intended for window control. The + peer that receives a request to stop sending content should + finish sending the current content, if any, and then wait + until it receives the ``flow(active=True)`` restart method. + + """ + self.backend.flow(active) + + def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): + """Request specific Quality of Service. + + This method requests a specific quality of service. The QoS + can be specified for the current channel or for all channels + on the connection. The particular properties and semantics of + a qos method always depend on the content class semantics. + Though the qos method could in principle apply to both peers, + it is currently meaningful only for the server. + + :param prefetch_size: Prefetch window in octets. + The client can request that messages be sent in + advance so that when the client finishes processing a + message, the following message is already held + locally, rather than needing to be sent down the + channel. Prefetching gives a performance improvement. + This field specifies the prefetch window size in + octets. The server will send a message in advance if + it is equal to or smaller in size than the available + prefetch size (and also falls into other prefetch + limits). May be set to zero, meaning "no specific + limit", although other prefetch limits may still + apply. The ``prefetch_size`` is ignored if the + :attr:`no_ack` option is set. + + :param prefetch_count: Specifies a prefetch window in terms of whole + messages. This field may be used in combination with + ``prefetch_size``; A message will only be sent + in advance if both prefetch windows (and those at the + channel and connection level) allow it. The prefetch- + count is ignored if the :attr:`no_ack` option is set. + + :keyword apply_global: By default the QoS settings apply to the + current channel only. If this is set, they are applied + to the entire connection. + + """ + return self.backend.qos(prefetch_size, prefetch_count, apply_global) + + +class Publisher(object): + """Message publisher. + + :param connection: see :attr:`connection`. + :param exchange: see :attr:`exchange`. + :param routing_key: see :attr:`routing_key`. + + :keyword exchange_type: see :attr:`Consumer.exchange_type`. + :keyword durable: see :attr:`Consumer.durable`. + :keyword auto_delete: see :attr:`Consumer.auto_delete`. + :keyword serializer: see :attr:`serializer`. + :keyword auto_declare: See :attr:`auto_declare`. + + + .. attribute:: connection + + The connection to the broker. + A :class:`carrot.connection.BrokerConnection` instance. + + .. attribute:: exchange + + Name of the exchange we send messages to. + + .. 
attribute:: routing_key + + The default routing key for messages sent using this publisher. + See :attr:`Consumer.routing_key` for more information. + You can override the routing key by passing an explicit + ``routing_key`` argument to :meth:`send`. + + .. attribute:: delivery_mode + + The default delivery mode used for messages. The value is an integer. + The following delivery modes are supported by (at least) RabbitMQ: + + * 1 or "non-persistent" + + The message is non-persistent. Which means it is stored in + memory only, and is lost if the server dies or restarts. + + * 2 or "persistent" + The message is persistent. Which means the message is + stored both in-memory, and on disk, and therefore + preserved if the server dies or restarts. + + The default value is ``2`` (persistent). + + .. attribute:: exchange_type + + See :attr:`Consumer.exchange_type`. + + .. attribute:: durable + + See :attr:`Consumer.durable`. + + .. attribute:: auto_delete + + See :attr:`Consumer.auto_delete`. + + .. attribute:: auto_declare + + If this is ``True`` and the :attr:`exchange` name is set, the exchange + will be automatically declared at instantiation. + You can manually the declare the exchange by using the :meth:`declare` + method. + + Auto declare is on by default. + + .. attribute:: serializer + + A string identifying the default serialization method to use. + Defaults to ``json``. Can be ``json`` (default), ``raw``, + ``pickle``, ``hessian``, ``yaml``, or any custom serialization + methods that have been registered with + :mod:`carrot.serialization.registry`. + + """ + + NONE_PERSISTENT_DELIVERY_MODE = 1 + PERSISTENT_DELIVERY_MODE = 2 + DELIVERY_MODES = { + "non-persistent": NONE_PERSISTENT_DELIVERY_MODE, + "persistent": PERSISTENT_DELIVERY_MODE, + } + + exchange = "" + routing_key = "" + delivery_mode = PERSISTENT_DELIVERY_MODE + _closed = True + exchange_type = "direct" + durable = True + auto_delete = False + auto_declare = True + serializer = None + + def __init__(self, connection, exchange=None, routing_key=None, **kwargs): + self.connection = connection + self.backend = self.connection.create_backend() + self.exchange = exchange or self.exchange + self.routing_key = routing_key or self.routing_key + self.delivery_mode = kwargs.get("delivery_mode", self.delivery_mode) + self.delivery_mode = self.DELIVERY_MODES.get(self.delivery_mode, + self.delivery_mode) + self.exchange_type = kwargs.get("exchange_type", self.exchange_type) + self.durable = kwargs.get("durable", self.durable) + self.auto_delete = kwargs.get("auto_delete", self.auto_delete) + self.serializer = kwargs.get("serializer", self.serializer) + self.auto_declare = kwargs.get("auto_declare", self.auto_declare) + self._closed = False + + if self.auto_declare and self.exchange: + self.declare() + + def declare(self): + """Declare the exchange. + + Creates the exchange on the broker. + + """ + self.backend.exchange_declare(exchange=self.exchange, + type=self.exchange_type, + durable=self.durable, + auto_delete=self.auto_delete) + + def __enter__(self): + return self + + def __exit__(self, e_type, e_value, e_trace): + if e_type: + raise e_type(e_value) + self.close() + + def create_message(self, message_data, delivery_mode=None, priority=None, + content_type=None, content_encoding=None, + serializer=None): + """With any data, serialize it and encapsulate it in a AMQP + message with the proper headers set.""" + + delivery_mode = delivery_mode or self.delivery_mode + + # No content_type? Then we're serializing the data internally. 
+ if not content_type: + serializer = serializer or self.serializer + (content_type, content_encoding, + message_data) = serialization.encode(message_data, + serializer=serializer) + else: + # If the programmer doesn't want us to serialize, + # make sure content_encoding is set. + if isinstance(message_data, unicode): + if not content_encoding: + content_encoding = 'utf-8' + message_data = message_data.encode(content_encoding) + + # If they passed in a string, we can't know anything + # about it. So assume it's binary data. + elif not content_encoding: + content_encoding = 'binary' + + return self.backend.prepare_message(message_data, delivery_mode, + priority=priority, + content_type=content_type, + content_encoding=content_encoding) + + def send(self, message_data, routing_key=None, delivery_mode=None, + mandatory=False, immediate=False, priority=0, content_type=None, + content_encoding=None, serializer=None): + """Send a message. + + :param message_data: The message data to send. Can be a list, + dictionary or a string. + + :keyword routing_key: A custom routing key for the message. + If not set, the default routing key set in the :attr:`routing_key` + attribute is used. + + :keyword mandatory: If set, the message has mandatory routing. + By default the message is silently dropped by the server if it + can't be routed to a queue. However - If the message is mandatory, + an exception will be raised instead. + + :keyword immediate: Request immediate delivery. + If the message cannot be routed to a queue consumer immediately, + an exception will be raised. This is instead of the default + behaviour, where the server will accept and queue the message, + but with no guarantee that the message will ever be consumed. + + :keyword delivery_mode: Override the default :attr:`delivery_mode`. + + :keyword priority: The message priority, ``0`` to ``9``. + + :keyword content_type: The messages content_type. If content_type + is set, no serialization occurs as it is assumed this is either + a binary object, or you've done your own serialization. + Leave blank if using built-in serialization as our library + properly sets content_type. + + :keyword content_encoding: The character set in which this object + is encoded. Use "binary" if sending in raw binary objects. + Leave blank if using built-in serialization as our library + properly sets content_encoding. + + :keyword serializer: Override the default :attr:`serializer`. 
+ + """ + headers = None + routing_key = routing_key or self.routing_key + + if self.exchange_type == "headers": + headers, routing_key = routing_key, "" + + + message = self.create_message(message_data, priority=priority, + delivery_mode=delivery_mode, + content_type=content_type, + content_encoding=content_encoding, + serializer=serializer) + self.backend.publish(message, + exchange=self.exchange, routing_key=routing_key, + mandatory=mandatory, immediate=immediate, + headers=headers) + + def close(self): + """Close connection to queue.""" + self.backend.close() + self._closed = True + + +class Messaging(object): + """A combined message publisher and consumer.""" + queue = "" + exchange = "" + routing_key = "" + publisher_cls = Publisher + consumer_cls = Consumer + _closed = True + + def __init__(self, connection, **kwargs): + self.connection = connection + self.exchange = kwargs.get("exchange", self.exchange) + self.queue = kwargs.get("queue", self.queue) + self.routing_key = kwargs.get("routing_key", self.routing_key) + self.publisher = self.publisher_cls(connection, + exchange=self.exchange, routing_key=self.routing_key) + self.consumer = self.consumer_cls(connection, queue=self.queue, + exchange=self.exchange, routing_key=self.routing_key) + self.consumer.register_callback(self.receive) + self.callbacks = [] + self._closed = False + + def __enter__(self): + return self + + def __exit__(self, e_type, e_value, e_trace): + if e_type: + raise e_type(e_value) + self.close() + + def register_callback(self, callback): + """See :meth:`Consumer.register_callback`""" + self.callbacks.append(callback) + + def receive(self, message_data, message): + """See :meth:`Consumer.receive`""" + if not self.callbacks: + raise NotImplementedError("No consumer callbacks registered") + for callback in self.callbacks: + callback(message_data, message) + + def send(self, message_data, delivery_mode=None): + """See :meth:`Publisher.send`""" + self.publisher.send(message_data, delivery_mode=delivery_mode) + + def fetch(self, **kwargs): + """See :meth:`Consumer.fetch`""" + return self.consumer.fetch(**kwargs) + + def close(self): + """Close any open channels.""" + self.consumer.close() + self.publisher.close() + self._closed = True + + +class ConsumerSet(object): + """Receive messages from multiple consumers. + + :param connection: see :attr:`connection`. + :param from_dict: see :attr:`from_dict`. + :param consumers: see :attr:`consumers`. + :param callbacks: see :attr:`callbacks`. + + .. attribute:: connection + + The connection to the broker. + A :class:`carrot.connection.BrokerConnection` instance. + + .. attribute:: callbacks + + A list of callbacks to be called when a message is received. + See :class:`Consumer.register_callback`. + + .. attribute:: from_dict + + Add consumers from a dictionary configuration:: + + { + "webshot": { + "exchange": "link_exchange", + "exchange_type": "topic", + "binding_key": "links.webshot", + "default_routing_key": "links.webshot", + }, + "retrieve": { + "exchange": "link_exchange", + "exchange_type" = "topic", + "binding_key": "links.*", + "default_routing_key": "links.retrieve", + "auto_delete": True, + # ... + }, + } + + .. attribute:: consumers + + Add consumers from a list of :class:`Consumer` instances. + + .. attribute:: auto_ack + + Default value for the :attr:`Consumer.auto_ack` attribute. 
+ + """ + auto_ack = False + + def __init__(self, connection, from_dict=None, consumers=None, + callbacks=None, **options): + self.connection = connection + self.options = options + self.from_dict = from_dict or {} + self.consumers = consumers or [] + self.callbacks = callbacks or [] + self._open_consumers = [] + + self.backend = self.connection.create_backend() + + self.auto_ack = options.get("auto_ack", self.auto_ack) + + [self.add_consumer_from_dict(queue_name, **queue_options) + for queue_name, queue_options in self.from_dict.items()] + + def _receive_callback(self, raw_message): + """Internal method used when a message is received in consume mode.""" + message = self.backend.message_to_python(raw_message) + if self.auto_ack and not message.acknowledged: + message.ack() + self.receive(message.decode(), message) + + def add_consumer_from_dict(self, queue, **options): + """Add another consumer from dictionary configuration.""" + consumer = Consumer(self.connection, queue=queue, + backend=self.backend, **options) + self.consumers.append(consumer) + + def add_consumer(self, consumer): + """Add another consumer from a :class:`Consumer` instance.""" + consumer.backend = self.backend + self.consumers.append(consumer) + + def register_callback(self, callback): + """Register new callback to be called when a message is received. + See :meth:`Consumer.register_callback`""" + self.callbacks.append(callback) + + def receive(self, message_data, message): + """What to do when a message is received. + See :meth:`Consumer.receive`.""" + if not self.callbacks: + raise NotImplementedError("No consumer callbacks registered") + for callback in self.callbacks: + callback(message_data, message) + + def _declare_consumer(self, consumer, nowait=False): + """Declare consumer so messages can be received from it using + :meth:`iterconsume`.""" + # Use the ConsumerSet's consumer by default, but if the + # child consumer has a callback, honor it. + callback = consumer.callbacks and \ + consumer._receive_callback or self._receive_callback + self.backend.declare_consumer(queue=consumer.queue, + no_ack=consumer.no_ack, + nowait=nowait, + callback=callback, + consumer_tag=consumer.consumer_tag) + self._open_consumers.append(consumer.consumer_tag) + + def iterconsume(self, limit=None): + """Cycle between all consumers in consume mode. + + See :meth:`Consumer.iterconsume`. + """ + head = self.consumers[:-1] + tail = self.consumers[-1] + [self._declare_consumer(consumer, nowait=True) + for consumer in head] + self._declare_consumer(tail, nowait=False) + + return self.backend.consume(limit=limit) + + def discard_all(self): + """Discard all messages. Does not support filtering. + See :meth:`Consumer.discard_all`.""" + return sum([consumer.discard_all() + for consumer in self.consumers]) + + def flow(self, active): + """This method asks the peer to pause or restart the flow of + content data. + + See :meth:`Consumer.flow`. + + """ + self.backend.flow(active) + + def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): + """Request specific Quality of Service. + + See :meth:`Consumer.cos`. 
+ + """ + self.backend.qos(prefetch_size, prefetch_count, apply_global) + + def cancel(self): + """Cancel a running :meth:`iterconsume` session.""" + for consumer_tag in self._open_consumers: + try: + self.backend.cancel(consumer_tag) + except KeyError: + pass + self._open_consumers = [] + + def close(self): + """Close all consumers.""" + self.cancel() + for consumer in self.consumers: + consumer.close() diff --git a/vendor/carrot/serialization.py b/vendor/carrot/serialization.py new file mode 100644 index 000000000000..508836fe89ec --- /dev/null +++ b/vendor/carrot/serialization.py @@ -0,0 +1,253 @@ +""" +Centralized support for encoding/decoding of data structures. +Requires a json library (`cjson`_, `simplejson`_, or `Python 2.6+`_). + +Optionally installs support for ``YAML`` if the necessary +PyYAML is installed. + +.. _`cjson`: http://pypi.python.org/pypi/python-cjson/ +.. _`simplejson`: http://code.google.com/p/simplejson/ +.. _`Python 2.6+`: http://docs.python.org/library/json.html +.. _`PyYAML`: http://pyyaml.org/ + +""" + +import codecs + +__all__ = ['SerializerNotInstalled', 'registry'] + + +class SerializerNotInstalled(StandardError): + """Support for the requested serialization type is not installed""" + + +class SerializerRegistry(object): + """The registry keeps track of serialization methods.""" + + def __init__(self): + self._encoders = {} + self._decoders = {} + self._default_encode = None + self._default_content_type = None + self._default_content_encoding = None + + def register(self, name, encoder, decoder, content_type, + content_encoding='utf-8'): + """Register a new encoder/decoder. + + :param name: A convenience name for the serialization method. + + :param encoder: A method that will be passed a python data structure + and should return a string representing the serialized data. + If ``None``, then only a decoder will be registered. Encoding + will not be possible. + + :param decoder: A method that will be passed a string representing + serialized data and should return a python data structure. + If ``None``, then only an encoder will be registered. + Decoding will not be possible. + + :param content_type: The mime-type describing the serialized + structure. + + :param content_encoding: The content encoding (character set) that + the :param:`decoder` method will be returning. Will usually be + ``utf-8``, ``us-ascii``, or ``binary``. + + """ + if encoder: + self._encoders[name] = (content_type, content_encoding, encoder) + if decoder: + self._decoders[content_type] = decoder + + def _set_default_serializer(self, name): + """ + Set the default serialization method used by this library. + + :param name: The name of the registered serialization method. + For example, ``json`` (default), ``pickle``, ``yaml``, + or any custom methods registered using :meth:`register`. + + :raises SerializerNotInstalled: If the serialization method + requested is not available. + """ + try: + (self._default_content_type, self._default_content_encoding, + self._default_encode) = self._encoders[name] + except KeyError: + raise SerializerNotInstalled( + "No encoder installed for %s" % name) + + def encode(self, data, serializer=None): + """ + Serialize a data structure into a string suitable for sending + as an AMQP message body. + + :param data: The message data to send. Can be a list, + dictionary or a string. + + :keyword serializer: An optional string representing + the serialization method you want the data marshalled + into. (For example, ``json``, ``raw``, or ``pickle``). 
+ + If ``None`` (default), then `JSON`_ will be used, unless + ``data`` is a ``str`` or ``unicode`` object. In this + latter case, no serialization occurs as it would be + unnecessary. + + Note that if ``serializer`` is specified, then that + serialization method will be used even if a ``str`` + or ``unicode`` object is passed in. + + :returns: A three-item tuple containing the content type + (e.g., ``application/json``), content encoding, (e.g., + ``utf-8``) and a string containing the serialized + data. + + :raises SerializerNotInstalled: If the serialization method + requested is not available. + """ + if serializer == "raw": + return raw_encode(data) + if serializer and not self._encoders.get(serializer): + raise SerializerNotInstalled( + "No encoder installed for %s" % serializer) + + # If a raw string was sent, assume binary encoding + # (it's likely either ASCII or a raw binary file, but 'binary' + # charset will encompass both, even if not ideal. + if not serializer and isinstance(data, str): + # In Python 3+, this would be "bytes"; allow binary data to be + # sent as a message without getting encoder errors + return "application/data", "binary", data + + # For unicode objects, force it into a string + if not serializer and isinstance(data, unicode): + payload = data.encode("utf-8") + return "text/plain", "utf-8", payload + + if serializer: + content_type, content_encoding, encoder = \ + self._encoders[serializer] + else: + encoder = self._default_encode + content_type = self._default_content_type + content_encoding = self._default_content_encoding + + payload = encoder(data) + return content_type, content_encoding, payload + + def decode(self, data, content_type, content_encoding): + """Deserialize a data stream as serialized using ``encode`` + based on :param:`content_type`. + + :param data: The message data to deserialize. + + :param content_type: The content-type of the data. + (e.g., ``application/json``). + + :param content_encoding: The content-encoding of the data. + (e.g., ``utf-8``, ``binary``, or ``us-ascii``). + + :returns: The unserialized data. + """ + content_type = content_type or 'application/data' + content_encoding = (content_encoding or 'utf-8').lower() + + # Don't decode 8-bit strings or unicode objects + if content_encoding not in ('binary', 'ascii-8bit') and \ + not isinstance(data, unicode): + data = codecs.decode(data, content_encoding) + + try: + decoder = self._decoders[content_type] + except KeyError: + return data + + return decoder(data) + + +""" +.. data:: registry + +Global registry of serializers/deserializers. + +""" +registry = SerializerRegistry() + +""" +.. function:: encode(data, serializer=default_serializer) + +Encode data using the registry's default encoder. + +""" +encode = registry.encode + +""" +.. function:: decode(data, content_type, content_encoding): + +Decode data using the registry's default decoder. 
+ +""" +decode = registry.decode + + +def raw_encode(data): + """Special case serializer.""" + content_type = 'application/data' + payload = data + if isinstance(payload, unicode): + content_encoding = 'utf-8' + payload = payload.encode(content_encoding) + else: + content_encoding = 'binary' + return content_type, content_encoding, payload + + +def register_json(): + """Register a encoder/decoder for JSON serialization.""" + from anyjson import serialize as json_serialize + from anyjson import deserialize as json_deserialize + + registry.register('json', json_serialize, json_deserialize, + content_type='application/json', + content_encoding='utf-8') + + +def register_yaml(): + """Register a encoder/decoder for YAML serialization. + + It is slower than JSON, but allows for more data types + to be serialized. Useful if you need to send data such as dates""" + try: + import yaml + registry.register('yaml', yaml.safe_dump, yaml.safe_load, + content_type='application/x-yaml', + content_encoding='utf-8') + except ImportError: + + def not_available(*args, **kwargs): + """In case a client receives a yaml message, but yaml + isn't installed.""" + raise SerializerNotInstalled( + "No decoder installed for YAML. Install the PyYAML library") + registry.register('yaml', None, not_available, 'application/x-yaml') + + +def register_pickle(): + """The fastest serialization method, but restricts + you to python clients.""" + import cPickle + registry.register('pickle', cPickle.dumps, cPickle.loads, + content_type='application/x-python-serialize', + content_encoding='binary') + + +# Register the base serialization methods. +register_json() +register_pickle() +register_yaml() + +# JSON is assumed to always be available, so is the default. +# (this matches the historical use of carrot.) +registry._set_default_serializer('json') diff --git a/vendor/carrot/utils.py b/vendor/carrot/utils.py new file mode 100644 index 000000000000..686578bc72b9 --- /dev/null +++ b/vendor/carrot/utils.py @@ -0,0 +1,56 @@ +from uuid import UUID, uuid4, _uuid_generate_random +try: + import ctypes +except ImportError: + ctypes = None + + +def gen_unique_id(): + """Generate a unique id, having - hopefully - a very small chance of + collission. + + For now this is provided by :func:`uuid.uuid4`. + """ + # Workaround for http://bugs.python.org/issue4607 + if ctypes and _uuid_generate_random: + buffer = ctypes.create_string_buffer(16) + _uuid_generate_random(buffer) + return str(UUID(bytes=buffer.raw)) + return str(uuid4()) + + +def _compat_rl_partition(S, sep, direction=str.split): + items = direction(S, sep, 1) + if len(items) == 1: + return items[0], sep, '' + return items[0], sep, items[1] + + +def _compat_partition(S, sep): + """``partition(S, sep) -> (head, sep, tail)`` + + Search for the separator ``sep`` in ``S``, and return the part before + it, the separator itself, and the part after it. If the separator is not + found, return ``S`` and two empty strings. + + """ + return _compat_rl_partition(S, sep, direction=str.split) + + +def _compat_rpartition(S, sep): + """``rpartition(S, sep) -> (tail, sep, head)`` + + Search for the separator ``sep`` in ``S``, starting at the end of ``S``, + and return the part before it, the separator itself, and the part + after it. If the separator is not found, return two empty + strings and ``S``. 
+ + """ + return _compat_rl_partition(S, sep, direction=str.rsplit) + +try: + partition = str.partition + rpartition = str.rpartition +except AttributeError: # Python <= 2.4 + partition = _compat_partition + rpartition = _compat_rpartition diff --git a/vendor/lockfile/2.4.diff b/vendor/lockfile/2.4.diff new file mode 100644 index 000000000000..72318d508565 --- /dev/null +++ b/vendor/lockfile/2.4.diff @@ -0,0 +1,99 @@ +Index: lockfile/sqlitelockfile.py +=================================================================== +--- lockfile/sqlitelockfile.py (revision 93) ++++ lockfile/sqlitelockfile.py (working copy) +@@ -1,9 +1,7 @@ +-from __future__ import absolute_import, division +- + import time + import os + +-from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked ++from lockfile import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked + + class SQLiteLockFile(LockBase): + "Demonstrate SQL-based locking." +Index: lockfile/__init__.py +=================================================================== +--- lockfile/__init__.py (revision 93) ++++ lockfile/__init__.py (working copy) +@@ -24,16 +24,14 @@ + >>> lock = LockFile('somefile') + >>> print lock.is_locked() + False +->>> with lock: +-... print lock.is_locked() +-True +->>> print lock.is_locked() +-False + + >>> lock = LockFile('somefile') + >>> # It is okay to lock twice from the same thread... +->>> with lock: +-... lock.acquire() ++>>> lock.acquire() ++>>> try: ++... lock.acquire() ++... finally: ++... lock.release() + ... + >>> # Though no counter is kept, so you can't unlock multiple times... + >>> print lock.is_locked() +Index: lockfile/mkdirlockfile.py +=================================================================== +--- lockfile/mkdirlockfile.py (revision 93) ++++ lockfile/mkdirlockfile.py (working copy) +@@ -1,12 +1,10 @@ +-from __future__ import absolute_import, division +- + import time + import os + import sys + import errno + +-from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, +- AlreadyLocked) ++from lockfile import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, ++ AlreadyLocked) + + class MkdirLockFile(LockBase): + """Lock file by creating a directory.""" +Index: lockfile/pidlockfile.py +=================================================================== +--- lockfile/pidlockfile.py (revision 96) ++++ lockfile/pidlockfile.py (working copy) +@@ -12,15 +12,13 @@ + """ Lockfile behaviour implemented via Unix PID files. + """ + +-from __future__ import absolute_import +- + import os + import sys + import errno + import time + +-from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock, +- LockTimeout) ++from lockfile import (LockBase, AlreadyLocked, LockFailed, NotLocked, ++ NotMyLock, LockTimeout) + + + class PIDLockFile(LockBase): +Index: lockfile/linklockfile.py +=================================================================== +--- lockfile/linklockfile.py (revision 93) ++++ lockfile/linklockfile.py (working copy) +@@ -1,10 +1,8 @@ +-from __future__ import absolute_import +- + import time + import os + +-from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, +- AlreadyLocked) ++from lockfile import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, ++ AlreadyLocked) + + class LinkLockFile(LockBase): + """Lock access to a file using atomic property of link(2). 
diff --git a/vendor/lockfile/ACKS b/vendor/lockfile/ACKS new file mode 100644 index 000000000000..44519d17f979 --- /dev/null +++ b/vendor/lockfile/ACKS @@ -0,0 +1,6 @@ +Thanks to the following people for help with lockfile. + + Scott Dial + Ben Finney + Frank Niessink + Konstantin Veretennicov diff --git a/vendor/lockfile/LICENSE b/vendor/lockfile/LICENSE new file mode 100644 index 000000000000..610c0793f71c --- /dev/null +++ b/vendor/lockfile/LICENSE @@ -0,0 +1,21 @@ +This is the MIT license: http://www.opensource.org/licenses/mit-license.php + +Copyright (c) 2007 Skip Montanaro. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. diff --git a/vendor/lockfile/MANIFEST b/vendor/lockfile/MANIFEST new file mode 100644 index 000000000000..d302eef2fd2b --- /dev/null +++ b/vendor/lockfile/MANIFEST @@ -0,0 +1,19 @@ +2.4.diff +ACKS +LICENSE +MANIFEST +README +RELEASE-NOTES +setup.py +doc/Makefile +doc/conf.py +doc/glossary.rst +doc/index.rst +doc/lockfile.rst +lockfile/__init__.py +lockfile/linklockfile.py +lockfile/mkdirlockfile.py +lockfile/pidlockfile.py +lockfile/sqlitelockfile.py +test/compliancetest.py +test/test_lockfile.py diff --git a/vendor/lockfile/PKG-INFO b/vendor/lockfile/PKG-INFO new file mode 100644 index 000000000000..0a4907b01b2b --- /dev/null +++ b/vendor/lockfile/PKG-INFO @@ -0,0 +1,47 @@ +Metadata-Version: 1.0 +Name: lockfile +Version: 0.9 +Summary: Platform-independent file locking module +Home-page: http://smontanaro.dyndns.org/python/ +Author: Skip Montanaro +Author-email: skip@pobox.com +License: MIT License +Download-URL: http://smontanaro.dyndns.org/python/lockfile-0.9.tar.gz +Description: The lockfile package exports a LockFile class which provides a simple API for + locking files. Unlike the Windows msvcrt.locking function, the fcntl.lockf + and flock functions, and the deprecated posixfile module, the API is + identical across both Unix (including Linux and Mac) and Windows platforms. + The lock mechanism relies on the atomic nature of the link (on Unix) and + mkdir (on Windows) system calls. An implementation based on SQLite is also + provided, more as a demonstration of the possibilities it provides than as + production-quality code. + + Note: In version 0.9 the API changed in two significant ways: + + * It changed from a module defining several classes to a package containing + several modules, each defining a single class. + + * Where classes had been named SomethingFileLock before the last two words + have been reversed, so that class is now SomethingLockFile. 
+ + The previous module-level definitions of LinkFileLock, MkdirFileLock and + SQLiteFileLock will be retained until the 1.0 release. + + To install: + + python setup.py install + +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: MacOS +Classifier: Operating System :: Microsoft :: Windows :: Windows NT/2000 +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2.4 +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.0 +Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/vendor/lockfile/README b/vendor/lockfile/README new file mode 100644 index 000000000000..8ef7587e6442 --- /dev/null +++ b/vendor/lockfile/README @@ -0,0 +1,23 @@ +The lockfile package exports a LockFile class which provides a simple API for +locking files. Unlike the Windows msvcrt.locking function, the fcntl.lockf +and flock functions, and the deprecated posixfile module, the API is +identical across both Unix (including Linux and Mac) and Windows platforms. +The lock mechanism relies on the atomic nature of the link (on Unix) and +mkdir (on Windows) system calls. An implementation based on SQLite is also +provided, more as a demonstration of the possibilities it provides than as +production-quality code. + +Note: In version 0.9 the API changed in two significant ways: + + * It changed from a module defining several classes to a package containing + several modules, each defining a single class. + + * Where classes had been named SomethingFileLock before the last two words + have been reversed, so that class is now SomethingLockFile. + +The previous module-level definitions of LinkFileLock, MkdirFileLock and +SQLiteFileLock will be retained until the 1.0 release. + +To install: + + python setup.py install diff --git a/vendor/lockfile/RELEASE-NOTES b/vendor/lockfile/RELEASE-NOTES new file mode 100644 index 000000000000..7240c2635826 --- /dev/null +++ b/vendor/lockfile/RELEASE-NOTES @@ -0,0 +1,42 @@ +Version 0.9 +=========== + +* The lockfile module was reorganized into a package. + +* The names of the three main classes have changed as follows: + + LinkFileLock -> LinkLockFile + MkdirFileLock -> MkdirLockFile + SQLiteFileLock -> SQLiteLockFile + +* A PIDLockFile class was added. + +Version 0.3 +=========== + +* Fix 2.4.diff file error. + +* More documentation updates. + +Version 0.2 +=========== + +* Added 2.4.diff file to patch lockfile to work with Python 2.4 (removes use + of with statement). + +* Renamed _FileLock base class to LockBase to expose it (and its docstrings) + to pydoc. + +* Got rid of time.sleep() calls in tests (thanks to Konstantin + Veretennicov). + +* Use thread.get_ident() as the thread discriminator. + +* Updated documentation a bit. + +* Added RELEASE-NOTES. + +Version 0.1 +=========== + +* First release - All basic functionality there. diff --git a/vendor/lockfile/doc/Makefile b/vendor/lockfile/doc/Makefile new file mode 100644 index 000000000000..1b1e8d28e760 --- /dev/null +++ b/vendor/lockfile/doc/Makefile @@ -0,0 +1,73 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d .build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html web pickle htmlhelp latex changes linkcheck + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " pickle to make pickle files (usable by e.g. sphinx-web)" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview over all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + +clean: + -rm -rf .build/* + +html: + mkdir -p .build/html .build/doctrees + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) .build/html + @echo + @echo "Build finished. The HTML pages are in .build/html." + +pickle: + mkdir -p .build/pickle .build/doctrees + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) .build/pickle + @echo + @echo "Build finished; now you can process the pickle files or run" + @echo " sphinx-web .build/pickle" + @echo "to start the sphinx-web server." + +web: pickle + +htmlhelp: + mkdir -p .build/htmlhelp .build/doctrees + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) .build/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in .build/htmlhelp." + +latex: + mkdir -p .build/latex .build/doctrees + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) .build/latex + @echo + @echo "Build finished; the LaTeX files are in .build/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + mkdir -p .build/changes .build/doctrees + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) .build/changes + @echo + @echo "The overview file is in .build/changes." + +linkcheck: + mkdir -p .build/linkcheck .build/doctrees + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) .build/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in .build/linkcheck/output.txt." + +html.zip: html + (cd .build/html ; zip -r ../../$@ *) diff --git a/vendor/lockfile/doc/conf.py b/vendor/lockfile/doc/conf.py new file mode 100644 index 000000000000..64678e91d140 --- /dev/null +++ b/vendor/lockfile/doc/conf.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# +# lockfile documentation build configuration file, created by +# sphinx-quickstart on Sat Sep 13 17:54:17 2008. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# The contents of this file are pickled, so don't put values in the namespace +# that aren't pickleable (module imports are okay, they're removed automatically). +# +# All configuration values have a default value; values that are commented out +# serve to show the default value. + +import sys, os + +# If your extensions are in another directory, add it here. If the directory +# is relative to the documentation root, use os.path.abspath to make it +# absolute, like shown here. +#sys.path.append(os.path.abspath('some/directory')) + +# General configuration +# --------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['.templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. 
+master_doc = 'lockfile' + +# General substitutions. +project = 'lockfile' +copyright = '2008, Skip Montanaro' + +# The default replacements for |version| and |release|, also used in various +# other places throughout the built documents. +# +# The short X.Y version. +version = '0.3' +# The full version, including alpha/beta/rc tags. +release = '0.3' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directories, that shouldn't be searched +# for source files. +#exclude_dirs = [] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + + +# Options for HTML output +# ----------------------- + +# The style sheet to use for HTML and HTML Help pages. A file of that name +# must exist either in Sphinx' static/ path, or in one of the custom paths +# given in html_static_path. +html_style = 'default.css' + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (within the static path) to place at the top of +# the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['.static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, the reST sources are included in the HTML build as _sources/. +#html_copy_source = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. 
+#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'lockfiledoc' + + +# Options for LaTeX output +# ------------------------ + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, document class [howto/manual]). +latex_documents = [ + ('lockfile', 'lockfile.tex', 'lockfile Documentation', + 'Skip Montanaro', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True diff --git a/vendor/lockfile/doc/glossary.rst b/vendor/lockfile/doc/glossary.rst new file mode 100644 index 000000000000..9401d488f488 --- /dev/null +++ b/vendor/lockfile/doc/glossary.rst @@ -0,0 +1,15 @@ +.. _glossary: + +******** +Glossary +******** + +.. if you add new entries, keep the alphabetical sorting! + +.. glossary:: + + context manager + An object which controls the environment seen in a :keyword:`with` + statement by defining :meth:`__enter__` and :meth:`__exit__` methods. + See :pep:`343`. + diff --git a/vendor/lockfile/doc/index.rst b/vendor/lockfile/doc/index.rst new file mode 100644 index 000000000000..9718a6879928 --- /dev/null +++ b/vendor/lockfile/doc/index.rst @@ -0,0 +1,22 @@ +.. lockfile documentation master file, created by sphinx-quickstart on Sat Sep 13 17:54:17 2008. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to lockfile's documentation! +==================================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + lockfile.rst + glossary.rst + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/vendor/lockfile/doc/lockfile.rst b/vendor/lockfile/doc/lockfile.rst new file mode 100644 index 000000000000..4bc0a3e31c82 --- /dev/null +++ b/vendor/lockfile/doc/lockfile.rst @@ -0,0 +1,257 @@ + +:mod:`lockfile` --- Platform-independent file locking +===================================================== + +.. module:: lockfile + :synopsis: Platform-independent file locking +.. moduleauthor:: Skip Montanaro +.. sectionauthor:: Skip Montanaro + + +.. note:: + + This package is pre-release software. Between versions 0.8 and 0.9 it + was changed from a module to a package. It is quite possible that the + API and implementation will change again in important ways as people test + it and provide feedback and bug fixes. In particular, if the mkdir-based + locking scheme is sufficient for both Windows and Unix platforms, the + link-based scheme may be deleted so that only a single locking scheme is + used, providing cross-platform lockfile cooperation. + +.. note:: + + The implementation uses the :keyword:`with` statement, both in the + tests and in the main code, so will only work out-of-the-box with Python + 2.5 or later. 
However, the use of the :keyword:`with` statement is + minimal, so if you apply the patch in the included 2.4.diff file you can + use it with Python 2.4. It's possible that it will work in Python 2.3 + with that patch applied as well, though the doctest code relies on APIs + new in 2.4, so will have to be rewritten somewhat to allow testing on + 2.3. As they say, patches welcome. ``;-)`` + +The :mod:`lockfile` package exports a :class:`LockFile` class which provides +a simple API for locking files. Unlike the Windows :func:`msvcrt.locking` +function, the Unix :func:`fcntl.flock`, :func:`fcntl.lockf` and the +deprecated :mod:`posixfile` module, the API is identical across both Unix +(including Linux and Mac) and Windows platforms. The lock mechanism relies +on the atomic nature of the :func:`link` (on Unix) and :func:`mkdir` (On +Windows) system calls. It also contains several lock-method-specific +modules: :mod:`lockfile.linklockfile`, :mod:`lockfile.mkdirlockfile`, and +:mod:`lockfile.sqlitelockfile`, each one exporting a single class. For +backwards compatibility with versions before 0.9 the :class:`LinkFileLock`, +:class:`MkdirFileLock` and :class:`SQLiteFileLock` objects are exposed as +attributes of the top-level lockfile package, though this use was deprecated +starting with version 0.9 and will be removed in version 1.0. + +.. note:: + + The current implementation uses :func:`os.link` on Unix, but since that + function is unavailable on Windows it uses :func:`os.mkdir` there. At + this point it's not clear that using the :func:`os.mkdir` method would be + insufficient on Unix systems. If it proves to be adequate on Unix then + the implementation could be simplified and truly cross-platform locking + would be possible. + +.. note:: + + The current implementation doesn't provide for shared vs. exclusive + locks. It should be possible for multiple reader processes to hold the + lock at the same time. + +The module defines the following exceptions: + +.. exception:: Error + + This is the base class for all exceptions raised by the :class:`LockFile` + class. + +.. exception:: LockError + + This is the base class for all exceptions raised when attempting to lock + a file. + +.. exception:: UnlockError + + This is the base class for all exceptions raised when attempting to + unlock a file. + +.. exception:: LockTimeout + + This exception is raised if the :func:`LockFile.acquire` method is + called with a timeout which expires before an existing lock is released. + +.. exception:: AlreadyLocked + + This exception is raised if the :func:`LockFile.acquire` detects a + file is already locked when in non-blocking mode. + +.. exception:: LockFailed + + This exception is raised if the :func:`LockFile.acquire` detects some + other condition (such as a non-writable directory) which prevents it from + creating its lock file. + +.. exception:: NotLocked + + This exception is raised if the file is not locked when + :func:`LockFile.release` is called. + +.. exception:: NotMyLock + + This exception is raised if the file is locked by another thread or + process when :func:`LockFile.release` is called. + +The following classes are provided: + +.. class:: linklockfile.LinkLockFile(path, threaded=True) + + This class uses the :func:`link(2)` system call as the basic lock + mechanism. *path* is an object in the file system to be locked. It need + not exist, but its directory must exist and be writable at the time the + :func:`acquire` and :func:`release` methods are called. 
*threaded* is + optional, but when set to :const:`True` locks will be distinguished + between threads in the same process. + +.. class:: mkdirlockfile.MkdirLockFile(path, threaded=True) + + This class uses the :func:`mkdir(2)` system call as the basic lock + mechanism. The parameters have the same meaning as for the + :class:`LinkLockFile` class. + +.. class:: sqlitelockfile.SQLiteLockFile(path, threaded=True) + + This class uses the :mod:`sqlite3` module to implement the lock + mechanism. The parameters have the same meaning as for the + :class:`LinkLockFile` class. + +.. class:: LockBase(path, threaded=True) + + This is the base class for all concrete implementations and is available + at the lockfile package level so programmers can implement other locking + schemes. + +By default, the :const:`LockFile` object refers to the +:class:`mkdirlockfile.MkdirLockFile` class on Windows. On all other +platforms it refers to the :class:`linklockfile.LinkLockFile` class. + +When locking a file the :class:`linklockfile.LinkLockFile` class creates a +uniquely named hard link to an empty lock file. That hard link contains the +hostname, process id, and if locks between threads are distinguished, the +thread identifier. For example, if you want to lock access to a file named +"README", the lock file is named "README.lock". With per-thread locks +enabled the hard link is named HOSTNAME-THREADID-PID. With only per-process +locks enabled the hard link is named HOSTNAME--PID. + +When using the :class:`mkdirlockfile.MkdirLockFile` class the lock file is a +directory. Referring to the example above, README.lock will be a directory +and HOSTNAME-THREADID-PID will be an empty file within that directory. + +.. seealso:: + + Module :mod:`msvcrt` + Provides the :func:`locking` function, the standard Windows way of + locking (parts of) a file. + + Module :mod:`posixfile` + The deprecated (since Python 1.5) way of locking files on Posix systems. + + Module :mod:`fcntl` + Provides the current best way to lock files on Unix systems + (:func:`lockf` and :func:`flock`). + +LockFile Objects +---------------- + +:class:`LockFile` objects support the :term:`context manager` protocol used +by the statement:`with` statement. The timeout option is not supported when +used in this fashion. While support for timeouts could be implemented, +there is no support for handling the eventual :exc:`Timeout` exceptions +raised by the :func:`__enter__` method, so you would have to protect the +:keyword:`with` statement with a :keyword:`try` statement. The resulting +construct would not be any simpler than just using a :keyword:`try` +statement in the first place. + +:class:`LockFile` has the following user-visible methods: + +.. method:: LockFile.acquire(timeout=None) + + Lock the file associated with the :class:`LockFile` object. If the + *timeout* is omitted or :const:`None` the caller will block until the + file is unlocked by the object currently holding the lock. If the + *timeout* is zero or a negative number the :exc:`AlreadyLocked` exception + will be raised if the file is currently locked by another process or + thread. If the *timeout* is positive, the caller will block for that + many seconds waiting for the lock to be released. If the lock is not + released within that period the :exc:`LockTimeout` exception will be + raised. + +.. method:: LockFile.release() + + Unlock the file associated with the :class:`LockFile` object. If the + file is not currently locked, the :exc:`NotLocked` exception is raised. 
+ If the file is locked by another thread or process the :exc:`NotMyLock` + exception is raised. + +.. method:: is_locked() + + Return the status of the lock on the current file. If any process or + thread (including the current one) is locking the file, :const:`True` is + returned, otherwise :const:`False` is returned. + +.. method:: break_lock() + + If the file is currently locked, break it. + +.. method:: i_am_locking() + + Returns true if the caller holds the lock. + +Examples +-------- + +This example is the "hello world" for the :mod:`lockfile` package:: + + from lockfile import LockFile + lock = LockFile("/some/file/or/other") + with lock: + print lock.path, 'is locked.' + +To use this with Python 2.4, you can execute:: + + frm lockfile import LockFile + lock = LockFile("/some/file/or/other") + lock.acquire() + print lock.path, 'is locked.' + lock.release() + +If you don't want to wait forever, you might try:: + + from lockfile import LockFile + lock = LockFile("/some/file/or/other") + while not lock.i_am_locking(): + try: + lock.acquire(timeout=60) # wait up to 60 seconds + except LockTimeout: + lock.break_lock() + lock.acquire() + print "I locked", lock.path + lock.release() + +Other Libraries +--------------- + +The idea of implementing advisory locking with a standard API is not new +with :mod:`lockfile`. There are a number of other libraries available: + +* locknix - http://pypi.python.org/pypi/locknix - Unix only +* mx.MiscLockFile - from Marc André Lemburg, part of the mx.Base + distribution - cross-platform. +* Twisted - http://twistedmatrix.com/trac/browser/trunk/twisted/python/lockfile.py +* zc.lockfile - http://pypi.python.org/pypi/zc.lockfile + + +Contacting the Author +--------------------- + +If you encounter any problems with ``lockfile``, would like help or want to +submit a patch, contact me directly: Skip Montanaro (skip@pobox.com). diff --git a/vendor/lockfile/lockfile/__init__.py b/vendor/lockfile/lockfile/__init__.py new file mode 100644 index 000000000000..33356ad8e095 --- /dev/null +++ b/vendor/lockfile/lockfile/__init__.py @@ -0,0 +1,286 @@ + +""" +lockfile.py - Platform-independent advisory file locks. + +Requires Python 2.5 unless you apply 2.4.diff +Locking is done on a per-thread basis instead of a per-process basis. + +Usage: + +>>> lock = LockFile('somefile') +>>> try: +... lock.acquire() +... except AlreadyLocked: +... print 'somefile', 'is locked already.' +... except LockFailed: +... print 'somefile', 'can\\'t be locked.' +... else: +... print 'got lock' +got lock +>>> print lock.is_locked() +True +>>> lock.release() + +>>> lock = LockFile('somefile') +>>> print lock.is_locked() +False +>>> with lock: +... print lock.is_locked() +True +>>> print lock.is_locked() +False + +>>> lock = LockFile('somefile') +>>> # It is okay to lock twice from the same thread... +>>> with lock: +... lock.acquire() +... +>>> # Though no counter is kept, so you can't unlock multiple times... +>>> print lock.is_locked() +False + +Exceptions: + + Error - base class for other exceptions + LockError - base class for all locking exceptions + AlreadyLocked - Another thread or process already holds the lock + LockFailed - Lock failed for some other reason + UnlockError - base class for all unlocking exceptions + AlreadyUnlocked - File was not locked. 
+ NotMyLock - File was locked but not by the current thread/process +""" + +import sys +import socket +import os +import threading +import time +import urllib +import warnings + +# Work with PEP8 and non-PEP8 versions of threading module. +if not hasattr(threading, "current_thread"): + threading.current_thread = threading.currentThread +if not hasattr(threading.Thread, "get_name"): + threading.Thread.get_name = threading.Thread.getName + +__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked', + 'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock', + 'LinkLockFile', 'MkdirLockFile', 'SQLiteLockFile', + 'LockBase'] + +class Error(Exception): + """ + Base class for other exceptions. + + >>> try: + ... raise Error + ... except Exception: + ... pass + """ + pass + +class LockError(Error): + """ + Base class for error arising from attempts to acquire the lock. + + >>> try: + ... raise LockError + ... except Error: + ... pass + """ + pass + +class LockTimeout(LockError): + """Raised when lock creation fails within a user-defined period of time. + + >>> try: + ... raise LockTimeout + ... except LockError: + ... pass + """ + pass + +class AlreadyLocked(LockError): + """Some other thread/process is locking the file. + + >>> try: + ... raise AlreadyLocked + ... except LockError: + ... pass + """ + pass + +class LockFailed(LockError): + """Lock file creation failed for some other reason. + + >>> try: + ... raise LockFailed + ... except LockError: + ... pass + """ + pass + +class UnlockError(Error): + """ + Base class for errors arising from attempts to release the lock. + + >>> try: + ... raise UnlockError + ... except Error: + ... pass + """ + pass + +class NotLocked(UnlockError): + """Raised when an attempt is made to unlock an unlocked file. + + >>> try: + ... raise NotLocked + ... except UnlockError: + ... pass + """ + pass + +class NotMyLock(UnlockError): + """Raised when an attempt is made to unlock a file someone else locked. + + >>> try: + ... raise NotMyLock + ... except UnlockError: + ... pass + """ + pass + +class LockBase: + """Base class for platform-specific lock classes.""" + def __init__(self, path, threaded=True): + """ + >>> lock = LockBase('somefile') + >>> lock = LockBase('somefile', threaded=False) + """ + self.path = path + self.lock_file = os.path.abspath(path) + ".lock" + self.hostname = socket.gethostname() + self.pid = os.getpid() + if threaded: + t = threading.current_thread() + # Thread objects in Python 2.4 and earlier do not have ident + # attrs. Worm around that. + ident = getattr(t, "ident", hash(t)) + self.tname = "%x-" % (ident & 0xffffffff) + else: + self.tname = "" + dirname = os.path.dirname(self.lock_file) + self.unique_name = os.path.join(dirname, + "%s.%s%s" % (self.hostname, + self.tname, + self.pid)) + + def acquire(self, timeout=None): + """ + Acquire the lock. + + * If timeout is omitted (or None), wait forever trying to lock the + file. + + * If timeout > 0, try to acquire the lock for that many seconds. If + the lock period expires and the file is still locked, raise + LockTimeout. + + * If timeout <= 0, raise AlreadyLocked immediately if the file is + already locked. + """ + raise NotImplemented("implement in subclass") + + def release(self): + """ + Release the lock. + + If the file is not locked, raise NotLocked. + """ + raise NotImplemented("implement in subclass") + + def is_locked(self): + """ + Tell whether or not the file is locked. 
+ """ + raise NotImplemented("implement in subclass") + + def i_am_locking(self): + """ + Return True if this object is locking the file. + """ + raise NotImplemented("implement in subclass") + + def break_lock(self): + """ + Remove a lock. Useful if a locking thread failed to unlock. + """ + raise NotImplemented("implement in subclass") + + def __enter__(self): + """ + Context manager support. + """ + self.acquire() + return self + + def __exit__(self, *_exc): + """ + Context manager support. + """ + self.release() + +def _fl_helper(cls, mod, *args, **kwds): + warnings.warn("Import from %s module instead of lockfile package" % mod, + DeprecationWarning, stacklevel=2) + # This is a bit funky, but it's only for awhile. The way the unit tests + # are constructed this function winds up as an unbound method, so it + # actually takes three args, not two. We want to toss out self. + if not isinstance(args[0], str): + # We are testing, avoid the first arg + args = args[1:] + if len(args) == 1 and not kwds: + kwds["threaded"] = True + return cls(*args, **kwds) + +def LinkFileLock(*args, **kwds): + """Factory function provided for backwards compatibility. + + Do not use in new code. Instead, import LinkLockFile from the + lockfile.linklockfile module. + """ + import linklockfile + return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile", + *args, **kwds) + +def MkdirFileLock(*args, **kwds): + """Factory function provided for backwards compatibility. + + Do not use in new code. Instead, import MkdirLockFile from the + lockfile.mkdirlockfile module. + """ + import mkdirlockfile + return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", + *args, **kwds) + +def SQLiteFileLock(*args, **kwds): + """Factory function provided for backwards compatibility. + + Do not use in new code. Instead, import SQLiteLockFile from the + lockfile.mkdirlockfile module. + """ + import sqlitelockfile + return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile", + *args, **kwds) + +if hasattr(os, "link"): + import linklockfile as _llf + LockFile = _llf.LinkLockFile +else: + import mkdirlockfile as _mlf + LockFile = _mlf.MkdirLockFile + +FileLock = LockFile + diff --git a/vendor/lockfile/lockfile/linklockfile.py b/vendor/lockfile/lockfile/linklockfile.py new file mode 100644 index 000000000000..f8aeaefcfc16 --- /dev/null +++ b/vendor/lockfile/lockfile/linklockfile.py @@ -0,0 +1,71 @@ +from __future__ import absolute_import + +import time +import os + +from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, + AlreadyLocked) + +class LinkLockFile(LockBase): + """Lock access to a file using atomic property of link(2). + + >>> lock = LinkLockFile('somefile') + >>> lock = LinkLockFile('somefile', threaded=False) + """ + + def acquire(self, timeout=None): + try: + open(self.unique_name, "wb").close() + except IOError: + raise LockFailed("failed to create %s" % self.unique_name) + + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + while True: + # Try and create a hard link to it. + try: + print 'making a hard link %s to %s' % (self.unique_name, + self.lock_file) + os.link(self.unique_name, self.lock_file) + except OSError: + # Link creation failed. Maybe we've double-locked? + nlinks = os.stat(self.unique_name).st_nlink + if nlinks == 2: + # The original link plus the one I created == 2. We're + # good to go. + return + else: + # Otherwise the lock creation failed. 
+ if timeout is not None and time.time() > end_time: + os.unlink(self.unique_name) + if timeout > 0: + raise LockTimeout + else: + raise AlreadyLocked + time.sleep(timeout is not None and timeout/10 or 0.1) + else: + # Link creation succeeded. We're good to go. + return + + def release(self): + if not self.is_locked(): + raise NotLocked + elif not os.path.exists(self.unique_name): + raise NotMyLock + os.unlink(self.unique_name) + os.unlink(self.lock_file) + + def is_locked(self): + return os.path.exists(self.lock_file) + + def i_am_locking(self): + return (self.is_locked() and + os.path.exists(self.unique_name) and + os.stat(self.unique_name).st_nlink == 2) + + def break_lock(self): + if os.path.exists(self.lock_file): + os.unlink(self.lock_file) + diff --git a/vendor/lockfile/lockfile/mkdirlockfile.py b/vendor/lockfile/lockfile/mkdirlockfile.py new file mode 100644 index 000000000000..fb78902d51b0 --- /dev/null +++ b/vendor/lockfile/lockfile/mkdirlockfile.py @@ -0,0 +1,79 @@ +from __future__ import absolute_import, division + +import time +import os +import sys +import errno + +from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout, + AlreadyLocked) + +class MkdirLockFile(LockBase): + """Lock file by creating a directory.""" + def __init__(self, path, threaded=True): + """ + >>> lock = MkdirLockFile('somefile') + >>> lock = MkdirLockFile('somefile', threaded=False) + """ + LockBase.__init__(self, path, threaded) + # Lock file itself is a directory. Place the unique file name into + # it. + self.unique_name = os.path.join(self.lock_file, + "%s.%s%s" % (self.hostname, + self.tname, + self.pid)) + + def acquire(self, timeout=None): + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + if timeout is None: + wait = 0.1 + else: + wait = max(0, timeout / 10) + + while True: + try: + os.mkdir(self.lock_file) + except OSError: + err = sys.exc_info()[1] + if err.errno == errno.EEXIST: + # Already locked. + if os.path.exists(self.unique_name): + # Already locked by me. + return + if timeout is not None and time.time() > end_time: + if timeout > 0: + raise LockTimeout + else: + # Someone else has the lock. + raise AlreadyLocked + time.sleep(wait) + else: + # Couldn't create the lock for some other reason + raise LockFailed("failed to create %s" % self.lock_file) + else: + open(self.unique_name, "wb").close() + return + + def release(self): + if not self.is_locked(): + raise NotLocked + elif not os.path.exists(self.unique_name): + raise NotMyLock + os.unlink(self.unique_name) + os.rmdir(self.lock_file) + + def is_locked(self): + return os.path.exists(self.lock_file) + + def i_am_locking(self): + return (self.is_locked() and + os.path.exists(self.unique_name)) + + def break_lock(self): + if os.path.exists(self.lock_file): + for name in os.listdir(self.lock_file): + os.unlink(os.path.join(self.lock_file, name)) + os.rmdir(self.lock_file) diff --git a/vendor/lockfile/lockfile/pidlockfile.py b/vendor/lockfile/lockfile/pidlockfile.py new file mode 100644 index 000000000000..3fa279c14b4f --- /dev/null +++ b/vendor/lockfile/lockfile/pidlockfile.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- + +# pidlockfile.py +# +# Copyright © 2008–2009 Ben Finney +# +# This is free software: you may copy, modify, and/or distribute this work +# under the terms of the Python Software Foundation License, version 2 or +# later as published by the Python Software Foundation. +# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. 
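The link- and mkdir-based lock classes above both hinge on a single atomic filesystem call. The following is a minimal, self-contained sketch (an editor's illustration, not part of the lockfile package) of the link(2) trick that LinkLockFile.acquire() relies on; the path and the simplified unique-name format are assumptions for demonstration only, since the real LockBase also folds a thread identifier into the name when threaded=True, and os.link is only available on Unix:

    import os
    import socket

    lock_file = "/tmp/demo.txt.lock"   # shared lock name (illustrative path)
    unique_name = "%s.%s.%d" % (lock_file, socket.gethostname(), os.getpid())

    open(unique_name, "wb").close()        # step 1: a file only we know about
    try:
        os.link(unique_name, lock_file)    # step 2: atomically claim the shared name
        have_lock = True
    except OSError:
        # The shared name already exists, so someone else (or an earlier
        # acquire by this process) holds the lock.  LinkLockFile tells the
        # two cases apart by checking os.stat(unique_name).st_nlink == 2.
        have_lock = os.stat(unique_name).st_nlink == 2

    if have_lock:
        pass                               # ... critical section ...
        os.unlink(unique_name)             # release: drop both names
        os.unlink(lock_file)
    else:
        os.unlink(unique_name)             # lost the race: tidy up our private name

The mkdir-based class plays the same game with os.mkdir(lock_file), which likewise either succeeds atomically or fails with EEXIST when another holder created the directory first.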
+ +""" Lockfile behaviour implemented via Unix PID files. + """ + +from __future__ import absolute_import + +import os +import sys +import errno +import time + +from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock, + LockTimeout) + + +class PIDLockFile(LockBase): + """ Lockfile implemented as a Unix PID file. + + The lock file is a normal file named by the attribute `path`. + A lock's PID file contains a single line of text, containing + the process ID (PID) of the process that acquired the lock. + + >>> lock = PIDLockFile('somefile') + >>> lock = PIDLockFile('somefile', threaded=False) + """ + + def read_pid(self): + """ Get the PID from the lock file. + """ + return read_pid_from_pidfile(self.path) + + def is_locked(self): + """ Test if the lock is currently held. + + The lock is held if the PID file for this lock exists. + + """ + return os.path.exists(self.path) + + def i_am_locking(self): + """ Test if the lock is held by the current process. + + Returns ``True`` if the current process ID matches the + number stored in the PID file. + """ + return self.is_locked() and os.getpid() == self.read_pid() + + def acquire(self, timeout=None): + """ Acquire the lock. + + Creates the PID file for this lock, or raises an error if + the lock could not be acquired. + """ + + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + while True: + try: + write_pid_to_pidfile(self.path) + except OSError, exc: + if exc.errno == errno.EEXIST: + # The lock creation failed. Maybe sleep a bit. + if timeout is not None and time.time() > end_time: + if timeout > 0: + raise LockTimeout + else: + raise AlreadyLocked + time.sleep(timeout is not None and timeout/10 or 0.1) + else: + raise LockFailed + else: + return + + def release(self): + """ Release the lock. + + Removes the PID file to release the lock, or raises an + error if the current process does not hold the lock. + + """ + if not self.is_locked(): + raise NotLocked + if not self.i_am_locking(): + raise NotMyLock + remove_existing_pidfile(self.path) + + def break_lock(self): + """ Break an existing lock. + + Removes the PID file if it already exists, otherwise does + nothing. + + """ + remove_existing_pidfile(self.path) + +def read_pid_from_pidfile(pidfile_path): + """ Read the PID recorded in the named PID file. + + Read and return the numeric PID recorded as text in the named + PID file. If the PID file cannot be read, or if the content is + not a valid PID, return ``None``. + + """ + pid = None + try: + pidfile = open(pidfile_path, 'r') + except IOError: + pass + else: + # According to the FHS 2.3 section on PID files in /var/run: + # + # The file must consist of the process identifier in + # ASCII-encoded decimal, followed by a newline character. + # + # Programs that read PID files should be somewhat flexible + # in what they accept; i.e., they should ignore extra + # whitespace, leading zeroes, absence of the trailing + # newline, or additional lines in the PID file. + + line = pidfile.readline().strip() + try: + pid = int(line) + except ValueError: + pass + pidfile.close() + + return pid + + +def write_pid_to_pidfile(pidfile_path): + """ Write the PID in the named PID file. + + Get the numeric process ID (“PIDâ€) of the current process + and write it to the named file as a line of text. 
+ + """ + open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY) + open_mode = 0x644 + pidfile_fd = os.open(pidfile_path, open_flags, open_mode) + pidfile = os.fdopen(pidfile_fd, 'w') + + # According to the FHS 2.3 section on PID files in /var/run: + # + # The file must consist of the process identifier in + # ASCII-encoded decimal, followed by a newline character. For + # example, if crond was process number 25, /var/run/crond.pid + # would contain three characters: two, five, and newline. + + pid = os.getpid() + line = "%(pid)d\n" % vars() + pidfile.write(line) + pidfile.close() + + +def remove_existing_pidfile(pidfile_path): + """ Remove the named PID file if it exists. + + Removing a PID file that doesn't already exist puts us in the + desired state, so we ignore the condition if the file does not + exist. + + """ + try: + os.remove(pidfile_path) + except OSError, exc: + if exc.errno == errno.ENOENT: + pass + else: + raise diff --git a/vendor/lockfile/lockfile/sqlitelockfile.py b/vendor/lockfile/lockfile/sqlitelockfile.py new file mode 100644 index 000000000000..9df57b04c6eb --- /dev/null +++ b/vendor/lockfile/lockfile/sqlitelockfile.py @@ -0,0 +1,142 @@ +from __future__ import absolute_import, division + +import time +import os + +from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked + +class SQLiteLockFile(LockBase): + "Demonstrate SQL-based locking." + + import tempfile + _fd, testdb = tempfile.mkstemp() + os.close(_fd) + os.unlink(testdb) + del _fd, tempfile + + def __init__(self, path, threaded=True): + """ + >>> lock = SQLiteLockFile('somefile') + >>> lock = SQLiteLockFile('somefile', threaded=False) + """ + LockBase.__init__(self, path, threaded) + self.lock_file = unicode(self.lock_file) + self.unique_name = unicode(self.unique_name) + + import sqlite3 + self.connection = sqlite3.connect(SQLiteLockFile.testdb) + + c = self.connection.cursor() + try: + c.execute("create table locks" + "(" + " lock_file varchar(32)," + " unique_name varchar(32)" + ")") + except sqlite3.OperationalError: + pass + else: + self.connection.commit() + import atexit + atexit.register(os.unlink, SQLiteLockFile.testdb) + + def acquire(self, timeout=None): + end_time = time.time() + if timeout is not None and timeout > 0: + end_time += timeout + + if timeout is None: + wait = 0.1 + elif timeout <= 0: + wait = 0 + else: + wait = timeout / 10 + + cursor = self.connection.cursor() + + while True: + if not self.is_locked(): + # Not locked. Try to lock it. + cursor.execute("insert into locks" + " (lock_file, unique_name)" + " values" + " (?, ?)", + (self.lock_file, self.unique_name)) + self.connection.commit() + + # Check to see if we are the only lock holder. + cursor.execute("select * from locks" + " where unique_name = ?", + (self.unique_name,)) + rows = cursor.fetchall() + if len(rows) > 1: + # Nope. Someone else got there. Remove our lock. + cursor.execute("delete from locks" + " where unique_name = ?", + (self.unique_name,)) + self.connection.commit() + else: + # Yup. We're done, so go home. + return + else: + # Check to see if we are the only lock holder. + cursor.execute("select * from locks" + " where unique_name = ?", + (self.unique_name,)) + rows = cursor.fetchall() + if len(rows) == 1: + # We're the locker, so go home. + return + + # Maybe we should wait a bit longer. + if timeout is not None and time.time() > end_time: + if timeout > 0: + # No more waiting. + raise LockTimeout + else: + # Someone else has the lock and we are impatient.. + raise AlreadyLocked + + # Well, okay. 
We'll give it a bit longer. + time.sleep(wait) + + def release(self): + if not self.is_locked(): + raise NotLocked + if not self.i_am_locking(): + raise NotMyLock((self._who_is_locking(), self.unique_name)) + cursor = self.connection.cursor() + cursor.execute("delete from locks" + " where unique_name = ?", + (self.unique_name,)) + self.connection.commit() + + def _who_is_locking(self): + cursor = self.connection.cursor() + cursor.execute("select unique_name from locks" + " where lock_file = ?", + (self.lock_file,)) + return cursor.fetchone()[0] + + def is_locked(self): + cursor = self.connection.cursor() + cursor.execute("select * from locks" + " where lock_file = ?", + (self.lock_file,)) + rows = cursor.fetchall() + return not not rows + + def i_am_locking(self): + cursor = self.connection.cursor() + cursor.execute("select * from locks" + " where lock_file = ?" + " and unique_name = ?", + (self.lock_file, self.unique_name)) + return not not cursor.fetchall() + + def break_lock(self): + cursor = self.connection.cursor() + cursor.execute("delete from locks" + " where lock_file = ?", + (self.lock_file,)) + self.connection.commit() diff --git a/vendor/lockfile/setup.py b/vendor/lockfile/setup.py new file mode 100644 index 000000000000..3e407f57aefe --- /dev/null +++ b/vendor/lockfile/setup.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python + +V = "0.9" + +from distutils.core import setup +setup(name='lockfile', + author='Skip Montanaro', + author_email='skip@pobox.com', + url='http://smontanaro.dyndns.org/python/', + download_url=('http://smontanaro.dyndns.org/python/lockfile-%s.tar.gz' % + V), + version=V, + description="Platform-independent file locking module", + long_description=open("README").read(), + packages=['lockfile'], + license='MIT License', + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: MacOS', + 'Operating System :: Microsoft :: Windows :: Windows NT/2000', + 'Operating System :: POSIX', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2.4', + 'Programming Language :: Python :: 2.5', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.0', + 'Topic :: Software Development :: Libraries :: Python Modules', + ] + ) diff --git a/vendor/lockfile/test/compliancetest.py b/vendor/lockfile/test/compliancetest.py new file mode 100644 index 000000000000..f1f221075eb0 --- /dev/null +++ b/vendor/lockfile/test/compliancetest.py @@ -0,0 +1,228 @@ +import os +import threading +import shutil + +import lockfile + +class ComplianceTest(object): + def __init__(self): + self.saved_class = lockfile.LockFile + + def _testfile(self): + """Return platform-appropriate file. Helper for tests.""" + import tempfile + return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid()) + + def setup(self): + lockfile.LockFile = self.class_to_test + + def teardown(self): + tf = self._testfile() + if os.path.isdir(tf): + shutil.rmtree(tf) + elif os.path.isfile(tf): + os.unlink(tf) + lockfile.LockFile = self.saved_class + + def _test_acquire_helper(self, tbool): + # As simple as it gets. 
+ lock = lockfile.LockFile(self._testfile(), threaded=tbool) + lock.acquire() + assert lock.is_locked() + lock.release() + assert not lock.is_locked() + + def test_acquire_basic_threaded(self): + self._test_acquire_helper(True) + + def test_acquire_basic_unthreaded(self): + self._test_acquire_helper(False) + + def _test_acquire_no_timeout_helper(self, tbool): + # No timeout test + e1, e2 = threading.Event(), threading.Event() + t = _in_thread(self._lock_wait_unlock, e1, e2) + e1.wait() # wait for thread t to acquire lock + lock2 = lockfile.LockFile(self._testfile(), threaded=tbool) + assert lock2.is_locked() + assert not lock2.i_am_locking() + + try: + lock2.acquire(timeout=-1) + except lockfile.AlreadyLocked: + pass + else: + lock2.release() + raise AssertionError("did not raise AlreadyLocked in" + " thread %s" % + threading.current_thread().get_name()) + + e2.set() # tell thread t to release lock + t.join() + + def test_acquire_no_timeout_threaded(self): + self._test_acquire_no_timeout_helper(True) + + def test_acquire_no_timeout_unthreaded(self): + self._test_acquire_no_timeout_helper(False) + + def _test_acquire_timeout_helper(self, tbool): + # Timeout test + e1, e2 = threading.Event(), threading.Event() + t = _in_thread(self._lock_wait_unlock, e1, e2) + e1.wait() # wait for thread t to acquire lock + lock2 = lockfile.LockFile(self._testfile(), threaded=tbool) + assert lock2.is_locked() + try: + lock2.acquire(timeout=0.1) + except lockfile.LockTimeout: + pass + else: + lock2.release() + raise AssertionError("did not raise LockTimeout in thread %s" % + threading.current_thread().get_name()) + + e2.set() + t.join() + + def test_acquire_timeout_threaded(self): + self._test_acquire_timeout_helper(True) + + def test_acquire_timeout_unthreaded(self): + self._test_acquire_timeout_helper(False) + + def _test_release_basic_helper(self, tbool): + lock = lockfile.LockFile(self._testfile(), threaded=tbool) + lock.acquire() + assert lock.is_locked() + lock.release() + assert not lock.is_locked() + assert not lock.i_am_locking() + try: + lock.release() + except lockfile.NotLocked: + pass + except lockfile.NotMyLock: + raise AssertionError('unexpected exception: %s' % + lockfile.NotMyLock) + else: + raise AssertionError('erroneously unlocked file') + + def test_release_basic_threaded(self): + self._test_release_basic_helper(True) + + def test_release_basic_unthreaded(self): + self._test_release_basic_helper(False) + + def _test_release_from_thread_helper(self, tbool): + e1, e2 = threading.Event(), threading.Event() + t = _in_thread(self._lock_wait_unlock, e1, e2) + e1.wait() + lock2 = lockfile.LockFile(self._testfile(), threaded=tbool) + assert lock2.is_locked() + assert not lock2.i_am_locking() + try: + lock2.release() + except lockfile.NotMyLock: + pass + else: + raise AssertionError('erroneously unlocked a file locked' + ' by another thread.') + e2.set() + t.join() + + def test_release_from_thread_threaded(self): + self._test_release_from_thread_helper(True) + + def test_release_from_thread_unthreaded(self): + self._test_release_from_thread_helper(False) + + def _test_is_locked_helper(self, tbool): + lock = lockfile.LockFile(self._testfile(), threaded=tbool) + lock.acquire() + assert lock.is_locked() + lock.release() + assert not lock.is_locked() + + def test_is_locked_threaded(self): + self._test_is_locked_helper(True) + + def test_is_locked_unthreaded(self): + self._test_is_locked_helper(False) + + def test_i_am_locking(self): + lock1 = lockfile.LockFile(self._testfile(), threaded=False) + 
lock1.acquire() + try: + assert lock1.is_locked() + lock2 = lockfile.LockFile(self._testfile()) + try: + assert lock1.i_am_locking() + assert not lock2.i_am_locking() + try: + lock2.acquire(timeout=2) + except lockfile.LockTimeout: + lock2.break_lock() + assert not lock2.is_locked() + assert not lock1.is_locked() + lock2.acquire() + else: + raise AssertionError('expected LockTimeout...') + assert not lock1.i_am_locking() + assert lock2.i_am_locking() + finally: + if lock2.i_am_locking(): + lock2.release() + finally: + if lock1.i_am_locking(): + lock1.release() + + def _test_break_lock_helper(self, tbool): + lock = lockfile.LockFile(self._testfile(), threaded=tbool) + lock.acquire() + assert lock.is_locked() + lock2 = lockfile.LockFile(self._testfile(), threaded=tbool) + assert lock2.is_locked() + lock2.break_lock() + assert not lock2.is_locked() + try: + lock.release() + except lockfile.NotLocked: + pass + else: + raise AssertionError('break lock failed') + + def test_break_lock_threaded(self): + self._test_break_lock_helper(True) + + def test_break_lock_unthreaded(self): + self._test_break_lock_helper(False) + + def _lock_wait_unlock(self, event1, event2): + """Lock from another thread. Helper for tests.""" + l = lockfile.LockFile(self._testfile()) + l.acquire() + try: + event1.set() # we're in, + event2.wait() # wait for boss's permission to leave + finally: + l.release() + + def test_enter(self): + lock = lockfile.LockFile(self._testfile()) + lock.acquire() + try: + assert lock.is_locked(), "Not locked after acquire!" + finally: + lock.release() + assert not lock.is_locked(), "still locked after release!" + +def _in_thread(func, *args, **kwargs): + """Execute func(*args, **kwargs) after dt seconds. Helper for tests.""" + def _f(): + func(*args, **kwargs) + t = threading.Thread(target=_f, name='/*/*') + t.setDaemon(True) + t.start() + return t + diff --git a/vendor/lockfile/test/test_lockfile.py b/vendor/lockfile/test/test_lockfile.py new file mode 100644 index 000000000000..3b70cddd318d --- /dev/null +++ b/vendor/lockfile/test/test_lockfile.py @@ -0,0 +1,30 @@ +import sys + +import lockfile.linklockfile, lockfile.mkdirlockfile, lockfile.pidlockfile + +from compliancetest import ComplianceTest + +class TestLinkLockFile(ComplianceTest): + class_to_test = lockfile.linklockfile.LinkLockFile + +class TestMkdirLockFile(ComplianceTest): + class_to_test = lockfile.mkdirlockfile.MkdirLockFile + +class TestPIDLockFile(ComplianceTest): + class_to_test = lockfile.pidlockfile.PIDLockFile + +# Check backwards compatibility +class TestLinkFileLock(ComplianceTest): + class_to_test = lockfile.LinkFileLock + +class TestMkdirFileLock(ComplianceTest): + class_to_test = lockfile.MkdirFileLock + +try: + import sqlite3 +except ImportError: + pass +else: + import lockfile.sqlitelockfile + class TestSQLiteLockFile(ComplianceTest): + class_to_test = lockfile.sqlitelockfile.SQLiteLockFile diff --git a/vendor/pymox/COPYING b/vendor/pymox/COPYING new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/pymox/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/pymox/MANIFEST.in b/vendor/pymox/MANIFEST.in new file mode 100644 index 000000000000..326bc88dc988 --- /dev/null +++ b/vendor/pymox/MANIFEST.in @@ -0,0 +1,5 @@ +include COPYING +include mox_test.py +include mox_test_helper.py +include stubout_test.py +include stubout_testee.py diff --git a/vendor/pymox/README b/vendor/pymox/README new file mode 100644 index 000000000000..5e1fe7827f57 --- /dev/null +++ b/vendor/pymox/README @@ -0,0 +1,56 @@ +Mox is an open source mock object framework for Python, inspired by +the Java library EasyMock. + +To install: + + $ python setup.py install + +To run Mox's internal tests: + + $ python mox_test.py + +Basic usage: + + import unittest + import mox + + class PersonTest(mox.MoxTestBase): + + def testUsingMox(self): + # Create a mock Person + mock_person = self.mox.CreateMock(Person) + + test_person = ... + test_primary_key = ... + unknown_person = ... + + # Expect InsertPerson to be called with test_person; return + # test_primary_key at that point + mock_person.InsertPerson(test_person).AndReturn(test_primary_key) + + # Raise an exception when this is called + mock_person.DeletePerson(unknown_person).AndRaise(UnknownPersonError()) + + # Switch from record mode to replay mode + self.mox.ReplayAll() + + # Run the test + ret_pk = mock_person.InsertPerson(test_person) + self.assertEquals(test_primary_key, ret_pk) + self.assertRaises(UnknownPersonError, mock_person, unknown_person) + +For more documentation, see: + + http://code.google.com/p/pymox/wiki/MoxDocumentation + +For more information, see: + + http://code.google.com/p/pymox/ + +Our user and developer discussion group is: + + http://groups.google.com/group/mox-discuss + +Mox is Copyright 2008 Google Inc, and licensed under the Apache +License, Version 2.0; see the file COPYING for details. If you would +like to help us improve Mox, join the group. diff --git a/vendor/pymox/mox.py b/vendor/pymox/mox.py new file mode 100755 index 000000000000..ba1e09560c5e --- /dev/null +++ b/vendor/pymox/mox.py @@ -0,0 +1,1729 @@ +#!/usr/bin/python2.4 +# +# Copyright 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Mox, an object-mocking framework for Python. + +Mox works in the record-replay-verify paradigm. When you first create +a mock object, it is in record mode. You then programmatically set +the expected behavior of the mock object (what methods are to be +called on it, with what parameters, what they should return, and in +what order). + +Once you have set up the expected mock behavior, you put it in replay +mode. Now the mock responds to method calls just as you told it to. +If an unexpected method (or an expected method with unexpected +parameters) is called, then an exception will be raised. + +Once you are done interacting with the mock, you need to verify that +all the expected interactions occured. (Maybe your code exited +prematurely without calling some cleanup method!) 
The verify phase +ensures that every expected method was called; otherwise, an exception +will be raised. + +WARNING! Mock objects created by Mox are not thread-safe. If you are +call a mock in multiple threads, it should be guarded by a mutex. + +TODO(stevepm): Add the option to make mocks thread-safe! + +Suggested usage / workflow: + + # Create Mox factory + my_mox = Mox() + + # Create a mock data access object + mock_dao = my_mox.CreateMock(DAOClass) + + # Set up expected behavior + mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person) + mock_dao.DeletePerson(person) + + # Put mocks in replay mode + my_mox.ReplayAll() + + # Inject mock object and run test + controller.SetDao(mock_dao) + controller.DeletePersonById('1') + + # Verify all methods were called as expected + my_mox.VerifyAll() +""" + +from collections import deque +import difflib +import inspect +import re +import types +import unittest + +import stubout + +class Error(AssertionError): + """Base exception for this module.""" + + pass + + +class ExpectedMethodCallsError(Error): + """Raised when Verify() is called before all expected methods have been called + """ + + def __init__(self, expected_methods): + """Init exception. + + Args: + # expected_methods: A sequence of MockMethod objects that should have been + # called. + expected_methods: [MockMethod] + + Raises: + ValueError: if expected_methods contains no methods. + """ + + if not expected_methods: + raise ValueError("There must be at least one expected method") + Error.__init__(self) + self._expected_methods = expected_methods + + def __str__(self): + calls = "\n".join(["%3d. %s" % (i, m) + for i, m in enumerate(self._expected_methods)]) + return "Verify: Expected methods never called:\n%s" % (calls,) + + +class UnexpectedMethodCallError(Error): + """Raised when an unexpected method is called. + + This can occur if a method is called with incorrect parameters, or out of the + specified order. + """ + + def __init__(self, unexpected_method, expected): + """Init exception. + + Args: + # unexpected_method: MockMethod that was called but was not at the head of + # the expected_method queue. + # expected: MockMethod or UnorderedGroup the method should have + # been in. + unexpected_method: MockMethod + expected: MockMethod or UnorderedGroup + """ + + Error.__init__(self) + if expected is None: + self._str = "Unexpected method call %s" % (unexpected_method,) + else: + differ = difflib.Differ() + diff = differ.compare(str(unexpected_method).splitlines(True), + str(expected).splitlines(True)) + self._str = ("Unexpected method call. unexpected:- expected:+\n%s" + % ("\n".join(diff),)) + + def __str__(self): + return self._str + + +class UnknownMethodCallError(Error): + """Raised if an unknown method is requested of the mock object.""" + + def __init__(self, unknown_method_name): + """Init exception. + + Args: + # unknown_method_name: Method call that is not part of the mocked class's + # public interface. + unknown_method_name: str + """ + + Error.__init__(self) + self._unknown_method_name = unknown_method_name + + def __str__(self): + return "Method called is not a member of the object: %s" % \ + self._unknown_method_name + + +class PrivateAttributeError(Error): + """ + Raised if a MockObject is passed a private additional attribute name. + """ + + def __init__(self, attr): + Error.__init__(self) + self._attr = attr + + def __str__(self): + return ("Attribute '%s' is private and should not be available in a mock " + "object." 
% attr) + +class Mox(object): + """Mox: a factory for creating mock objects.""" + + # A list of types that should be stubbed out with MockObjects (as + # opposed to MockAnythings). + _USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType, + types.ObjectType, types.TypeType] + + def __init__(self): + """Initialize a new Mox.""" + + self._mock_objects = [] + self.stubs = stubout.StubOutForTesting() + + def CreateMock(self, class_to_mock, attrs={}): + """Create a new mock object. + + Args: + # class_to_mock: the class to be mocked + class_to_mock: class + attrs: dict of attribute names to values that will be set on the mock + object. Only public attributes may be set. + + Returns: + MockObject that can be used as the class_to_mock would be. + """ + new_mock = MockObject(class_to_mock, attrs=attrs) + self._mock_objects.append(new_mock) + return new_mock + + def CreateMockAnything(self, description=None): + """Create a mock that will accept any method calls. + + This does not enforce an interface. + + Args: + description: str. Optionally, a descriptive name for the mock object being + created, for debugging output purposes. + """ + new_mock = MockAnything(description=description) + self._mock_objects.append(new_mock) + return new_mock + + def ReplayAll(self): + """Set all mock objects to replay mode.""" + + for mock_obj in self._mock_objects: + mock_obj._Replay() + + + def VerifyAll(self): + """Call verify on all mock objects created.""" + + for mock_obj in self._mock_objects: + mock_obj._Verify() + + def ResetAll(self): + """Call reset on all mock objects. This does not unset stubs.""" + + for mock_obj in self._mock_objects: + mock_obj._Reset() + + def StubOutWithMock(self, obj, attr_name, use_mock_anything=False): + """Replace a method, attribute, etc. with a Mock. + + This will replace a class or module with a MockObject, and everything else + (method, function, etc) with a MockAnything. This can be overridden to + always use a MockAnything by setting use_mock_anything to True. + + Args: + obj: A Python object (class, module, instance, callable). + attr_name: str. The name of the attribute to replace with a mock. + use_mock_anything: bool. True if a MockAnything should be used regardless + of the type of attribute. + """ + + attr_to_replace = getattr(obj, attr_name) + + # Check for a MockAnything. This could cause confusing problems later on. + if attr_to_replace == MockAnything(): + raise TypeError('Cannot mock a MockAnything! Did you remember to ' + 'call UnsetStubs in your previous test?') + + if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything: + stub = self.CreateMock(attr_to_replace) + else: + stub = self.CreateMockAnything(description='Stub for %s' % attr_to_replace) + + self.stubs.Set(obj, attr_name, stub) + + def UnsetStubs(self): + """Restore stubs to their original state.""" + + self.stubs.UnsetAll() + +def Replay(*args): + """Put mocks into Replay mode. + + Args: + # args is any number of mocks to put into replay mode. + """ + + for mock in args: + mock._Replay() + + +def Verify(*args): + """Verify mocks. + + Args: + # args is any number of mocks to be verified. + """ + + for mock in args: + mock._Verify() + + +def Reset(*args): + """Reset mocks. + + Args: + # args is any number of mocks to be reset. + """ + + for mock in args: + mock._Reset() + + +class MockAnything: + """A mock that can be used to mock anything. + + This is helpful for mocking classes that do not provide a public interface. 
+ """ + + def __init__(self, description=None): + """Initialize a new MockAnything. + + Args: + description: str. Optionally, a descriptive name for the mock object being + created, for debugging output purposes. + """ + self._description = description + self._Reset() + + def __str__(self): + return "" % id(self) + + def __repr__(self): + return '' + + def __getattr__(self, method_name): + """Intercept method calls on this object. + + A new MockMethod is returned that is aware of the MockAnything's + state (record or replay). The call will be recorded or replayed + by the MockMethod's __call__. + + Args: + # method name: the name of the method being called. + method_name: str + + Returns: + A new MockMethod aware of MockAnything's state (record or replay). + """ + + return self._CreateMockMethod(method_name) + + def _CreateMockMethod(self, method_name, method_to_mock=None): + """Create a new mock method call and return it. + + Args: + # method_name: the name of the method being called. + # method_to_mock: The actual method being mocked, used for introspection. + method_name: str + method_to_mock: a method object + + Returns: + A new MockMethod aware of MockAnything's state (record or replay). + """ + + return MockMethod(method_name, self._expected_calls_queue, + self._replay_mode, method_to_mock=method_to_mock, + description=self._description) + + def __nonzero__(self): + """Return 1 for nonzero so the mock can be used as a conditional.""" + + return 1 + + def __eq__(self, rhs): + """Provide custom logic to compare objects.""" + + return (isinstance(rhs, MockAnything) and + self._replay_mode == rhs._replay_mode and + self._expected_calls_queue == rhs._expected_calls_queue) + + def __ne__(self, rhs): + """Provide custom logic to compare objects.""" + + return not self == rhs + + def _Replay(self): + """Start replaying expected method calls.""" + + self._replay_mode = True + + def _Verify(self): + """Verify that all of the expected calls have been made. + + Raises: + ExpectedMethodCallsError: if there are still more method calls in the + expected queue. + """ + + # If the list of expected calls is not empty, raise an exception + if self._expected_calls_queue: + # The last MultipleTimesGroup is not popped from the queue. + if (len(self._expected_calls_queue) == 1 and + isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and + self._expected_calls_queue[0].IsSatisfied()): + pass + else: + raise ExpectedMethodCallsError(self._expected_calls_queue) + + def _Reset(self): + """Reset the state of this mock to record mode with an empty queue.""" + + # Maintain a list of method calls we are expecting + self._expected_calls_queue = deque() + + # Make sure we are in setup mode, not replay mode + self._replay_mode = False + + +class MockObject(MockAnything, object): + """A mock object that simulates the public/protected interface of a class.""" + + def __init__(self, class_to_mock, attrs={}): + """Initialize a mock object. + + This determines the methods and properties of the class and stores them. + + Args: + # class_to_mock: class to be mocked + class_to_mock: class + attrs: dict of attribute names to values that will be set on the mock + object. Only public attributes may be set. + + Raises: + PrivateAttributeError: if a supplied attribute is not public. + ValueError: if an attribute would mask an existing method. + """ + + # This is used to hack around the mixin/inheritance of MockAnything, which + # is not a proper object (it can be anything. 
:-) + MockAnything.__dict__['__init__'](self) + + # Get a list of all the public and special methods we should mock. + self._known_methods = set() + self._known_vars = set() + self._class_to_mock = class_to_mock + try: + self._description = class_to_mock.__name__ + # If class_to_mock is a mock itself, then we'll get an UnknownMethodCall + # error here from the underlying call to __getattr__('__name__') + except (UnknownMethodCallError, AttributeError): + try: + self._description = type(class_to_mock).__name__ + except AttributeError: + pass + + for method in dir(class_to_mock): + if callable(getattr(class_to_mock, method)): + self._known_methods.add(method) + else: + self._known_vars.add(method) + + # Set additional attributes at instantiation time; this is quicker + # than manually setting attributes that are normally created in + # __init__. + for attr, value in attrs.items(): + if attr.startswith("_"): + raise PrivateAttributeError(attr) + elif attr in self._known_methods: + raise ValueError("'%s' is a method of '%s' objects." % (attr, + class_to_mock)) + else: + setattr(self, attr, value) + + def __getattr__(self, name): + """Intercept attribute request on this object. + + If the attribute is a public class variable, it will be returned and not + recorded as a call. + + If the attribute is not a variable, it is handled like a method + call. The method name is checked against the set of mockable + methods, and a new MockMethod is returned that is aware of the + MockObject's state (record or replay). The call will be recorded + or replayed by the MockMethod's __call__. + + Args: + # name: the name of the attribute being requested. + name: str + + Returns: + Either a class variable or a new MockMethod that is aware of the state + of the mock (record or replay). + + Raises: + UnknownMethodCallError if the MockObject does not mock the requested + method. + """ + + if name in self._known_vars: + return getattr(self._class_to_mock, name) + + if name in self._known_methods: + return self._CreateMockMethod( + name, + method_to_mock=getattr(self._class_to_mock, name)) + + raise UnknownMethodCallError(name) + + def __eq__(self, rhs): + """Provide custom logic to compare objects.""" + + return (isinstance(rhs, MockObject) and + self._class_to_mock == rhs._class_to_mock and + self._replay_mode == rhs._replay_mode and + self._expected_calls_queue == rhs._expected_calls_queue) + + def __setitem__(self, key, value): + """Provide custom logic for mocking classes that support item assignment. + + Args: + key: Key to set the value for. + value: Value to set. + + Returns: + Expected return value in replay mode. A MockMethod object for the + __setitem__ method that has already been called if not in replay mode. + + Raises: + TypeError if the underlying class does not support item assignment. + UnexpectedMethodCallError if the object does not expect the call to + __setitem__. + + """ + # Verify the class supports item assignment. + if '__setitem__' not in dir(self._class_to_mock): + raise TypeError('object does not support item assignment') + + # If we are in replay mode then simply call the mock __setitem__ method. + if self._replay_mode: + return MockMethod('__setitem__', self._expected_calls_queue, + self._replay_mode)(key, value) + + + # Otherwise, create a mock method __setitem__. + return self._CreateMockMethod('__setitem__')(key, value) + + def __getitem__(self, key): + """Provide custom logic for mocking classes that are subscriptable. + + Args: + key: Key to return the value for. 
+ + Returns: + Expected return value in replay mode. A MockMethod object for the + __getitem__ method that has already been called if not in replay mode. + + Raises: + TypeError if the underlying class is not subscriptable. + UnexpectedMethodCallError if the object does not expect the call to + __getitem__. + + """ + # Verify the class supports item assignment. + if '__getitem__' not in dir(self._class_to_mock): + raise TypeError('unsubscriptable object') + + # If we are in replay mode then simply call the mock __getitem__ method. + if self._replay_mode: + return MockMethod('__getitem__', self._expected_calls_queue, + self._replay_mode)(key) + + + # Otherwise, create a mock method __getitem__. + return self._CreateMockMethod('__getitem__')(key) + + def __iter__(self): + """Provide custom logic for mocking classes that are iterable. + + Returns: + Expected return value in replay mode. A MockMethod object for the + __iter__ method that has already been called if not in replay mode. + + Raises: + TypeError if the underlying class is not iterable. + UnexpectedMethodCallError if the object does not expect the call to + __iter__. + + """ + methods = dir(self._class_to_mock) + + # Verify the class supports iteration. + if '__iter__' not in methods: + # If it doesn't have iter method and we are in replay method, then try to + # iterate using subscripts. + if '__getitem__' not in methods or not self._replay_mode: + raise TypeError('not iterable object') + else: + results = [] + index = 0 + try: + while True: + results.append(self[index]) + index += 1 + except IndexError: + return iter(results) + + # If we are in replay mode then simply call the mock __iter__ method. + if self._replay_mode: + return MockMethod('__iter__', self._expected_calls_queue, + self._replay_mode)() + + + # Otherwise, create a mock method __iter__. + return self._CreateMockMethod('__iter__')() + + + def __contains__(self, key): + """Provide custom logic for mocking classes that contain items. + + Args: + key: Key to look in container for. + + Returns: + Expected return value in replay mode. A MockMethod object for the + __contains__ method that has already been called if not in replay mode. + + Raises: + TypeError if the underlying class does not implement __contains__ + UnexpectedMethodCaller if the object does not expect the call to + __contains__. + + """ + contains = self._class_to_mock.__dict__.get('__contains__', None) + + if contains is None: + raise TypeError('unsubscriptable object') + + if self._replay_mode: + return MockMethod('__contains__', self._expected_calls_queue, + self._replay_mode)(key) + + return self._CreateMockMethod('__contains__')(key) + + def __call__(self, *params, **named_params): + """Provide custom logic for mocking classes that are callable.""" + + # Verify the class we are mocking is callable. + callable = hasattr(self._class_to_mock, '__call__') + if not callable: + raise TypeError('Not callable') + + # Because the call is happening directly on this object instead of a method, + # the call on the mock method is made right here + mock_method = self._CreateMockMethod('__call__') + return mock_method(*params, **named_params) + + @property + def __class__(self): + """Return the class that is being mocked.""" + + return self._class_to_mock + + +class MethodCallChecker(object): + """Ensures that methods are called correctly.""" + + _NEEDED, _DEFAULT, _GIVEN = range(3) + + def __init__(self, method): + """Creates a checker. + + Args: + # method: A method to check. 
+ method: function + + Raises: + ValueError: method could not be inspected, so checks aren't possible. + Some methods and functions like built-ins can't be inspected. + """ + try: + self._args, varargs, varkw, defaults = inspect.getargspec(method) + except TypeError: + raise ValueError('Could not get argument specification for %r' + % (method,)) + if inspect.ismethod(method): + self._args = self._args[1:] # Skip 'self'. + self._method = method + + self._has_varargs = varargs is not None + self._has_varkw = varkw is not None + if defaults is None: + self._required_args = self._args + self._default_args = [] + else: + self._required_args = self._args[:-len(defaults)] + self._default_args = self._args[-len(defaults):] + + def _RecordArgumentGiven(self, arg_name, arg_status): + """Mark an argument as being given. + + Args: + # arg_name: The name of the argument to mark in arg_status. + # arg_status: Maps argument names to one of _NEEDED, _DEFAULT, _GIVEN. + arg_name: string + arg_status: dict + + Raises: + AttributeError: arg_name is already marked as _GIVEN. + """ + if arg_status.get(arg_name, None) == MethodCallChecker._GIVEN: + raise AttributeError('%s provided more than once' % (arg_name,)) + arg_status[arg_name] = MethodCallChecker._GIVEN + + def Check(self, params, named_params): + """Ensures that the parameters used while recording a call are valid. + + Args: + # params: A list of positional parameters. + # named_params: A dict of named parameters. + params: list + named_params: dict + + Raises: + AttributeError: the given parameters don't work with the given method. + """ + arg_status = dict((a, MethodCallChecker._NEEDED) + for a in self._required_args) + for arg in self._default_args: + arg_status[arg] = MethodCallChecker._DEFAULT + + # Check that each positional param is valid. + for i in range(len(params)): + try: + arg_name = self._args[i] + except IndexError: + if not self._has_varargs: + raise AttributeError('%s does not take %d or more positional ' + 'arguments' % (self._method.__name__, i)) + else: + self._RecordArgumentGiven(arg_name, arg_status) + + # Check each keyword argument. + for arg_name in named_params: + if arg_name not in arg_status and not self._has_varkw: + raise AttributeError('%s is not expecting keyword argument %s' + % (self._method.__name__, arg_name)) + self._RecordArgumentGiven(arg_name, arg_status) + + # Ensure all the required arguments have been given. + still_needed = [k for k, v in arg_status.iteritems() + if v == MethodCallChecker._NEEDED] + if still_needed: + raise AttributeError('No values given for arguments %s' + % (' '.join(sorted(still_needed)))) + + +class MockMethod(object): + """Callable mock method. + + A MockMethod should act exactly like the method it mocks, accepting parameters + and returning a value, or throwing an exception (as specified). When this + method is called, it can optionally verify whether the called method (name and + signature) matches the expected method. + """ + + def __init__(self, method_name, call_queue, replay_mode, + method_to_mock=None, description=None): + """Construct a new mock method. + + Args: + # method_name: the name of the method + # call_queue: deque of calls, verify this call against the head, or add + # this call to the queue. + # replay_mode: False if we are recording, True if we are verifying calls + # against the call queue. + # method_to_mock: The actual method being mocked, used for introspection. + # description: optionally, a descriptive name for this method. 
Typically + # this is equal to the descriptive name of the method's class. + method_name: str + call_queue: list or deque + replay_mode: bool + method_to_mock: a method object + description: str or None + """ + + self._name = method_name + self.__name__ = method_name + self._call_queue = call_queue + if not isinstance(call_queue, deque): + self._call_queue = deque(self._call_queue) + self._replay_mode = replay_mode + self._description = description + + self._params = None + self._named_params = None + self._return_value = None + self._exception = None + self._side_effects = None + + try: + self._checker = MethodCallChecker(method_to_mock) + except ValueError: + self._checker = None + + def __call__(self, *params, **named_params): + """Log parameters and return the specified return value. + + If the Mock(Anything/Object) associated with this call is in record mode, + this MockMethod will be pushed onto the expected call queue. If the mock + is in replay mode, this will pop a MockMethod off the top of the queue and + verify this call is equal to the expected call. + + Raises: + UnexpectedMethodCall if this call is supposed to match an expected method + call and it does not. + """ + + self._params = params + self._named_params = named_params + + if not self._replay_mode: + if self._checker is not None: + self._checker.Check(params, named_params) + self._call_queue.append(self) + return self + + expected_method = self._VerifyMethodCall() + + if expected_method._side_effects: + result = expected_method._side_effects(*params, **named_params) + if expected_method._return_value is None: + expected_method._return_value = result + + if expected_method._exception: + raise expected_method._exception + + return expected_method._return_value + + def __getattr__(self, name): + """Raise an AttributeError with a helpful message.""" + + raise AttributeError('MockMethod has no attribute "%s". ' + 'Did you remember to put your mocks in replay mode?' % name) + + def __iter__(self): + """Raise a TypeError with a helpful message.""" + raise TypeError('MockMethod cannot be iterated. ' + 'Did you remember to put your mocks in replay mode?') + + def next(self): + """Raise a TypeError with a helpful message.""" + raise TypeError('MockMethod cannot be iterated. ' + 'Did you remember to put your mocks in replay mode?') + + def _PopNextMethod(self): + """Pop the next method from our call queue.""" + try: + return self._call_queue.popleft() + except IndexError: + raise UnexpectedMethodCallError(self, None) + + def _VerifyMethodCall(self): + """Verify the called method is expected. + + This can be an ordered method, or part of an unordered set. + + Returns: + The expected mock method. + + Raises: + UnexpectedMethodCall if the method called was not expected. + """ + + expected = self._PopNextMethod() + + # Loop here, because we might have a MethodGroup followed by another + # group. + while isinstance(expected, MethodGroup): + expected, method = expected.MethodCalled(self) + if method is not None: + return method + + # This is a mock method, so just check equality. 
+ if expected != self: + raise UnexpectedMethodCallError(self, expected) + + return expected + + def __str__(self): + params = ', '.join( + [repr(p) for p in self._params or []] + + ['%s=%r' % x for x in sorted((self._named_params or {}).items())]) + full_desc = "%s(%s) -> %r" % (self._name, params, self._return_value) + if self._description: + full_desc = "%s.%s" % (self._description, full_desc) + return full_desc + + def __eq__(self, rhs): + """Test whether this MockMethod is equivalent to another MockMethod. + + Args: + # rhs: the right hand side of the test + rhs: MockMethod + """ + + return (isinstance(rhs, MockMethod) and + self._name == rhs._name and + self._params == rhs._params and + self._named_params == rhs._named_params) + + def __ne__(self, rhs): + """Test whether this MockMethod is not equivalent to another MockMethod. + + Args: + # rhs: the right hand side of the test + rhs: MockMethod + """ + + return not self == rhs + + def GetPossibleGroup(self): + """Returns a possible group from the end of the call queue or None if no + other methods are on the stack. + """ + + # Remove this method from the tail of the queue so we can add it to a group. + this_method = self._call_queue.pop() + assert this_method == self + + # Determine if the tail of the queue is a group, or just a regular ordered + # mock method. + group = None + try: + group = self._call_queue[-1] + except IndexError: + pass + + return group + + def _CheckAndCreateNewGroup(self, group_name, group_class): + """Checks if the last method (a possible group) is an instance of our + group_class. Adds the current method to this group or creates a new one. + + Args: + + group_name: the name of the group. + group_class: the class used to create instance of this new group + """ + group = self.GetPossibleGroup() + + # If this is a group, and it is the correct group, add the method. + if isinstance(group, group_class) and group.group_name() == group_name: + group.AddMethod(self) + return self + + # Create a new group and add the method. + new_group = group_class(group_name) + new_group.AddMethod(self) + self._call_queue.append(new_group) + return self + + def InAnyOrder(self, group_name="default"): + """Move this method into a group of unordered calls. + + A group of unordered calls must be defined together, and must be executed + in full before the next expected method can be called. There can be + multiple groups that are expected serially, if they are given + different group names. The same group name can be reused if there is a + standard method call, or a group with a different name, spliced between + usages. + + Args: + group_name: the name of the unordered group. + + Returns: + self + """ + return self._CheckAndCreateNewGroup(group_name, UnorderedGroup) + + def MultipleTimes(self, group_name="default"): + """Move this method into group of calls which may be called multiple times. + + A group of repeating calls must be defined together, and must be executed in + full before the next expected mehtod can be called. + + Args: + group_name: the name of the unordered group. + + Returns: + self + """ + return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup) + + def AndReturn(self, return_value): + """Set the value to return when this method is called. + + Args: + # return_value can be anything. + """ + + self._return_value = return_value + return return_value + + def AndRaise(self, exception): + """Set the exception to raise when this method is called. 
+ + Args: + # exception: the exception to raise when this method is called. + exception: Exception + """ + + self._exception = exception + + def WithSideEffects(self, side_effects): + """Set the side effects that are simulated when this method is called. + + Args: + side_effects: A callable which modifies the parameters or other relevant + state which a given test case depends on. + + Returns: + Self for chaining with AndReturn and AndRaise. + """ + self._side_effects = side_effects + return self + +class Comparator: + """Base class for all Mox comparators. + + A Comparator can be used as a parameter to a mocked method when the exact + value is not known. For example, the code you are testing might build up a + long SQL string that is passed to your mock DAO. You're only interested that + the IN clause contains the proper primary keys, so you can set your mock + up as follows: + + mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result) + + Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'. + + A Comparator may replace one or more parameters, for example: + # return at most 10 rows + mock_dao.RunQuery(StrContains('SELECT'), 10) + + or + + # Return some non-deterministic number of rows + mock_dao.RunQuery(StrContains('SELECT'), IsA(int)) + """ + + def equals(self, rhs): + """Special equals method that all comparators must implement. + + Args: + rhs: any python object + """ + + raise NotImplementedError, 'method must be implemented by a subclass.' + + def __eq__(self, rhs): + return self.equals(rhs) + + def __ne__(self, rhs): + return not self.equals(rhs) + + +class IsA(Comparator): + """This class wraps a basic Python type or class. It is used to verify + that a parameter is of the given type or class. + + Example: + mock_dao.Connect(IsA(DbConnectInfo)) + """ + + def __init__(self, class_name): + """Initialize IsA + + Args: + class_name: basic python type or a class + """ + + self._class_name = class_name + + def equals(self, rhs): + """Check to see if the RHS is an instance of class_name. + + Args: + # rhs: the right hand side of the test + rhs: object + + Returns: + bool + """ + + try: + return isinstance(rhs, self._class_name) + except TypeError: + # Check raw types if there was a type error. This is helpful for + # things like cStringIO.StringIO. + return type(rhs) == type(self._class_name) + + def __repr__(self): + return str(self._class_name) + +class IsAlmost(Comparator): + """Comparison class used to check whether a parameter is nearly equal + to a given value. Generally useful for floating point numbers. + + Example mock_dao.SetTimeout((IsAlmost(3.9))) + """ + + def __init__(self, float_value, places=7): + """Initialize IsAlmost. + + Args: + float_value: The value for making the comparison. + places: The number of decimal places to round to. + """ + + self._float_value = float_value + self._places = places + + def equals(self, rhs): + """Check to see if RHS is almost equal to float_value + + Args: + rhs: the value to compare to float_value + + Returns: + bool + """ + + try: + return round(rhs-self._float_value, self._places) == 0 + except TypeError: + # This is probably because either float_value or rhs is not a number. + return False + + def __repr__(self): + return str(self._float_value) + +class StrContains(Comparator): + """Comparison class used to check whether a substring exists in a + string parameter. This can be useful in mocking a database with SQL + passed in as a string parameter, for example. 
+ + Example: + mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result) + """ + + def __init__(self, search_string): + """Initialize. + + Args: + # search_string: the string you are searching for + search_string: str + """ + + self._search_string = search_string + + def equals(self, rhs): + """Check to see if the search_string is contained in the rhs string. + + Args: + # rhs: the right hand side of the test + rhs: object + + Returns: + bool + """ + + try: + return rhs.find(self._search_string) > -1 + except Exception: + return False + + def __repr__(self): + return '' % self._search_string + + +class Regex(Comparator): + """Checks if a string matches a regular expression. + + This uses a given regular expression to determine equality. + """ + + def __init__(self, pattern, flags=0): + """Initialize. + + Args: + # pattern is the regular expression to search for + pattern: str + # flags passed to re.compile function as the second argument + flags: int + """ + + self.regex = re.compile(pattern, flags=flags) + + def equals(self, rhs): + """Check to see if rhs matches regular expression pattern. + + Returns: + bool + """ + + return self.regex.search(rhs) is not None + + def __repr__(self): + s = '' % self._key + + +class Not(Comparator): + """Checks whether a predicates is False. + + Example: + mock_dao.UpdateUsers(Not(ContainsKeyValue('stevepm', stevepm_user_info))) + """ + + def __init__(self, predicate): + """Initialize. + + Args: + # predicate: a Comparator instance. + """ + + assert isinstance(predicate, Comparator), ("predicate %r must be a" + " Comparator." % predicate) + self._predicate = predicate + + def equals(self, rhs): + """Check to see whether the predicate is False. + + Args: + rhs: A value that will be given in argument of the predicate. + + Returns: + bool + """ + + return not self._predicate.equals(rhs) + + def __repr__(self): + return '' % self._predicate + + +class ContainsKeyValue(Comparator): + """Checks whether a key/value pair is in a dict parameter. + + Example: + mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info)) + """ + + def __init__(self, key, value): + """Initialize. + + Args: + # key: a key in a dict + # value: the corresponding value + """ + + self._key = key + self._value = value + + def equals(self, rhs): + """Check whether the given key/value pair is in the rhs dict. + + Returns: + bool + """ + + try: + return rhs[self._key] == self._value + except Exception: + return False + + def __repr__(self): + return '' % (self._key, self._value) + + +class ContainsAttributeValue(Comparator): + """Checks whether a passed parameter contains attributes with a given value. + + Example: + mock_dao.UpdateSomething(ContainsAttribute('stevepm', stevepm_user_info)) + """ + + def __init__(self, key, value): + """Initialize. + + Args: + # key: an attribute name of an object + # value: the corresponding value + """ + + self._key = key + self._value = value + + def equals(self, rhs): + """Check whether the given attribute has a matching value in the rhs object. + + Returns: + bool + """ + + try: + return getattr(rhs, self._key) == self._value + except Exception: + return False + + +class SameElementsAs(Comparator): + """Checks whether iterables contain the same elements (ignoring order). + + Example: + mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki')) + """ + + def __init__(self, expected_seq): + """Initialize. 
+ + Args: + expected_seq: a sequence + """ + + self._expected_seq = expected_seq + + def equals(self, actual_seq): + """Check to see whether actual_seq has same elements as expected_seq. + + Args: + actual_seq: sequence + + Returns: + bool + """ + + try: + expected = dict([(element, None) for element in self._expected_seq]) + actual = dict([(element, None) for element in actual_seq]) + except TypeError: + # Fall back to slower list-compare if any of the objects are unhashable. + expected = list(self._expected_seq) + actual = list(actual_seq) + expected.sort() + actual.sort() + return expected == actual + + def __repr__(self): + return '' % self._expected_seq + + +class And(Comparator): + """Evaluates one or more Comparators on RHS and returns an AND of the results. + """ + + def __init__(self, *args): + """Initialize. + + Args: + *args: One or more Comparator + """ + + self._comparators = args + + def equals(self, rhs): + """Checks whether all Comparators are equal to rhs. + + Args: + # rhs: can be anything + + Returns: + bool + """ + + for comparator in self._comparators: + if not comparator.equals(rhs): + return False + + return True + + def __repr__(self): + return '' % str(self._comparators) + + +class Or(Comparator): + """Evaluates one or more Comparators on RHS and returns an OR of the results. + """ + + def __init__(self, *args): + """Initialize. + + Args: + *args: One or more Mox comparators + """ + + self._comparators = args + + def equals(self, rhs): + """Checks whether any Comparator is equal to rhs. + + Args: + # rhs: can be anything + + Returns: + bool + """ + + for comparator in self._comparators: + if comparator.equals(rhs): + return True + + return False + + def __repr__(self): + return '' % str(self._comparators) + + +class Func(Comparator): + """Call a function that should verify the parameter passed in is correct. + + You may need the ability to perform more advanced operations on the parameter + in order to validate it. You can use this to have a callable validate any + parameter. The callable should return either True or False. + + + Example: + + def myParamValidator(param): + # Advanced logic here + return True + + mock_dao.DoSomething(Func(myParamValidator), true) + """ + + def __init__(self, func): + """Initialize. + + Args: + func: callable that takes one parameter and returns a bool + """ + + self._func = func + + def equals(self, rhs): + """Test whether rhs passes the function test. + + rhs is passed into func. + + Args: + rhs: any python object + + Returns: + the result of func(rhs) + """ + + return self._func(rhs) + + def __repr__(self): + return str(self._func) + + +class IgnoreArg(Comparator): + """Ignore an argument. + + This can be used when we don't care about an argument of a method call. + + Example: + # Check if CastMagic is called with 3 as first arg and 'disappear' as third. + mymock.CastMagic(3, IgnoreArg(), 'disappear') + """ + + def equals(self, unused_rhs): + """Ignores arguments and returns True. 
+ + Args: + unused_rhs: any python object + + Returns: + always returns True + """ + + return True + + def __repr__(self): + return '' + + +class MethodGroup(object): + """Base class containing common behaviour for MethodGroups.""" + + def __init__(self, group_name): + self._group_name = group_name + + def group_name(self): + return self._group_name + + def __str__(self): + return '<%s "%s">' % (self.__class__.__name__, self._group_name) + + def AddMethod(self, mock_method): + raise NotImplementedError + + def MethodCalled(self, mock_method): + raise NotImplementedError + + def IsSatisfied(self): + raise NotImplementedError + +class UnorderedGroup(MethodGroup): + """UnorderedGroup holds a set of method calls that may occur in any order. + + This construct is helpful for non-deterministic events, such as iterating + over the keys of a dict. + """ + + def __init__(self, group_name): + super(UnorderedGroup, self).__init__(group_name) + self._methods = [] + + def AddMethod(self, mock_method): + """Add a method to this group. + + Args: + mock_method: A mock method to be added to this group. + """ + + self._methods.append(mock_method) + + def MethodCalled(self, mock_method): + """Remove a method call from the group. + + If the method is not in the set, an UnexpectedMethodCallError will be + raised. + + Args: + mock_method: a mock method that should be equal to a method in the group. + + Returns: + The mock method from the group + + Raises: + UnexpectedMethodCallError if the mock_method was not in the group. + """ + + # Check to see if this method exists, and if so, remove it from the set + # and return it. + for method in self._methods: + if method == mock_method: + # Remove the called mock_method instead of the method in the group. + # The called method will match any comparators when equality is checked + # during removal. The method in the group could pass a comparator to + # another comparator during the equality check. + self._methods.remove(mock_method) + + # If this group is not empty, put it back at the head of the queue. + if not self.IsSatisfied(): + mock_method._call_queue.appendleft(self) + + return self, method + + raise UnexpectedMethodCallError(mock_method, self) + + def IsSatisfied(self): + """Return True if there are not any methods in this group.""" + + return len(self._methods) == 0 + + +class MultipleTimesGroup(MethodGroup): + """MultipleTimesGroup holds methods that may be called any number of times. + + Note: Each method must be called at least once. + + This is helpful, if you don't know or care how many times a method is called. + """ + + def __init__(self, group_name): + super(MultipleTimesGroup, self).__init__(group_name) + self._methods = set() + self._methods_left = set() + + def AddMethod(self, mock_method): + """Add a method to this group. + + Args: + mock_method: A mock method to be added to this group. + """ + + self._methods.add(mock_method) + self._methods_left.add(mock_method) + + def MethodCalled(self, mock_method): + """Remove a method call from the group. + + If the method is not in the set, an UnexpectedMethodCallError will be + raised. + + Args: + mock_method: a mock method that should be equal to a method in the group. + + Returns: + The mock method from the group + + Raises: + UnexpectedMethodCallError if the mock_method was not in the group. + """ + + # Check to see if this method exists, and if so add it to the set of + # called methods. 
+ for method in self._methods: + if method == mock_method: + self._methods_left.discard(method) + # Always put this group back on top of the queue, because we don't know + # when we are done. + mock_method._call_queue.appendleft(self) + return self, method + + if self.IsSatisfied(): + next_method = mock_method._PopNextMethod(); + return next_method, None + else: + raise UnexpectedMethodCallError(mock_method, self) + + def IsSatisfied(self): + """Return True if all methods in this group are called at least once.""" + return len(self._methods_left) == 0 + + +class MoxMetaTestBase(type): + """Metaclass to add mox cleanup and verification to every test. + + As the mox unit testing class is being constructed (MoxTestBase or a + subclass), this metaclass will modify all test functions to call the + CleanUpMox method of the test class after they finish. This means that + unstubbing and verifying will happen for every test with no additional code, + and any failures will result in test failures as opposed to errors. + """ + + def __init__(cls, name, bases, d): + type.__init__(cls, name, bases, d) + + # also get all the attributes from the base classes to account + # for a case when test class is not the immediate child of MoxTestBase + for base in bases: + for attr_name in dir(base): + if attr_name not in d: + d[attr_name] = getattr(base, attr_name) + + for func_name, func in d.items(): + if func_name.startswith('test') and callable(func): + setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func)) + + @staticmethod + def CleanUpTest(cls, func): + """Adds Mox cleanup code to any MoxTestBase method. + + Always unsets stubs after a test. Will verify all mocks for tests that + otherwise pass. + + Args: + cls: MoxTestBase or subclass; the class whose test method we are altering. + func: method; the method of the MoxTestBase test class we wish to alter. + + Returns: + The modified method. + """ + def new_method(self, *args, **kwargs): + mox_obj = getattr(self, 'mox', None) + stubout_obj = getattr(self, 'stubs', None) + cleanup_mox = False + cleanup_stubout = False + if mox_obj and isinstance(mox_obj, Mox): + cleanup_mox = True + if stubout_obj and isinstance(stubout_obj, stubout.StubOutForTesting): + cleanup_stubout = True + try: + func(self, *args, **kwargs) + finally: + if cleanup_mox: + mox_obj.UnsetStubs() + if cleanup_stubout: + stubout_obj.UnsetAll() + stubout_obj.SmartUnsetAll() + if cleanup_mox: + mox_obj.VerifyAll() + new_method.__name__ = func.__name__ + new_method.__doc__ = func.__doc__ + new_method.__module__ = func.__module__ + return new_method + + +class MoxTestBase(unittest.TestCase): + """Convenience test class to make stubbing easier. + + Sets up a "mox" attribute which is an instance of Mox (any mox tests will + want this), and a "stubs" attribute that is an instance of StubOutForTesting + (needed at times). Also automatically unsets any stubs and verifies that all + mock methods have been called at the end of each test, eliminating boilerplate + code. + """ + + __metaclass__ = MoxMetaTestBase + + def setUp(self): + super(MoxTestBase, self).setUp() + self.mox = Mox() + self.stubs = stubout.StubOutForTesting() diff --git a/vendor/pymox/mox_test.py b/vendor/pymox/mox_test.py new file mode 100755 index 000000000000..ea12176ba030 --- /dev/null +++ b/vendor/pymox/mox_test.py @@ -0,0 +1,1853 @@ +#!/usr/bin/python2.4 +# +# Unit tests for Mox. +# +# Copyright 2008 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import cStringIO +import unittest +import re + +import mox + +import mox_test_helper + +OS_LISTDIR = mox_test_helper.os.listdir + +class ExpectedMethodCallsErrorTest(unittest.TestCase): + """Test creation and string conversion of ExpectedMethodCallsError.""" + + def testAtLeastOneMethod(self): + self.assertRaises(ValueError, mox.ExpectedMethodCallsError, []) + + def testOneError(self): + method = mox.MockMethod("testMethod", [], False) + method(1, 2).AndReturn('output') + e = mox.ExpectedMethodCallsError([method]) + self.assertEqual( + "Verify: Expected methods never called:\n" + " 0. testMethod(1, 2) -> 'output'", + str(e)) + + def testManyErrors(self): + method1 = mox.MockMethod("testMethod", [], False) + method1(1, 2).AndReturn('output') + method2 = mox.MockMethod("testMethod", [], False) + method2(a=1, b=2, c="only named") + method3 = mox.MockMethod("testMethod2", [], False) + method3().AndReturn(44) + method4 = mox.MockMethod("testMethod", [], False) + method4(1, 2).AndReturn('output') + e = mox.ExpectedMethodCallsError([method1, method2, method3, method4]) + self.assertEqual( + "Verify: Expected methods never called:\n" + " 0. testMethod(1, 2) -> 'output'\n" + " 1. testMethod(a=1, b=2, c='only named') -> None\n" + " 2. testMethod2() -> 44\n" + " 3. testMethod(1, 2) -> 'output'", + str(e)) + + +class OrTest(unittest.TestCase): + """Test Or correctly chains Comparators.""" + + def testValidOr(self): + """Or should be True if either Comparator returns True.""" + self.assert_(mox.Or(mox.IsA(dict), mox.IsA(str)) == {}) + self.assert_(mox.Or(mox.IsA(dict), mox.IsA(str)) == 'test') + self.assert_(mox.Or(mox.IsA(str), mox.IsA(str)) == 'test') + + def testInvalidOr(self): + """Or should be False if both Comparators return False.""" + self.failIf(mox.Or(mox.IsA(dict), mox.IsA(str)) == 0) + + +class AndTest(unittest.TestCase): + """Test And correctly chains Comparators.""" + + def testValidAnd(self): + """And should be True if both Comparators return True.""" + self.assert_(mox.And(mox.IsA(str), mox.IsA(str)) == '1') + + def testClauseOneFails(self): + """And should be False if the first Comparator returns False.""" + + self.failIf(mox.And(mox.IsA(dict), mox.IsA(str)) == '1') + + def testAdvancedUsage(self): + """And should work with other Comparators. + + Note: this test is reliant on In and ContainsKeyValue. 
+ """ + test_dict = {"mock" : "obj", "testing" : "isCOOL"} + self.assert_(mox.And(mox.In("testing"), + mox.ContainsKeyValue("mock", "obj")) == test_dict) + + def testAdvancedUsageFails(self): + """Note: this test is reliant on In and ContainsKeyValue.""" + test_dict = {"mock" : "obj", "testing" : "isCOOL"} + self.failIf(mox.And(mox.In("NOTFOUND"), + mox.ContainsKeyValue("mock", "obj")) == test_dict) + + +class SameElementsAsTest(unittest.TestCase): + """Test SameElementsAs correctly identifies sequences with same elements.""" + + def testSortedLists(self): + """Should return True if two lists are exactly equal.""" + self.assert_(mox.SameElementsAs([1, 2.0, 'c']) == [1, 2.0, 'c']) + + def testUnsortedLists(self): + """Should return True if two lists are unequal but have same elements.""" + self.assert_(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c', 1]) + + def testUnhashableLists(self): + """Should return True if two lists have the same unhashable elements.""" + self.assert_(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) == + [{2: 'b'}, {'a': 1}]) + + def testEmptyLists(self): + """Should return True for two empty lists.""" + self.assert_(mox.SameElementsAs([]) == []) + + def testUnequalLists(self): + """Should return False if the lists are not equal.""" + self.failIf(mox.SameElementsAs([1, 2.0, 'c']) == [2.0, 'c']) + + def testUnequalUnhashableLists(self): + """Should return False if two lists with unhashable elements are unequal.""" + self.failIf(mox.SameElementsAs([{'a': 1}, {2: 'b'}]) == [{2: 'b'}]) + + +class ContainsKeyValueTest(unittest.TestCase): + """Test ContainsKeyValue correctly identifies key/value pairs in a dict. + """ + + def testValidPair(self): + """Should return True if the key value is in the dict.""" + self.assert_(mox.ContainsKeyValue("key", 1) == {"key": 1}) + + def testInvalidValue(self): + """Should return False if the value is not correct.""" + self.failIf(mox.ContainsKeyValue("key", 1) == {"key": 2}) + + def testInvalidKey(self): + """Should return False if they key is not in the dict.""" + self.failIf(mox.ContainsKeyValue("qux", 1) == {"key": 2}) + + +class ContainsAttributeValueTest(unittest.TestCase): + """Test ContainsAttributeValue correctly identifies properties in an object. 
+ """ + + def setUp(self): + """Create an object to test with.""" + + + class TestObject(object): + key = 1 + + self.test_object = TestObject() + + def testValidPair(self): + """Should return True if the object has the key attribute and it matches.""" + self.assert_(mox.ContainsAttributeValue("key", 1) == self.test_object) + + def testInvalidValue(self): + """Should return False if the value is not correct.""" + self.failIf(mox.ContainsKeyValue("key", 2) == self.test_object) + + def testInvalidKey(self): + """Should return False if they the object doesn't have the property.""" + self.failIf(mox.ContainsKeyValue("qux", 1) == self.test_object) + + +class InTest(unittest.TestCase): + """Test In correctly identifies a key in a list/dict""" + + def testItemInList(self): + """Should return True if the item is in the list.""" + self.assert_(mox.In(1) == [1, 2, 3]) + + def testKeyInDict(self): + """Should return True if the item is a key in a dict.""" + self.assert_(mox.In("test") == {"test" : "module"}) + + +class NotTest(unittest.TestCase): + """Test Not correctly identifies False predicates.""" + + def testItemInList(self): + """Should return True if the item is NOT in the list.""" + self.assert_(mox.Not(mox.In(42)) == [1, 2, 3]) + + def testKeyInDict(self): + """Should return True if the item is NOT a key in a dict.""" + self.assert_(mox.Not(mox.In("foo")) == {"key" : 42}) + + def testInvalidKeyWithNot(self): + """Should return False if they key is NOT in the dict.""" + self.assert_(mox.Not(mox.ContainsKeyValue("qux", 1)) == {"key": 2}) + + +class StrContainsTest(unittest.TestCase): + """Test StrContains correctly checks for substring occurrence of a parameter. + """ + + def testValidSubstringAtStart(self): + """Should return True if the substring is at the start of the string.""" + self.assert_(mox.StrContains("hello") == "hello world") + + def testValidSubstringInMiddle(self): + """Should return True if the substring is in the middle of the string.""" + self.assert_(mox.StrContains("lo wo") == "hello world") + + def testValidSubstringAtEnd(self): + """Should return True if the substring is at the end of the string.""" + self.assert_(mox.StrContains("ld") == "hello world") + + def testInvaildSubstring(self): + """Should return False if the substring is not in the string.""" + self.failIf(mox.StrContains("AAA") == "hello world") + + def testMultipleMatches(self): + """Should return True if there are multiple occurances of substring.""" + self.assert_(mox.StrContains("abc") == "ababcabcabcababc") + + +class RegexTest(unittest.TestCase): + """Test Regex correctly matches regular expressions.""" + + def testIdentifyBadSyntaxDuringInit(self): + """The user should know immediately if a regex has bad syntax.""" + self.assertRaises(re.error, mox.Regex, '(a|b') + + def testPatternInMiddle(self): + """Should return True if the pattern matches at the middle of the string. + + This ensures that re.search is used (instead of re.find). 
+ """ + self.assert_(mox.Regex(r"a\s+b") == "x y z a b c") + + def testNonMatchPattern(self): + """Should return False if the pattern does not match the string.""" + self.failIf(mox.Regex(r"a\s+b") == "x y z") + + def testFlagsPassedCorrectly(self): + """Should return True as we pass IGNORECASE flag.""" + self.assert_(mox.Regex(r"A", re.IGNORECASE) == "a") + + def testReprWithoutFlags(self): + """repr should return the regular expression pattern.""" + self.assert_(repr(mox.Regex(r"a\s+b")) == "") + + def testReprWithFlags(self): + """repr should return the regular expression pattern and flags.""" + self.assert_(repr(mox.Regex(r"a\s+b", flags=4)) == + "") + + +class IsATest(unittest.TestCase): + """Verify IsA correctly checks equality based upon class type, not value.""" + + def testEqualityValid(self): + """Verify that == correctly identifies objects of the same type.""" + self.assert_(mox.IsA(str) == 'test') + + def testEqualityInvalid(self): + """Verify that == correctly identifies objects of different types.""" + self.failIf(mox.IsA(str) == 10) + + def testInequalityValid(self): + """Verify that != identifies objects of different type.""" + self.assert_(mox.IsA(str) != 10) + + def testInequalityInvalid(self): + """Verify that != correctly identifies objects of the same type.""" + self.failIf(mox.IsA(str) != "test") + + def testEqualityInListValid(self): + """Verify list contents are properly compared.""" + isa_list = [mox.IsA(str), mox.IsA(str)] + str_list = ["abc", "def"] + self.assert_(isa_list == str_list) + + def testEquailtyInListInvalid(self): + """Verify list contents are properly compared.""" + isa_list = [mox.IsA(str),mox.IsA(str)] + mixed_list = ["abc", 123] + self.failIf(isa_list == mixed_list) + + def testSpecialTypes(self): + """Verify that IsA can handle objects like cStringIO.StringIO.""" + isA = mox.IsA(cStringIO.StringIO()) + stringIO = cStringIO.StringIO() + self.assert_(isA == stringIO) + + +class IsAlmostTest(unittest.TestCase): + """Verify IsAlmost correctly checks equality of floating point numbers.""" + + def testEqualityValid(self): + """Verify that == correctly identifies nearly equivalent floats.""" + self.assertEquals(mox.IsAlmost(1.8999999999), 1.9) + + def testEqualityInvalid(self): + """Verify that == correctly identifies non-equivalent floats.""" + self.assertNotEquals(mox.IsAlmost(1.899), 1.9) + + def testEqualityWithPlaces(self): + """Verify that specifying places has the desired effect.""" + self.assertNotEquals(mox.IsAlmost(1.899), 1.9) + self.assertEquals(mox.IsAlmost(1.899, places=2), 1.9) + + def testNonNumericTypes(self): + """Verify that IsAlmost handles non-numeric types properly.""" + + self.assertNotEquals(mox.IsAlmost(1.8999999999), '1.9') + self.assertNotEquals(mox.IsAlmost('1.8999999999'), 1.9) + self.assertNotEquals(mox.IsAlmost('1.8999999999'), '1.9') + + +class MockMethodTest(unittest.TestCase): + """Test class to verify that the MockMethod class is working correctly.""" + + def setUp(self): + self.expected_method = mox.MockMethod("testMethod", [], False)(['original']) + self.mock_method = mox.MockMethod("testMethod", [self.expected_method], + True) + + def testNameAttribute(self): + """Should provide a __name__ attribute.""" + self.assertEquals('testMethod', self.mock_method.__name__) + + def testAndReturnNoneByDefault(self): + """Should return None by default.""" + return_value = self.mock_method(['original']) + self.assert_(return_value == None) + + def testAndReturnValue(self): + """Should return a specificed return value.""" + 
expected_return_value = "test" + self.expected_method.AndReturn(expected_return_value) + return_value = self.mock_method(['original']) + self.assert_(return_value == expected_return_value) + + def testAndRaiseException(self): + """Should raise a specified exception.""" + expected_exception = Exception('test exception') + self.expected_method.AndRaise(expected_exception) + self.assertRaises(Exception, self.mock_method) + + def testWithSideEffects(self): + """Should call state modifier.""" + local_list = ['original'] + def modifier(mutable_list): + self.assertTrue(local_list is mutable_list) + mutable_list[0] = 'mutation' + self.expected_method.WithSideEffects(modifier).AndReturn(1) + self.mock_method(local_list) + self.assertEquals('mutation', local_list[0]) + + def testWithReturningSideEffects(self): + """Should call state modifier and propagate its return value.""" + local_list = ['original'] + expected_return = 'expected_return' + def modifier_with_return(mutable_list): + self.assertTrue(local_list is mutable_list) + mutable_list[0] = 'mutation' + return expected_return + self.expected_method.WithSideEffects(modifier_with_return) + actual_return = self.mock_method(local_list) + self.assertEquals('mutation', local_list[0]) + self.assertEquals(expected_return, actual_return) + + def testWithReturningSideEffectsWithAndReturn(self): + """Should call state modifier and ignore its return value.""" + local_list = ['original'] + expected_return = 'expected_return' + unexpected_return = 'unexpected_return' + def modifier_with_return(mutable_list): + self.assertTrue(local_list is mutable_list) + mutable_list[0] = 'mutation' + return unexpected_return + self.expected_method.WithSideEffects(modifier_with_return).AndReturn( + expected_return) + actual_return = self.mock_method(local_list) + self.assertEquals('mutation', local_list[0]) + self.assertEquals(expected_return, actual_return) + + def testEqualityNoParamsEqual(self): + """Methods with the same name and without params should be equal.""" + expected_method = mox.MockMethod("testMethod", [], False) + self.assertEqual(self.mock_method, expected_method) + + def testEqualityNoParamsNotEqual(self): + """Methods with different names and without params should not be equal.""" + expected_method = mox.MockMethod("otherMethod", [], False) + self.failIfEqual(self.mock_method, expected_method) + + def testEqualityParamsEqual(self): + """Methods with the same name and parameters should be equal.""" + params = [1, 2, 3] + expected_method = mox.MockMethod("testMethod", [], False) + expected_method._params = params + + self.mock_method._params = params + self.assertEqual(self.mock_method, expected_method) + + def testEqualityParamsNotEqual(self): + """Methods with the same name and different params should not be equal.""" + expected_method = mox.MockMethod("testMethod", [], False) + expected_method._params = [1, 2, 3] + + self.mock_method._params = ['a', 'b', 'c'] + self.failIfEqual(self.mock_method, expected_method) + + def testEqualityNamedParamsEqual(self): + """Methods with the same name and same named params should be equal.""" + named_params = {"input1": "test", "input2": "params"} + expected_method = mox.MockMethod("testMethod", [], False) + expected_method._named_params = named_params + + self.mock_method._named_params = named_params + self.assertEqual(self.mock_method, expected_method) + + def testEqualityNamedParamsNotEqual(self): + """Methods with the same name and diffnamed params should not be equal.""" + expected_method = 
mox.MockMethod("testMethod", [], False) + expected_method._named_params = {"input1": "test", "input2": "params"} + + self.mock_method._named_params = {"input1": "test2", "input2": "params2"} + self.failIfEqual(self.mock_method, expected_method) + + def testEqualityWrongType(self): + """Method should not be equal to an object of a different type.""" + self.failIfEqual(self.mock_method, "string?") + + def testObjectEquality(self): + """Equality of objects should work without a Comparator""" + instA = TestClass(); + instB = TestClass(); + + params = [instA, ] + expected_method = mox.MockMethod("testMethod", [], False) + expected_method._params = params + + self.mock_method._params = [instB, ] + self.assertEqual(self.mock_method, expected_method) + + def testStrConversion(self): + method = mox.MockMethod("f", [], False) + method(1, 2, "st", n1=8, n2="st2") + self.assertEqual(str(method), ("f(1, 2, 'st', n1=8, n2='st2') -> None")) + + method = mox.MockMethod("testMethod", [], False) + method(1, 2, "only positional") + self.assertEqual(str(method), "testMethod(1, 2, 'only positional') -> None") + + method = mox.MockMethod("testMethod", [], False) + method(a=1, b=2, c="only named") + self.assertEqual(str(method), + "testMethod(a=1, b=2, c='only named') -> None") + + method = mox.MockMethod("testMethod", [], False) + method() + self.assertEqual(str(method), "testMethod() -> None") + + method = mox.MockMethod("testMethod", [], False) + method(x="only 1 parameter") + self.assertEqual(str(method), "testMethod(x='only 1 parameter') -> None") + + method = mox.MockMethod("testMethod", [], False) + method().AndReturn('return_value') + self.assertEqual(str(method), "testMethod() -> 'return_value'") + + method = mox.MockMethod("testMethod", [], False) + method().AndReturn(('a', {1: 2})) + self.assertEqual(str(method), "testMethod() -> ('a', {1: 2})") + + +class MockAnythingTest(unittest.TestCase): + """Verify that the MockAnything class works as expected.""" + + def setUp(self): + self.mock_object = mox.MockAnything() + + def testRepr(self): + """Calling repr on a MockAnything instance must work.""" + self.assertEqual('', repr(self.mock_object)) + + def testSetupMode(self): + """Verify the mock will accept any call.""" + self.mock_object.NonsenseCall() + self.assert_(len(self.mock_object._expected_calls_queue) == 1) + + def testReplayWithExpectedCall(self): + """Verify the mock replays method calls as expected.""" + self.mock_object.ValidCall() # setup method call + self.mock_object._Replay() # start replay mode + self.mock_object.ValidCall() # make method call + + def testReplayWithUnexpectedCall(self): + """Unexpected method calls should raise UnexpectedMethodCallError.""" + self.mock_object.ValidCall() # setup method call + self.mock_object._Replay() # start replay mode + self.assertRaises(mox.UnexpectedMethodCallError, + self.mock_object.OtherValidCall) + + def testVerifyWithCompleteReplay(self): + """Verify should not raise an exception for a valid replay.""" + self.mock_object.ValidCall() # setup method call + self.mock_object._Replay() # start replay mode + self.mock_object.ValidCall() # make method call + self.mock_object._Verify() + + def testVerifyWithIncompleteReplay(self): + """Verify should raise an exception if the replay was not complete.""" + self.mock_object.ValidCall() # setup method call + self.mock_object._Replay() # start replay mode + # ValidCall() is never made + self.assertRaises(mox.ExpectedMethodCallsError, self.mock_object._Verify) + + def testSpecialClassMethod(self): + 
"""Verify should not raise an exception when special methods are used.""" + self.mock_object[1].AndReturn(True) + self.mock_object._Replay() + returned_val = self.mock_object[1] + self.assert_(returned_val) + self.mock_object._Verify() + + def testNonzero(self): + """You should be able to use the mock object in an if.""" + self.mock_object._Replay() + if self.mock_object: + pass + + def testNotNone(self): + """Mock should be comparable to None.""" + self.mock_object._Replay() + if self.mock_object is not None: + pass + + if self.mock_object is None: + pass + + def testEquals(self): + """A mock should be able to compare itself to another object.""" + self.mock_object._Replay() + self.assertEquals(self.mock_object, self.mock_object) + + def testEqualsMockFailure(self): + """Verify equals identifies unequal objects.""" + self.mock_object.SillyCall() + self.mock_object._Replay() + self.assertNotEquals(self.mock_object, mox.MockAnything()) + + def testEqualsInstanceFailure(self): + """Verify equals identifies that objects are different instances.""" + self.mock_object._Replay() + self.assertNotEquals(self.mock_object, TestClass()) + + def testNotEquals(self): + """Verify not equals works.""" + self.mock_object._Replay() + self.assertFalse(self.mock_object != self.mock_object) + + def testNestedMockCallsRecordedSerially(self): + """Test that nested calls work when recorded serially.""" + self.mock_object.CallInner().AndReturn(1) + self.mock_object.CallOuter(1) + self.mock_object._Replay() + + self.mock_object.CallOuter(self.mock_object.CallInner()) + + self.mock_object._Verify() + + def testNestedMockCallsRecordedNested(self): + """Test that nested cals work when recorded in a nested fashion.""" + self.mock_object.CallOuter(self.mock_object.CallInner().AndReturn(1)) + self.mock_object._Replay() + + self.mock_object.CallOuter(self.mock_object.CallInner()) + + self.mock_object._Verify() + + def testIsCallable(self): + """Test that MockAnything can even mock a simple callable. + + This is handy for "stubbing out" a method in a module with a mock, and + verifying that it was called. 
+ """ + self.mock_object().AndReturn('mox0rd') + self.mock_object._Replay() + + self.assertEquals('mox0rd', self.mock_object()) + + self.mock_object._Verify() + + def testIsReprable(self): + """Test that MockAnythings can be repr'd without causing a failure.""" + self.failUnless('MockAnything' in repr(self.mock_object)) + + +class MethodCheckerTest(unittest.TestCase): + """Tests MockMethod's use of MethodChecker method.""" + + def testNoParameters(self): + method = mox.MockMethod('NoParameters', [], False, + CheckCallTestClass.NoParameters) + method() + self.assertRaises(AttributeError, method, 1) + self.assertRaises(AttributeError, method, 1, 2) + self.assertRaises(AttributeError, method, a=1) + self.assertRaises(AttributeError, method, 1, b=2) + + def testOneParameter(self): + method = mox.MockMethod('OneParameter', [], False, + CheckCallTestClass.OneParameter) + self.assertRaises(AttributeError, method) + method(1) + method(a=1) + self.assertRaises(AttributeError, method, b=1) + self.assertRaises(AttributeError, method, 1, 2) + self.assertRaises(AttributeError, method, 1, a=2) + self.assertRaises(AttributeError, method, 1, b=2) + + def testTwoParameters(self): + method = mox.MockMethod('TwoParameters', [], False, + CheckCallTestClass.TwoParameters) + self.assertRaises(AttributeError, method) + self.assertRaises(AttributeError, method, 1) + self.assertRaises(AttributeError, method, a=1) + self.assertRaises(AttributeError, method, b=1) + method(1, 2) + method(1, b=2) + method(a=1, b=2) + method(b=2, a=1) + self.assertRaises(AttributeError, method, b=2, c=3) + self.assertRaises(AttributeError, method, a=1, b=2, c=3) + self.assertRaises(AttributeError, method, 1, 2, 3) + self.assertRaises(AttributeError, method, 1, 2, 3, 4) + self.assertRaises(AttributeError, method, 3, a=1, b=2) + + def testOneDefaultValue(self): + method = mox.MockMethod('OneDefaultValue', [], False, + CheckCallTestClass.OneDefaultValue) + method() + method(1) + method(a=1) + self.assertRaises(AttributeError, method, b=1) + self.assertRaises(AttributeError, method, 1, 2) + self.assertRaises(AttributeError, method, 1, a=2) + self.assertRaises(AttributeError, method, 1, b=2) + + def testTwoDefaultValues(self): + method = mox.MockMethod('TwoDefaultValues', [], False, + CheckCallTestClass.TwoDefaultValues) + self.assertRaises(AttributeError, method) + self.assertRaises(AttributeError, method, c=3) + self.assertRaises(AttributeError, method, 1) + self.assertRaises(AttributeError, method, 1, d=4) + self.assertRaises(AttributeError, method, 1, d=4, c=3) + method(1, 2) + method(a=1, b=2) + method(1, 2, 3) + method(1, 2, 3, 4) + method(1, 2, c=3) + method(1, 2, c=3, d=4) + method(1, 2, d=4, c=3) + method(d=4, c=3, a=1, b=2) + self.assertRaises(AttributeError, method, 1, 2, 3, 4, 5) + self.assertRaises(AttributeError, method, 1, 2, e=9) + self.assertRaises(AttributeError, method, a=1, b=2, e=9) + + def testArgs(self): + method = mox.MockMethod('Args', [], False, CheckCallTestClass.Args) + self.assertRaises(AttributeError, method) + self.assertRaises(AttributeError, method, 1) + method(1, 2) + method(a=1, b=2) + method(1, 2, 3) + method(1, 2, 3, 4) + self.assertRaises(AttributeError, method, 1, 2, a=3) + self.assertRaises(AttributeError, method, 1, 2, c=3) + + def testKwargs(self): + method = mox.MockMethod('Kwargs', [], False, CheckCallTestClass.Kwargs) + self.assertRaises(AttributeError, method) + method(1) + method(1, 2) + method(a=1, b=2) + method(b=2, a=1) + self.assertRaises(AttributeError, method, 1, 2, 3) + 
self.assertRaises(AttributeError, method, 1, 2, a=3) + method(1, 2, c=3) + method(a=1, b=2, c=3) + method(c=3, a=1, b=2) + method(a=1, b=2, c=3, d=4) + self.assertRaises(AttributeError, method, 1, 2, 3, 4) + + def testArgsAndKwargs(self): + method = mox.MockMethod('ArgsAndKwargs', [], False, + CheckCallTestClass.ArgsAndKwargs) + self.assertRaises(AttributeError, method) + method(1) + method(1, 2) + method(1, 2, 3) + method(a=1) + method(1, b=2) + self.assertRaises(AttributeError, method, 1, a=2) + method(b=2, a=1) + method(c=3, b=2, a=1) + method(1, 2, c=3) + + +class CheckCallTestClass(object): + def NoParameters(self): + pass + + def OneParameter(self, a): + pass + + def TwoParameters(self, a, b): + pass + + def OneDefaultValue(self, a=1): + pass + + def TwoDefaultValues(self, a, b, c=1, d=2): + pass + + def Args(self, a, b, *args): + pass + + def Kwargs(self, a, b=2, **kwargs): + pass + + def ArgsAndKwargs(self, a, *args, **kwargs): + pass + + +class MockObjectTest(unittest.TestCase): + """Verify that the MockObject class works as exepcted.""" + + def setUp(self): + self.mock_object = mox.MockObject(TestClass) + + def testSetupModeWithValidCall(self): + """Verify the mock object properly mocks a basic method call.""" + self.mock_object.ValidCall() + self.assert_(len(self.mock_object._expected_calls_queue) == 1) + + def testSetupModeWithInvalidCall(self): + """UnknownMethodCallError should be raised if a non-member method is called. + """ + # Note: assertRaises does not catch exceptions thrown by MockObject's + # __getattr__ + try: + self.mock_object.InvalidCall() + self.fail("No exception thrown, expected UnknownMethodCallError") + except mox.UnknownMethodCallError: + pass + except Exception: + self.fail("Wrong exception type thrown, expected UnknownMethodCallError") + + def testReplayWithInvalidCall(self): + """UnknownMethodCallError should be raised if a non-member method is called. 
+ """ + self.mock_object.ValidCall() # setup method call + self.mock_object._Replay() # start replay mode + # Note: assertRaises does not catch exceptions thrown by MockObject's + # __getattr__ + try: + self.mock_object.InvalidCall() + self.fail("No exception thrown, expected UnknownMethodCallError") + except mox.UnknownMethodCallError: + pass + except Exception: + self.fail("Wrong exception type thrown, expected UnknownMethodCallError") + + def testIsInstance(self): + """Mock should be able to pass as an instance of the mocked class.""" + self.assert_(isinstance(self.mock_object, TestClass)) + + def testFindValidMethods(self): + """Mock should be able to mock all public methods.""" + self.assert_('ValidCall' in self.mock_object._known_methods) + self.assert_('OtherValidCall' in self.mock_object._known_methods) + self.assert_('MyClassMethod' in self.mock_object._known_methods) + self.assert_('MyStaticMethod' in self.mock_object._known_methods) + self.assert_('_ProtectedCall' in self.mock_object._known_methods) + self.assert_('__PrivateCall' not in self.mock_object._known_methods) + self.assert_('_TestClass__PrivateCall' in self.mock_object._known_methods) + + def testFindsSuperclassMethods(self): + """Mock should be able to mock superclasses methods.""" + self.mock_object = mox.MockObject(ChildClass) + self.assert_('ValidCall' in self.mock_object._known_methods) + self.assert_('OtherValidCall' in self.mock_object._known_methods) + self.assert_('MyClassMethod' in self.mock_object._known_methods) + self.assert_('ChildValidCall' in self.mock_object._known_methods) + + def testAccessClassVariables(self): + """Class variables should be accessible through the mock.""" + self.assert_('SOME_CLASS_VAR' in self.mock_object._known_vars) + self.assert_('_PROTECTED_CLASS_VAR' in self.mock_object._known_vars) + self.assertEquals('test_value', self.mock_object.SOME_CLASS_VAR) + + def testEquals(self): + """A mock should be able to compare itself to another object.""" + self.mock_object._Replay() + self.assertEquals(self.mock_object, self.mock_object) + + def testEqualsMockFailure(self): + """Verify equals identifies unequal objects.""" + self.mock_object.ValidCall() + self.mock_object._Replay() + self.assertNotEquals(self.mock_object, mox.MockObject(TestClass)) + + def testEqualsInstanceFailure(self): + """Verify equals identifies that objects are different instances.""" + self.mock_object._Replay() + self.assertNotEquals(self.mock_object, TestClass()) + + def testNotEquals(self): + """Verify not equals works.""" + self.mock_object._Replay() + self.assertFalse(self.mock_object != self.mock_object) + + def testMockSetItem_ExpectedSetItem_Success(self): + """Test that __setitem__() gets mocked in Dummy. + + In this test, _Verify() succeeds. + """ + dummy = mox.MockObject(TestClass) + dummy['X'] = 'Y' + + dummy._Replay() + + dummy['X'] = 'Y' + + dummy._Verify() + + def testMockSetItem_ExpectedSetItem_NoSuccess(self): + """Test that __setitem__() gets mocked in Dummy. + + In this test, _Verify() fails. 
+ """ + dummy = mox.MockObject(TestClass) + dummy['X'] = 'Y' + + dummy._Replay() + + # NOT doing dummy['X'] = 'Y' + + self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify) + + def testMockSetItem_ExpectedNoSetItem_Success(self): + """Test that __setitem__() gets mocked in Dummy.""" + dummy = mox.MockObject(TestClass) + # NOT doing dummy['X'] = 'Y' + + dummy._Replay() + + def call(): dummy['X'] = 'Y' + self.assertRaises(mox.UnexpectedMethodCallError, call) + + def testMockSetItem_ExpectedNoSetItem_NoSuccess(self): + """Test that __setitem__() gets mocked in Dummy. + + In this test, _Verify() fails. + """ + dummy = mox.MockObject(TestClass) + # NOT doing dummy['X'] = 'Y' + + dummy._Replay() + + # NOT doing dummy['X'] = 'Y' + + dummy._Verify() + + def testMockSetItem_ExpectedSetItem_NonmatchingParameters(self): + """Test that __setitem__() fails if other parameters are expected.""" + dummy = mox.MockObject(TestClass) + dummy['X'] = 'Y' + + dummy._Replay() + + def call(): dummy['wrong'] = 'Y' + + self.assertRaises(mox.UnexpectedMethodCallError, call) + + dummy._Verify() + + def testMockSetItem_WithSubClassOfNewStyleClass(self): + class NewStyleTestClass(object): + def __init__(self): + self.my_dict = {} + + def __setitem__(self, key, value): + self.my_dict[key], value + + class TestSubClass(NewStyleTestClass): + pass + + dummy = mox.MockObject(TestSubClass) + dummy[1] = 2 + dummy._Replay() + dummy[1] = 2 + dummy._Verify() + + def testMockGetItem_ExpectedGetItem_Success(self): + """Test that __getitem__() gets mocked in Dummy. + + In this test, _Verify() succeeds. + """ + dummy = mox.MockObject(TestClass) + dummy['X'].AndReturn('value') + + dummy._Replay() + + self.assertEqual(dummy['X'], 'value') + + dummy._Verify() + + def testMockGetItem_ExpectedGetItem_NoSuccess(self): + """Test that __getitem__() gets mocked in Dummy. + + In this test, _Verify() fails. + """ + dummy = mox.MockObject(TestClass) + dummy['X'].AndReturn('value') + + dummy._Replay() + + # NOT doing dummy['X'] + + self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify) + + def testMockGetItem_ExpectedNoGetItem_NoSuccess(self): + """Test that __getitem__() gets mocked in Dummy.""" + dummy = mox.MockObject(TestClass) + # NOT doing dummy['X'] + + dummy._Replay() + + def call(): return dummy['X'] + self.assertRaises(mox.UnexpectedMethodCallError, call) + + def testMockGetItem_ExpectedGetItem_NonmatchingParameters(self): + """Test that __getitem__() fails if other parameters are expected.""" + dummy = mox.MockObject(TestClass) + dummy['X'].AndReturn('value') + + dummy._Replay() + + def call(): return dummy['wrong'] + + self.assertRaises(mox.UnexpectedMethodCallError, call) + + dummy._Verify() + + def testMockGetItem_WithSubClassOfNewStyleClass(self): + class NewStyleTestClass(object): + def __getitem__(self, key): + return {1: '1', 2: '2'}[key] + + class TestSubClass(NewStyleTestClass): + pass + + dummy = mox.MockObject(TestSubClass) + dummy[1].AndReturn('3') + + dummy._Replay() + self.assertEquals('3', dummy.__getitem__(1)) + dummy._Verify() + + def testMockIter_ExpectedIter_Success(self): + """Test that __iter__() gets mocked in Dummy. + + In this test, _Verify() succeeds. + """ + dummy = mox.MockObject(TestClass) + iter(dummy).AndReturn(iter(['X', 'Y'])) + + dummy._Replay() + + self.assertEqual([x for x in dummy], ['X', 'Y']) + + dummy._Verify() + + def testMockContains_ExpectedContains_Success(self): + """Test that __contains__ gets mocked in Dummy. + + In this test, _Verify() succeeds. 
+ """ + dummy = mox.MockObject(TestClass) + dummy.__contains__('X').AndReturn(True) + + dummy._Replay() + + self.failUnless('X' in dummy) + + dummy._Verify() + + def testMockContains_ExpectedContains_NoSuccess(self): + """Test that __contains__() gets mocked in Dummy. + + In this test, _Verify() fails. + """ + dummy = mox.MockObject(TestClass) + dummy.__contains__('X').AndReturn('True') + + dummy._Replay() + + # NOT doing 'X' in dummy + + self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify) + + def testMockContains_ExpectedContains_NonmatchingParameter(self): + """Test that __contains__ fails if other parameters are expected.""" + dummy = mox.MockObject(TestClass) + dummy.__contains__('X').AndReturn(True) + + dummy._Replay() + + def call(): return 'Y' in dummy + + self.assertRaises(mox.UnexpectedMethodCallError, call) + + dummy._Verify() + + def testMockIter_ExpectedIter_NoSuccess(self): + """Test that __iter__() gets mocked in Dummy. + + In this test, _Verify() fails. + """ + dummy = mox.MockObject(TestClass) + iter(dummy).AndReturn(iter(['X', 'Y'])) + + dummy._Replay() + + # NOT doing self.assertEqual([x for x in dummy], ['X', 'Y']) + + self.assertRaises(mox.ExpectedMethodCallsError, dummy._Verify) + + def testMockIter_ExpectedNoIter_NoSuccess(self): + """Test that __iter__() gets mocked in Dummy.""" + dummy = mox.MockObject(TestClass) + # NOT doing iter(dummy) + + dummy._Replay() + + def call(): return [x for x in dummy] + self.assertRaises(mox.UnexpectedMethodCallError, call) + + def testMockIter_ExpectedGetItem_Success(self): + """Test that __iter__() gets mocked in Dummy using getitem.""" + dummy = mox.MockObject(SubscribtableNonIterableClass) + dummy[0].AndReturn('a') + dummy[1].AndReturn('b') + dummy[2].AndRaise(IndexError) + + dummy._Replay() + self.assertEquals(['a', 'b'], [x for x in dummy]) + dummy._Verify() + + def testMockIter_ExpectedNoGetItem_NoSuccess(self): + """Test that __iter__() gets mocked in Dummy using getitem.""" + dummy = mox.MockObject(SubscribtableNonIterableClass) + # NOT doing dummy[index] + + dummy._Replay() + function = lambda: [x for x in dummy] + self.assertRaises(mox.UnexpectedMethodCallError, function) + + def testMockGetIter_WithSubClassOfNewStyleClass(self): + class NewStyleTestClass(object): + def __iter__(self): + return iter([1, 2, 3]) + + class TestSubClass(NewStyleTestClass): + pass + + dummy = mox.MockObject(TestSubClass) + iter(dummy).AndReturn(iter(['a', 'b'])) + dummy._Replay() + self.assertEquals(['a', 'b'], [x for x in dummy]) + dummy._Verify() + + def testInstantiationWithAdditionalAttributes(self): + mock_object = mox.MockObject(TestClass, attrs={"attr1": "value"}) + self.assertEquals(mock_object.attr1, "value") + + def testCantOverrideMethodsWithAttributes(self): + self.assertRaises(ValueError, mox.MockObject, TestClass, + attrs={"ValidCall": "value"}) + + def testCantMockNonPublicAttributes(self): + self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass, + attrs={"_protected": "value"}) + self.assertRaises(mox.PrivateAttributeError, mox.MockObject, TestClass, + attrs={"__private": "value"}) + + +class MoxTest(unittest.TestCase): + """Verify Mox works correctly.""" + + def setUp(self): + self.mox = mox.Mox() + + def testCreateObject(self): + """Mox should create a mock object.""" + mock_obj = self.mox.CreateMock(TestClass) + + def testVerifyObjectWithCompleteReplay(self): + """Mox should replay and verify all objects it created.""" + mock_obj = self.mox.CreateMock(TestClass) + mock_obj.ValidCall() + 
mock_obj.ValidCallWithArgs(mox.IsA(TestClass)) + self.mox.ReplayAll() + mock_obj.ValidCall() + mock_obj.ValidCallWithArgs(TestClass("some_value")) + self.mox.VerifyAll() + + def testVerifyObjectWithIncompleteReplay(self): + """Mox should raise an exception if a mock didn't replay completely.""" + mock_obj = self.mox.CreateMock(TestClass) + mock_obj.ValidCall() + self.mox.ReplayAll() + # ValidCall() is never made + self.assertRaises(mox.ExpectedMethodCallsError, self.mox.VerifyAll) + + def testEntireWorkflow(self): + """Test the whole work flow.""" + mock_obj = self.mox.CreateMock(TestClass) + mock_obj.ValidCall().AndReturn("yes") + self.mox.ReplayAll() + + ret_val = mock_obj.ValidCall() + self.assertEquals("yes", ret_val) + self.mox.VerifyAll() + + def testCallableObject(self): + """Test recording calls to a callable object works.""" + mock_obj = self.mox.CreateMock(CallableClass) + mock_obj("foo").AndReturn("qux") + self.mox.ReplayAll() + + ret_val = mock_obj("foo") + self.assertEquals("qux", ret_val) + self.mox.VerifyAll() + + def testInheritedCallableObject(self): + """Test recording calls to an object inheriting from a callable object.""" + mock_obj = self.mox.CreateMock(InheritsFromCallable) + mock_obj("foo").AndReturn("qux") + self.mox.ReplayAll() + + ret_val = mock_obj("foo") + self.assertEquals("qux", ret_val) + self.mox.VerifyAll() + + def testCallOnNonCallableObject(self): + """Test that you cannot call a non-callable object.""" + mock_obj = self.mox.CreateMock(TestClass) + self.assertRaises(TypeError, mock_obj) + + def testCallableObjectWithBadCall(self): + """Test verifying calls to a callable object works.""" + mock_obj = self.mox.CreateMock(CallableClass) + mock_obj("foo").AndReturn("qux") + self.mox.ReplayAll() + + self.assertRaises(mox.UnexpectedMethodCallError, mock_obj, "ZOOBAZ") + + def testUnorderedGroup(self): + """Test that using one unordered group works.""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Method(1).InAnyOrder() + mock_obj.Method(2).InAnyOrder() + self.mox.ReplayAll() + + mock_obj.Method(2) + mock_obj.Method(1) + + self.mox.VerifyAll() + + def testUnorderedGroupsInline(self): + """Unordered groups should work in the context of ordered calls.""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Open() + mock_obj.Method(1).InAnyOrder() + mock_obj.Method(2).InAnyOrder() + mock_obj.Close() + self.mox.ReplayAll() + + mock_obj.Open() + mock_obj.Method(2) + mock_obj.Method(1) + mock_obj.Close() + + self.mox.VerifyAll() + + def testMultipleUnorderdGroups(self): + """Multiple unoreded groups should work.""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Method(1).InAnyOrder() + mock_obj.Method(2).InAnyOrder() + mock_obj.Foo().InAnyOrder('group2') + mock_obj.Bar().InAnyOrder('group2') + self.mox.ReplayAll() + + mock_obj.Method(2) + mock_obj.Method(1) + mock_obj.Bar() + mock_obj.Foo() + + self.mox.VerifyAll() + + def testMultipleUnorderdGroupsOutOfOrder(self): + """Multiple unordered groups should maintain external order""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Method(1).InAnyOrder() + mock_obj.Method(2).InAnyOrder() + mock_obj.Foo().InAnyOrder('group2') + mock_obj.Bar().InAnyOrder('group2') + self.mox.ReplayAll() + + mock_obj.Method(2) + self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Bar) + + def testUnorderedGroupWithReturnValue(self): + """Unordered groups should work with return values.""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Open() + mock_obj.Method(1).InAnyOrder().AndReturn(9) + 
mock_obj.Method(2).InAnyOrder().AndReturn(10) + mock_obj.Close() + self.mox.ReplayAll() + + mock_obj.Open() + actual_two = mock_obj.Method(2) + actual_one = mock_obj.Method(1) + mock_obj.Close() + + self.assertEquals(9, actual_one) + self.assertEquals(10, actual_two) + + self.mox.VerifyAll() + + def testUnorderedGroupWithComparator(self): + """Unordered groups should work with comparators""" + + def VerifyOne(cmd): + if not isinstance(cmd, str): + self.fail('Unexpected type passed to comparator: ' + str(cmd)) + return cmd == 'test' + + def VerifyTwo(cmd): + return True + + mock_obj = self.mox.CreateMockAnything() + mock_obj.Foo(['test'], mox.Func(VerifyOne), bar=1).InAnyOrder().\ + AndReturn('yes test') + mock_obj.Foo(['test'], mox.Func(VerifyTwo), bar=1).InAnyOrder().\ + AndReturn('anything') + + self.mox.ReplayAll() + + mock_obj.Foo(['test'], 'anything', bar=1) + mock_obj.Foo(['test'], 'test', bar=1) + + self.mox.VerifyAll() + + def testMultipleTimes(self): + """Test if MultipleTimesGroup works.""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Method(1).MultipleTimes().AndReturn(9) + mock_obj.Method(2).AndReturn(10) + mock_obj.Method(3).MultipleTimes().AndReturn(42) + self.mox.ReplayAll() + + actual_one = mock_obj.Method(1) + second_one = mock_obj.Method(1) # This tests MultipleTimes. + actual_two = mock_obj.Method(2) + actual_three = mock_obj.Method(3) + mock_obj.Method(3) + mock_obj.Method(3) + + self.mox.VerifyAll() + + self.assertEquals(9, actual_one) + self.assertEquals(9, second_one) # Repeated calls should return same number. + self.assertEquals(10, actual_two) + self.assertEquals(42, actual_three) + + def testMultipleTimesUsingIsAParameter(self): + """Test if MultipleTimesGroup works with a IsA parameter.""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Open() + mock_obj.Method(mox.IsA(str)).MultipleTimes("IsA").AndReturn(9) + mock_obj.Close() + self.mox.ReplayAll() + + mock_obj.Open() + actual_one = mock_obj.Method("1") + second_one = mock_obj.Method("2") # This tests MultipleTimes. + mock_obj.Close() + + self.mox.VerifyAll() + + self.assertEquals(9, actual_one) + self.assertEquals(9, second_one) # Repeated calls should return same number. + + def testMutlipleTimesUsingFunc(self): + """Test that the Func is not evaluated more times than necessary. + + If a Func() has side effects, it can cause a passing test to fail. 
+ """ + + self.counter = 0 + def MyFunc(actual_str): + """Increment the counter if actual_str == 'foo'.""" + if actual_str == 'foo': + self.counter += 1 + return True + + mock_obj = self.mox.CreateMockAnything() + mock_obj.Open() + mock_obj.Method(mox.Func(MyFunc)).MultipleTimes() + mock_obj.Close() + self.mox.ReplayAll() + + mock_obj.Open() + mock_obj.Method('foo') + mock_obj.Method('foo') + mock_obj.Method('not-foo') + mock_obj.Close() + + self.mox.VerifyAll() + + self.assertEquals(2, self.counter) + + def testMultipleTimesThreeMethods(self): + """Test if MultipleTimesGroup works with three or more methods.""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Open() + mock_obj.Method(1).MultipleTimes().AndReturn(9) + mock_obj.Method(2).MultipleTimes().AndReturn(8) + mock_obj.Method(3).MultipleTimes().AndReturn(7) + mock_obj.Method(4).AndReturn(10) + mock_obj.Close() + self.mox.ReplayAll() + + mock_obj.Open() + actual_three = mock_obj.Method(3) + mock_obj.Method(1) + actual_two = mock_obj.Method(2) + mock_obj.Method(3) + actual_one = mock_obj.Method(1) + actual_four = mock_obj.Method(4) + mock_obj.Close() + + self.assertEquals(9, actual_one) + self.assertEquals(8, actual_two) + self.assertEquals(7, actual_three) + self.assertEquals(10, actual_four) + + self.mox.VerifyAll() + + def testMultipleTimesMissingOne(self): + """Test if MultipleTimesGroup fails if one method is missing.""" + mock_obj = self.mox.CreateMockAnything() + mock_obj.Open() + mock_obj.Method(1).MultipleTimes().AndReturn(9) + mock_obj.Method(2).MultipleTimes().AndReturn(8) + mock_obj.Method(3).MultipleTimes().AndReturn(7) + mock_obj.Method(4).AndReturn(10) + mock_obj.Close() + self.mox.ReplayAll() + + mock_obj.Open() + mock_obj.Method(3) + mock_obj.Method(2) + mock_obj.Method(3) + mock_obj.Method(3) + mock_obj.Method(2) + + self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 4) + + def testMultipleTimesTwoGroups(self): + """Test if MultipleTimesGroup works with a group after a + MultipleTimesGroup. + """ + mock_obj = self.mox.CreateMockAnything() + mock_obj.Open() + mock_obj.Method(1).MultipleTimes().AndReturn(9) + mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42) + mock_obj.Close() + self.mox.ReplayAll() + + mock_obj.Open() + actual_one = mock_obj.Method(1) + mock_obj.Method(1) + actual_three = mock_obj.Method(3) + mock_obj.Method(3) + mock_obj.Close() + + self.assertEquals(9, actual_one) + self.assertEquals(42, actual_three) + + self.mox.VerifyAll() + + def testMultipleTimesTwoGroupsFailure(self): + """Test if MultipleTimesGroup fails with a group after a + MultipleTimesGroup. 
+ """ + mock_obj = self.mox.CreateMockAnything() + mock_obj.Open() + mock_obj.Method(1).MultipleTimes().AndReturn(9) + mock_obj.Method(3).MultipleTimes("nr2").AndReturn(42) + mock_obj.Close() + self.mox.ReplayAll() + + mock_obj.Open() + actual_one = mock_obj.Method(1) + mock_obj.Method(1) + actual_three = mock_obj.Method(3) + + self.assertRaises(mox.UnexpectedMethodCallError, mock_obj.Method, 1) + + def testWithSideEffects(self): + """Test side effect operations actually modify their target objects.""" + def modifier(mutable_list): + mutable_list[0] = 'mutated' + mock_obj = self.mox.CreateMockAnything() + mock_obj.ConfigureInOutParameter(['original']).WithSideEffects(modifier) + mock_obj.WorkWithParameter(['mutated']) + self.mox.ReplayAll() + + local_list = ['original'] + mock_obj.ConfigureInOutParameter(local_list) + mock_obj.WorkWithParameter(local_list) + + self.mox.VerifyAll() + + def testWithSideEffectsException(self): + """Test side effect operations actually modify their target objects.""" + def modifier(mutable_list): + mutable_list[0] = 'mutated' + mock_obj = self.mox.CreateMockAnything() + method = mock_obj.ConfigureInOutParameter(['original']) + method.WithSideEffects(modifier).AndRaise(Exception('exception')) + mock_obj.WorkWithParameter(['mutated']) + self.mox.ReplayAll() + + local_list = ['original'] + self.failUnlessRaises(Exception, + mock_obj.ConfigureInOutParameter, + local_list) + mock_obj.WorkWithParameter(local_list) + + self.mox.VerifyAll() + + def testStubOutMethod(self): + """Test that a method is replaced with a MockAnything.""" + test_obj = TestClass() + # Replace OtherValidCall with a mock. + self.mox.StubOutWithMock(test_obj, 'OtherValidCall') + self.assert_(isinstance(test_obj.OtherValidCall, mox.MockAnything)) + test_obj.OtherValidCall().AndReturn('foo') + self.mox.ReplayAll() + + actual = test_obj.OtherValidCall() + + self.mox.VerifyAll() + self.mox.UnsetStubs() + self.assertEquals('foo', actual) + self.failIf(isinstance(test_obj.OtherValidCall, mox.MockAnything)) + + def testStubOutClass(self): + """Test a mocked class whose __init__ returns a Mock.""" + self.mox.StubOutWithMock(mox_test_helper, 'TestClassFromAnotherModule') + self.assert_(isinstance(mox_test_helper.TestClassFromAnotherModule, + mox.MockObject)) + + mock_instance = self.mox.CreateMock( + mox_test_helper.TestClassFromAnotherModule) + mox_test_helper.TestClassFromAnotherModule().AndReturn(mock_instance) + mock_instance.Value().AndReturn('mock instance') + + self.mox.ReplayAll() + + a_mock = mox_test_helper.TestClassFromAnotherModule() + actual = a_mock.Value() + + self.mox.VerifyAll() + self.mox.UnsetStubs() + self.assertEquals('mock instance', actual) + + def testWarnsUserIfMockingMock(self): + """Test that user is warned if they try to stub out a MockAnything.""" + self.mox.StubOutWithMock(TestClass, 'MyStaticMethod') + self.assertRaises(TypeError, self.mox.StubOutWithMock, TestClass, + 'MyStaticMethod') + + def testStubOutObject(self): + """Test than object is replaced with a Mock.""" + + class Foo(object): + def __init__(self): + self.obj = TestClass() + + foo = Foo() + self.mox.StubOutWithMock(foo, "obj") + self.assert_(isinstance(foo.obj, mox.MockObject)) + foo.obj.ValidCall() + self.mox.ReplayAll() + + foo.obj.ValidCall() + + self.mox.VerifyAll() + self.mox.UnsetStubs() + self.failIf(isinstance(foo.obj, mox.MockObject)) + + def testForgotReplayHelpfulMessage(self): + """If there is an AttributeError on a MockMethod, give users a helpful msg. 
+ """ + foo = self.mox.CreateMockAnything() + bar = self.mox.CreateMockAnything() + foo.GetBar().AndReturn(bar) + bar.ShowMeTheMoney() + # Forgot to replay! + try: + foo.GetBar().ShowMeTheMoney() + except AttributeError, e: + self.assertEquals('MockMethod has no attribute "ShowMeTheMoney". ' + 'Did you remember to put your mocks in replay mode?', str(e)) + + +class ReplayTest(unittest.TestCase): + """Verify Replay works properly.""" + + def testReplay(self): + """Replay should put objects into replay mode.""" + mock_obj = mox.MockObject(TestClass) + self.assertFalse(mock_obj._replay_mode) + mox.Replay(mock_obj) + self.assertTrue(mock_obj._replay_mode) + + +class MoxTestBaseTest(unittest.TestCase): + """Verify that all tests in a class derived from MoxTestBase are wrapped.""" + + def setUp(self): + self.mox = mox.Mox() + self.test_mox = mox.Mox() + self.test_stubs = mox.stubout.StubOutForTesting() + self.result = unittest.TestResult() + + def tearDown(self): + self.mox.UnsetStubs() + self.test_mox.UnsetStubs() + self.test_stubs.UnsetAll() + self.test_stubs.SmartUnsetAll() + + def _setUpTestClass(self): + """Replacement for setUp in the test class instance. + + Assigns a mox.Mox instance as the mox attribute of the test class instance. + This replacement Mox instance is under our control before setUp is called + in the test class instance. + """ + self.test.mox = self.test_mox + self.test.stubs = self.test_stubs + + def _CreateTest(self, test_name): + """Create a test from our example mox class. + + The created test instance is assigned to this instances test attribute. + """ + self.test = mox_test_helper.ExampleMoxTest(test_name) + self.mox.stubs.Set(self.test, 'setUp', self._setUpTestClass) + + def _VerifySuccess(self): + """Run the checks to confirm test method completed successfully.""" + self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs') + self.mox.StubOutWithMock(self.test_mox, 'VerifyAll') + self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll') + self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll') + self.test_mox.UnsetStubs() + self.test_mox.VerifyAll() + self.test_stubs.UnsetAll() + self.test_stubs.SmartUnsetAll() + self.mox.ReplayAll() + self.test.run(result=self.result) + self.assertTrue(self.result.wasSuccessful()) + self.mox.VerifyAll() + self.mox.UnsetStubs() # Needed to call the real VerifyAll() below. 
+ self.test_mox.VerifyAll() + + def testSuccess(self): + """Successful test method execution test.""" + self._CreateTest('testSuccess') + self._VerifySuccess() + + def testSuccessNoMocks(self): + """Let testSuccess() unset all the mocks, and verify they've been unset.""" + self._CreateTest('testSuccess') + self.test.run(result=self.result) + self.assertTrue(self.result.wasSuccessful()) + self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir) + + def testStubs(self): + """Test that "self.stubs" is provided as is useful.""" + self._CreateTest('testHasStubs') + self._VerifySuccess() + + def testStubsNoMocks(self): + """Let testHasStubs() unset the stubs by itself.""" + self._CreateTest('testHasStubs') + self.test.run(result=self.result) + self.assertTrue(self.result.wasSuccessful()) + self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir) + + def testExpectedNotCalled(self): + """Stubbed out method is not called.""" + self._CreateTest('testExpectedNotCalled') + self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs') + self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll') + self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll') + # Don't stub out VerifyAll - that's what causes the test to fail + self.test_mox.UnsetStubs() + self.test_stubs.UnsetAll() + self.test_stubs.SmartUnsetAll() + self.mox.ReplayAll() + self.test.run(result=self.result) + self.failIf(self.result.wasSuccessful()) + self.mox.VerifyAll() + + def testExpectedNotCalledNoMocks(self): + """Let testExpectedNotCalled() unset all the mocks by itself.""" + self._CreateTest('testExpectedNotCalled') + self.test.run(result=self.result) + self.failIf(self.result.wasSuccessful()) + self.assertEqual(OS_LISTDIR, mox_test_helper.os.listdir) + + def testUnexpectedCall(self): + """Stubbed out method is called with unexpected arguments.""" + self._CreateTest('testUnexpectedCall') + self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs') + self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll') + self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll') + # Ensure no calls are made to VerifyAll() + self.mox.StubOutWithMock(self.test_mox, 'VerifyAll') + self.test_mox.UnsetStubs() + self.test_stubs.UnsetAll() + self.test_stubs.SmartUnsetAll() + self.mox.ReplayAll() + self.test.run(result=self.result) + self.failIf(self.result.wasSuccessful()) + self.mox.VerifyAll() + + def testFailure(self): + """Failing assertion in test method.""" + self._CreateTest('testFailure') + self.mox.StubOutWithMock(self.test_mox, 'UnsetStubs') + self.mox.StubOutWithMock(self.test_stubs, 'UnsetAll') + self.mox.StubOutWithMock(self.test_stubs, 'SmartUnsetAll') + # Ensure no calls are made to VerifyAll() + self.mox.StubOutWithMock(self.test_mox, 'VerifyAll') + self.test_mox.UnsetStubs() + self.test_stubs.UnsetAll() + self.test_stubs.SmartUnsetAll() + self.mox.ReplayAll() + self.test.run(result=self.result) + self.failIf(self.result.wasSuccessful()) + self.mox.VerifyAll() + + def testMixin(self): + """Run test from mix-in test class, ensure it passes.""" + self._CreateTest('testStat') + self._VerifySuccess() + + def testMixinAgain(self): + """Run same test as above but from the current test class. + + This ensures metaclass properly wrapped test methods from all base classes. + If unsetting of stubs doesn't happen, this will fail. + """ + self._CreateTest('testStatOther') + self._VerifySuccess() + + +class VerifyTest(unittest.TestCase): + """Verify Verify works properly.""" + + def testVerify(self): + """Verify should be called for all objects. 
+ + This should throw an exception because the expected behavior did not occur. + """ + mock_obj = mox.MockObject(TestClass) + mock_obj.ValidCall() + mock_obj._Replay() + self.assertRaises(mox.ExpectedMethodCallsError, mox.Verify, mock_obj) + + +class ResetTest(unittest.TestCase): + """Verify Reset works properly.""" + + def testReset(self): + """Should empty all queues and put mocks in record mode.""" + mock_obj = mox.MockObject(TestClass) + mock_obj.ValidCall() + self.assertFalse(mock_obj._replay_mode) + mock_obj._Replay() + self.assertTrue(mock_obj._replay_mode) + self.assertEquals(1, len(mock_obj._expected_calls_queue)) + + mox.Reset(mock_obj) + self.assertFalse(mock_obj._replay_mode) + self.assertEquals(0, len(mock_obj._expected_calls_queue)) + + +class MyTestCase(unittest.TestCase): + """Simulate the use of a fake wrapper around Python's unittest library.""" + + def setUp(self): + super(MyTestCase, self).setUp() + self.critical_variable = 42 + self.another_critical_variable = 42 + + def testMethodOverride(self): + """Should be properly overriden in a derived class.""" + self.assertEquals(42, self.another_critical_variable) + self.another_critical_variable += 1 + + +class MoxTestBaseMultipleInheritanceTest(mox.MoxTestBase, MyTestCase): + """Test that multiple inheritance can be used with MoxTestBase.""" + + def setUp(self): + super(MoxTestBaseMultipleInheritanceTest, self).setUp() + self.another_critical_variable = 99 + + def testMultipleInheritance(self): + """Should be able to access members created by all parent setUp().""" + self.assert_(isinstance(self.mox, mox.Mox)) + self.assertEquals(42, self.critical_variable) + + def testMethodOverride(self): + """Should run before MyTestCase.testMethodOverride.""" + self.assertEquals(99, self.another_critical_variable) + self.another_critical_variable = 42 + super(MoxTestBaseMultipleInheritanceTest, self).testMethodOverride() + self.assertEquals(43, self.another_critical_variable) + + +class TestClass: + """This class is used only for testing the mock framework""" + + SOME_CLASS_VAR = "test_value" + _PROTECTED_CLASS_VAR = "protected value" + + def __init__(self, ivar=None): + self.__ivar = ivar + + def __eq__(self, rhs): + return self.__ivar == rhs + + def __ne__(self, rhs): + return not self.__eq__(rhs) + + def ValidCall(self): + pass + + def OtherValidCall(self): + pass + + def ValidCallWithArgs(self, *args, **kwargs): + pass + + @classmethod + def MyClassMethod(cls): + pass + + @staticmethod + def MyStaticMethod(): + pass + + def _ProtectedCall(self): + pass + + def __PrivateCall(self): + pass + + def __getitem__(self, key): + pass + + def __DoNotMock(self): + pass + + def __getitem__(self, key): + """Return the value for key.""" + return self.d[key] + + def __setitem__(self, key, value): + """Set the value for key to value.""" + self.d[key] = value + + def __contains__(self, key): + """Returns True if d contains the key.""" + return key in self.d + + def __iter__(self): + pass + +class ChildClass(TestClass): + """This inherits from TestClass.""" + def __init__(self): + TestClass.__init__(self) + + def ChildValidCall(self): + pass + + +class CallableClass(object): + """This class is callable, and that should be mockable!""" + + def __init__(self): + pass + + def __call__(self, param): + return param + + +class SubscribtableNonIterableClass(object): + def __getitem__(self, index): + raise IndexError + + +class InheritsFromCallable(CallableClass): + """This class should also be mockable; it inherits from a callable class.""" + + pass + + 
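The comparator and workflow tests above all drive the same record/replay/verify cycle. A minimal sketch of that cycle, using only calls these tests exercise (mox.Mox, CreateMockAnything, Regex, IsAlmost, AndReturn, ReplayAll, VerifyAll); the `backend` mock and its `Get` call are illustrative names, not anything defined by the library or by this patch:

    import mox

    m = mox.Mox()
    backend = m.CreateMockAnything()

    # Record phase: match the expected call with comparators instead of
    # literal values, and give it a canned return value.
    backend.Get(mox.Regex(r'^/greeting/\w+$'),
                mox.IsAlmost(1.5, places=2)).AndReturn('hello')

    m.ReplayAll()                                    # switch to replay mode
    assert backend.Get('/greeting/en', 1.5) == 'hello'
    m.VerifyAll()                                    # every expectation was met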
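MoxTest.testStubOutMethod and the ExampleMoxTest helper below swap a real attribute for a mock with StubOutWithMock and put the original back with UnsetStubs. The same flow against os.listdir, outside a test class (the '/tmp' path and the file names are placeholders):

    import os
    import mox

    m = mox.Mox()
    m.StubOutWithMock(os, 'listdir')
    os.listdir('/tmp').AndReturn(['a.txt', 'b.txt'])   # record the expectation
    m.ReplayAll()

    names = os.listdir('/tmp')       # hits the mock, not the real os.listdir
    m.VerifyAll()
    m.UnsetStubs()                   # restore the real os.listdir
    assert names == ['a.txt', 'b.txt']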
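MoxTestBaseTest runs mox_test_helper.ExampleMoxTest to confirm that mox.MoxTestBase hands each test self.mox and self.stubs and then verifies and unsets all stubs once the wrapped test method finishes. From the user's side such a test case can look like this sketch (ListDirTest is a hypothetical name):

    import os
    import unittest
    import mox

    class ListDirTest(mox.MoxTestBase):

        def testListDirStubbed(self):
            self.mox.StubOutWithMock(os, 'listdir')
            os.listdir('/tmp').AndReturn([])
            self.mox.ReplayAll()
            self.assertEqual([], os.listdir('/tmp'))
            # No explicit VerifyAll()/UnsetStubs(): the MoxTestBase wrapping
            # performs both after this method returns.

    if __name__ == '__main__':
        unittest.main()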
+if __name__ == '__main__': + unittest.main() diff --git a/vendor/pymox/mox_test_helper.py b/vendor/pymox/mox_test_helper.py new file mode 100755 index 000000000000..b4bfdec6c9f9 --- /dev/null +++ b/vendor/pymox/mox_test_helper.py @@ -0,0 +1,95 @@ +#!/usr/bin/python2.4 +# +# Copyright 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A very basic test class derived from mox.MoxTestBase, used by mox_test.py. + +The class defined in this module is used to test the features of +MoxTestBase and is not intended to be a standalone test. It needs to +be in a separate module, because otherwise the tests in this class +(which should not all pass) would be executed as part of the +mox_test.py test suite. + +See mox_test.MoxTestBaseTest for how this class is actually used. +""" + +import os + +import mox + +class ExampleMoxTestMixin(object): + """Mix-in class for mox test case class. + + It stubs out the same function as one of the test methods in + the example test case. Both tests must pass as meta class wraps + test methods in all base classes. + """ + + def testStat(self): + self.mox.StubOutWithMock(os, 'stat') + os.stat(self.DIR_PATH) + self.mox.ReplayAll() + os.stat(self.DIR_PATH) + + +class ExampleMoxTest(mox.MoxTestBase, ExampleMoxTestMixin): + + DIR_PATH = '/path/to/some/directory' + + def testSuccess(self): + self.mox.StubOutWithMock(os, 'listdir') + os.listdir(self.DIR_PATH) + self.mox.ReplayAll() + os.listdir(self.DIR_PATH) + + def testExpectedNotCalled(self): + self.mox.StubOutWithMock(os, 'listdir') + os.listdir(self.DIR_PATH) + self.mox.ReplayAll() + + def testUnexpectedCall(self): + self.mox.StubOutWithMock(os, 'listdir') + os.listdir(self.DIR_PATH) + self.mox.ReplayAll() + os.listdir('/path/to/some/other/directory') + os.listdir(self.DIR_PATH) + + def testFailure(self): + self.assertTrue(False) + + def testStatOther(self): + self.mox.StubOutWithMock(os, 'stat') + os.stat(self.DIR_PATH) + self.mox.ReplayAll() + os.stat(self.DIR_PATH) + + def testHasStubs(self): + listdir_list = [] + + def MockListdir(directory): + listdir_list.append(directory) + + self.stubs.Set(os, 'listdir', MockListdir) + os.listdir(self.DIR_PATH) + self.assertEqual([self.DIR_PATH], listdir_list) + + +class TestClassFromAnotherModule(object): + + def __init__(): + return None + + def Value(): + return "Not mock" diff --git a/vendor/pymox/setup.py b/vendor/pymox/setup.py new file mode 100755 index 000000000000..0a981ad2d21a --- /dev/null +++ b/vendor/pymox/setup.py @@ -0,0 +1,14 @@ +#!/usr/bin/python2.4 +from distutils.core import setup + +setup(name='mox', + version='0.5.2', + py_modules=['mox', 'stubout'], + url='http://code.google.com/p/pymox/', + maintainer='pymox maintainers', + maintainer_email='mox-discuss@googlegroups.com', + license='Apache License, Version 2.0', + description='Mock object framework', + long_description='''Mox is a mock object framework for Python based on the +Java mock object framework EasyMock.''', + ) diff --git a/vendor/pymox/stubout.py b/vendor/pymox/stubout.py new 
file mode 100644 index 000000000000..a45224158e13 --- /dev/null +++ b/vendor/pymox/stubout.py @@ -0,0 +1,142 @@ +#!/usr/bin/python2.4 +# +# Copyright 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect + +class StubOutForTesting: + """Sample Usage: + You want os.path.exists() to always return true during testing. + + stubs = StubOutForTesting() + stubs.Set(os.path, 'exists', lambda x: 1) + ... + stubs.UnsetAll() + + The above changes os.path.exists into a lambda that returns 1. Once + the ... part of the code finishes, the UnsetAll() looks up the old value + of os.path.exists and restores it. + + """ + def __init__(self): + self.cache = [] + self.stubs = [] + + def __del__(self): + self.SmartUnsetAll() + self.UnsetAll() + + def SmartSet(self, obj, attr_name, new_attr): + """Replace obj.attr_name with new_attr. This method is smart and works + at the module, class, and instance level while preserving proper + inheritance. It will not stub out C types however unless that has been + explicitly allowed by the type. + + This method supports the case where attr_name is a staticmethod or a + classmethod of obj. + + Notes: + - If obj is an instance, then it is its class that will actually be + stubbed. Note that the method Set() does not do that: if obj is + an instance, it (and not its class) will be stubbed. + - The stubbing is using the builtin getattr and setattr. So, the __get__ + and __set__ will be called when stubbing (TODO: A better idea would + probably be to manipulate obj.__dict__ instead of getattr() and + setattr()). + + Raises AttributeError if the attribute cannot be found. + """ + if (inspect.ismodule(obj) or + (not inspect.isclass(obj) and obj.__dict__.has_key(attr_name))): + orig_obj = obj + orig_attr = getattr(obj, attr_name) + + else: + if not inspect.isclass(obj): + mro = list(inspect.getmro(obj.__class__)) + else: + mro = list(inspect.getmro(obj)) + + mro.reverse() + + orig_attr = None + + for cls in mro: + try: + orig_obj = cls + orig_attr = getattr(obj, attr_name) + except AttributeError: + continue + + if orig_attr is None: + raise AttributeError("Attribute not found.") + + # Calling getattr() on a staticmethod transforms it to a 'normal' function. + # We need to ensure that we put it back as a staticmethod. + old_attribute = obj.__dict__.get(attr_name) + if old_attribute is not None and isinstance(old_attribute, staticmethod): + orig_attr = staticmethod(orig_attr) + + self.stubs.append((orig_obj, attr_name, orig_attr)) + setattr(orig_obj, attr_name, new_attr) + + def SmartUnsetAll(self): + """Reverses all the SmartSet() calls, restoring things to their original + definition. Its okay to call SmartUnsetAll() repeatedly, as later calls + have no effect if no SmartSet() calls have been made. + + """ + self.stubs.reverse() + + for args in self.stubs: + setattr(*args) + + self.stubs = [] + + def Set(self, parent, child_name, new_child): + """Replace child_name's old definition with new_child, in the context + of the given parent. 
The parent could be a module when the child is a + function at module scope. Or the parent could be a class when a class' + method is being replaced. The named child is set to new_child, while + the prior definition is saved away for later, when UnsetAll() is called. + + This method supports the case where child_name is a staticmethod or a + classmethod of parent. + """ + old_child = getattr(parent, child_name) + + old_attribute = parent.__dict__.get(child_name) + if old_attribute is not None: + if isinstance(old_attribute, staticmethod): + old_child = staticmethod(old_child) + elif isinstance(old_attribute, classmethod): + old_child = classmethod(old_child.im_func) + + self.cache.append((parent, old_child, child_name)) + setattr(parent, child_name, new_child) + + def UnsetAll(self): + """Reverses all the Set() calls, restoring things to their original + definition. Its okay to call UnsetAll() repeatedly, as later calls have + no effect if no Set() calls have been made. + + """ + # Undo calls to Set() in reverse order, in case Set() was called on the + # same arguments repeatedly (want the original call to be last one undone) + self.cache.reverse() + + for (parent, old_child, child_name) in self.cache: + setattr(parent, child_name, old_child) + self.cache = [] diff --git a/vendor/pymox/stubout_test.py b/vendor/pymox/stubout_test.py new file mode 100644 index 000000000000..a062b4840a0d --- /dev/null +++ b/vendor/pymox/stubout_test.py @@ -0,0 +1,47 @@ +#!/usr/bin/python2.4 +# +# Unit tests for stubout. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import mox +import stubout +import stubout_testee + + +class StubOutForTestingTest(unittest.TestCase): + def setUp(self): + self.mox = mox.Mox() + self.sample_function_backup = stubout_testee.SampleFunction + + def tearDown(self): + stubout_testee.SampleFunction = self.sample_function_backup + + def testSmartSetOnModule(self): + mock_function = self.mox.CreateMockAnything() + mock_function() + + stubber = stubout.StubOutForTesting() + stubber.SmartSet(stubout_testee, 'SampleFunction', mock_function) + + self.mox.ReplayAll() + + stubout_testee.SampleFunction() + + self.mox.VerifyAll() + + +if __name__ == '__main__': + unittest.main() diff --git a/vendor/pymox/stubout_testee.py b/vendor/pymox/stubout_testee.py new file mode 100644 index 000000000000..9cbdef60e892 --- /dev/null +++ b/vendor/pymox/stubout_testee.py @@ -0,0 +1,2 @@ +def SampleFunction(): + raise Exception('I should never be called!') diff --git a/vendor/python-daemon/ChangeLog b/vendor/python-daemon/ChangeLog new file mode 100644 index 000000000000..d96fad7ca7f6 --- /dev/null +++ b/vendor/python-daemon/ChangeLog @@ -0,0 +1,187 @@ +2010-03-02 Ben Finney + + Version 1.5.5 released. + + * Stop using ‘pkg_resources’ and revert to pre-1.5.3 version-string + handling, until a better way that doesn't break everyone else's + installation can be found. + +2010-02-27 Ben Finney + + Version 1.5.4 released. 
+ + * MANIFEST.in: Explicitly include version data file, otherwise + everything breaks for users of the sdist. + +2010-02-26 Ben Finney + + Version 1.5.3 released. + + * daemon/daemon.py: Invoke the pidfile context manager's ‘__exit__’ + method with the correct arguments (as per + ). + Thanks to Ludvig Ericson for the bug report. + * version: New plain-text data file to store project version string. + * setup.py: Read version string from data file. + * daemon/version/__init__.py: Query version string with ‘pkg_resources’. + +2010-01-20 Ben Finney + + * Add ‘pylint’ configuration for this project. + * Update copyright notices. + +2009-10-24 Ben Finney + + Version 1.5.2 released. + +2009-10-19 Ben Finney + + * Ensure we only prevent core dumps if ‘prevent_core’ is true. + Thanks to Denis Bilenko for reporting the lacking implementation of + this documented option. + +2009-09-28 Ben Finney + + * Add initial Frequently Asked Questions document. + +2009-09-26 Ben Finney + + Version 1.5.1 released. + + * Make a separate collection of DaemonRunner test scenarios. + * Handle a start request with a timeout on the PID file lock acquire. + +2009-09-24 Ben Finney + + * Implement ‘TimeoutPIDLockFile’ to specify a timeout in advance of + lock acquisition. + * Use lock with timeout for ‘DaemonRunner’. + +2009-09-24 Ben Finney + + Version 1.5 released. + + * Make a separate collection of PIDLockFile test scenarios. + +2009-09-23 Ben Finney + + * Raise specific errors on ‘DaemonRunner’ failures. + * Distinguish different conditions on reading and parsing PID file. + * Refactor code to ‘_terminate_daemon_process’ method. + * Improve explanations in comments and docstrings. + * Don't set pidfile at all if no path specified to constructor. + * Write the PID file using correct OS locking and permissions. + * Close the PID file after writing. + * Implement ‘PIDLockFile’ as subclass of ‘lockfile.LinkFileLock’. + * Remove redundant checks for file existence. + +2009-09-18 Ben Finney + + * Manage the excluded file descriptors as a set (not a list). + * Only inspect the file descriptor of streams if they actually have + one (via a ‘fileno’ method) when determining which file descriptors + to close. Thanks to Ask Solem for revealing this bug. + +2009-09-17 Ben Finney + + Version 1.4.8 released. + + * Remove child-exit signal (‘SIGCLD’, ‘SIGCHLD’) from default signal + map. Thanks to Joel Martin for pinpointing this issue. + * Document requirement for ensuring any operating-system specific + signal handlers are considered. + * Refactor ‘fork_then_exit_parent’ functionality to avoid duplicate + code. + * Remove redundant imports. + * Remove unused code from unit test suite scaffold. + * Add specific license terms for unit test suite scaffold. + +2009-09-03 Ben Finney + + Version 1.4.7 released. + +2009-09-02 Ben Finney + + * Fix keywords argument for distribution setup. + * Exclude ‘test’ package from distribution installation. + +2009-06-21 Ben Finney + + Version 1.4.6 released. + + * Update documentation for changes from latest PEP 3143 revision. + * Implement DaemonContext.is_open method. + +2009-05-17 Ben Finney + + Version 1.4.5 released. + + * Register DaemonContext.close method for atexit processing. + * Move PID file cleanup to close method. + * Improve docstrings by reference to, and copy from, PEP 3143. + * Use mock checking capabilities of newer ‘MiniMock’ library. + * Automate building a versioned distribution tarball. + * Include developer documentation files in source distribution. 
+ +2009-03-26 Ben Finney + + Version 1.4.4 released. + + * Conform to current PEP version, now released as PEP 3143 “Standard + daemon process libraryâ€. + * Ensure UID and GID are set in correct order. + * Delay closing all open files until just before re-binding standard + streams. + * Redirect standard streams to null device by default. + +2009-03-19 Ben Finney + + Version 1.4.3 released. + + * Close the PID file context on exit. + +2009-03-18 Ben Finney + + Version 1.4.2 released. + + * Context manager methods for DaemonContext. + +2009-03-18 Ben Finney + + Version 1.4.1 released. + + * Improvements to docstrings. + * Further conformance with draft PEP. + +2009-03-17 Ben Finney + + Version 1.4 released. + + * Implement the interface from a draft PEP for process daemonisation. + * Complete statement coverage from unit test suite. + +2009-03-12 Ben Finney + + Version 1.3 released. + + * Separate controller (now ‘DaemonRunner’) from daemon process + context (now ‘DaemonContext’). + * Fix many corner cases and bugs. + * Huge increase in unit test suite. + +2009-01-27 Ben Finney + + Version 1.2 released. + + * Initial release of this project forked from ‘bda.daemon’. Thanks, + Robert Niederreiter. + * Refactor some functionality out to helper functions. + * Begin unit test suite. + + +Local variables: +mode: change-log +coding: utf-8 +left-margin: 4 +indent-tabs-mode: nil +End: diff --git a/vendor/python-daemon/LICENSE.GPL-2 b/vendor/python-daemon/LICENSE.GPL-2 new file mode 100644 index 000000000000..d511905c1647 --- /dev/null +++ b/vendor/python-daemon/LICENSE.GPL-2 @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. 
+ + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/python-daemon/LICENSE.PSF-2 b/vendor/python-daemon/LICENSE.PSF-2 new file mode 100644 index 000000000000..28533b6c53a4 --- /dev/null +++ b/vendor/python-daemon/LICENSE.PSF-2 @@ -0,0 +1,48 @@ +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python +alone or in any derivative version, provided, however, that PSF's +License Agreement and PSF's notice of copyright, i.e., "Copyright (c) +2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative +version prepared by Licensee. + +3. 
In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. diff --git a/vendor/python-daemon/MANIFEST.in b/vendor/python-daemon/MANIFEST.in new file mode 100644 index 000000000000..ef71641df826 --- /dev/null +++ b/vendor/python-daemon/MANIFEST.in @@ -0,0 +1,4 @@ +include MANIFEST.in +include LICENSE.* +include ChangeLog +include TODO diff --git a/vendor/python-daemon/PKG-INFO b/vendor/python-daemon/PKG-INFO new file mode 100644 index 000000000000..df8f5531b2cf --- /dev/null +++ b/vendor/python-daemon/PKG-INFO @@ -0,0 +1,37 @@ +Metadata-Version: 1.0 +Name: python-daemon +Version: 1.5.5 +Summary: Library to implement a well-behaved Unix daemon process. +Home-page: http://pypi.python.org/pypi/python-daemon/ +Author: Ben Finney +Author-email: ben+python@benfinney.id.au +License: PSF-2+ +Description: This library implements the well-behaved daemon specification of + :pep:`3143`, "Standard daemon process library". + + A well-behaved Unix daemon process is tricky to get right, but the + required steps are much the same for every daemon program. A + `DaemonContext` instance holds the behaviour and configured + process environment for the program; use the instance as a context + manager to enter a daemon state. + + Simple example of usage:: + + import daemon + + from spam import do_main_program + + with daemon.DaemonContext(): + do_main_program() + + Customisation of the steps to become a daemon is available by + setting options on the `DaemonContext` instance; see the + documentation for that class for each option. 
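As an illustrative aside (not part of the packaged metadata above): the "customisation" the description mentions amounts to passing options to the `DaemonContext` constructor. A minimal sketch, assuming the hypothetical `spam` module from the usage example and made-up filesystem paths; `TimeoutPIDLockFile` is the lock class defined in `daemon/pidlockfile.py` later in this patch::

    import daemon
    from daemon import pidlockfile

    from spam import do_main_program          # hypothetical application module

    context = daemon.DaemonContext(
        working_directory='/var/lib/spam',    # hypothetical data directory (default is '/')
        umask=0o022,
        pidfile=pidlockfile.TimeoutPIDLockFile(
            '/var/run/spam.pid', acquire_timeout=5),
        )

    with context:
        do_main_program()

Entering the ``with`` block calls `DaemonContext.open`, which detaches the process, closes stray file descriptors, redirects the standard streams, and acquires the PID lock before `do_main_program` runs.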
+Keywords: daemon,fork,unix +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Intended Audience :: Developers +Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/vendor/python-daemon/README.nova b/vendor/python-daemon/README.nova new file mode 100644 index 000000000000..57cefd857f07 --- /dev/null +++ b/vendor/python-daemon/README.nova @@ -0,0 +1,4 @@ + + +NOTE(termie): using LinkFileLock in pidlockfile resulted in a bug, + modified the code by replacing it with FileLock diff --git a/vendor/python-daemon/daemon/__init__.py b/vendor/python-daemon/daemon/__init__.py new file mode 100644 index 000000000000..d8dc171a2699 --- /dev/null +++ b/vendor/python-daemon/daemon/__init__.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + +# daemon/__init__.py +# Part of python-daemon, an implementation of PEP 3143. +# +# Copyright © 2009–2010 Ben Finney +# Copyright © 2006 Robert Niederreiter +# +# This is free software: you may copy, modify, and/or distribute this work +# under the terms of the Python Software Foundation License, version 2 or +# later as published by the Python Software Foundation. +# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. + +""" Library to implement a well-behaved Unix daemon process. + + This library implements the well-behaved daemon specification of + :pep:`3143`, "Standard daemon process library". + + A well-behaved Unix daemon process is tricky to get right, but the + required steps are much the same for every daemon program. A + `DaemonContext` instance holds the behaviour and configured + process environment for the program; use the instance as a context + manager to enter a daemon state. + + Simple example of usage:: + + import daemon + + from spam import do_main_program + + with daemon.DaemonContext(): + do_main_program() + + Customisation of the steps to become a daemon is available by + setting options on the `DaemonContext` instance; see the + documentation for that class for each option. + + """ + +import version +from daemon import DaemonContext + + +_version = version.version +_copyright = version.copyright +_license = version.license +_url = "http://pypi.python.org/pypi/python-daemon/" diff --git a/vendor/python-daemon/daemon/daemon.py b/vendor/python-daemon/daemon/daemon.py new file mode 100644 index 000000000000..28db69574f24 --- /dev/null +++ b/vendor/python-daemon/daemon/daemon.py @@ -0,0 +1,776 @@ +# -*- coding: utf-8 -*- + +# daemon/daemon.py +# Part of python-daemon, an implementation of PEP 3143. +# +# Copyright © 2008–2010 Ben Finney +# Copyright © 2007–2008 Robert Niederreiter, Jens Klein +# Copyright © 2004–2005 Chad J. Schroeder +# Copyright © 2003 Clark Evans +# Copyright © 2002 Noah Spurrier +# Copyright © 2001 Jürgen Hermann +# +# This is free software: you may copy, modify, and/or distribute this work +# under the terms of the Python Software Foundation License, version 2 or +# later as published by the Python Software Foundation. +# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. + +""" Daemon process behaviour. + """ + +import os +import sys +import resource +import errno +import signal +import socket +import atexit + + +class DaemonError(Exception): + """ Base exception class for errors from this module. 
""" + + +class DaemonOSEnvironmentError(DaemonError, OSError): + """ Exception raised when daemon OS environment setup receives error. """ + + +class DaemonProcessDetachError(DaemonError, OSError): + """ Exception raised when process detach fails. """ + + +class DaemonContext(object): + """ Context for turning the current program into a daemon process. + + A `DaemonContext` instance represents the behaviour settings and + process context for the program when it becomes a daemon. The + behaviour and environment is customised by setting options on the + instance, before calling the `open` method. + + Each option can be passed as a keyword argument to the `DaemonContext` + constructor, or subsequently altered by assigning to an attribute on + the instance at any time prior to calling `open`. That is, for + options named `wibble` and `wubble`, the following invocation:: + + foo = daemon.DaemonContext(wibble=bar, wubble=baz) + foo.open() + + is equivalent to:: + + foo = daemon.DaemonContext() + foo.wibble = bar + foo.wubble = baz + foo.open() + + The following options are defined. + + `files_preserve` + :Default: ``None`` + + List of files that should *not* be closed when starting the + daemon. If ``None``, all open file descriptors will be closed. + + Elements of the list are file descriptors (as returned by a file + object's `fileno()` method) or Python `file` objects. Each + specifies a file that is not to be closed during daemon start. + + `chroot_directory` + :Default: ``None`` + + Full path to a directory to set as the effective root directory of + the process. If ``None``, specifies that the root directory is not + to be changed. + + `working_directory` + :Default: ``'/'`` + + Full path of the working directory to which the process should + change on daemon start. + + Since a filesystem cannot be unmounted if a process has its + current working directory on that filesystem, this should either + be left at default or set to a directory that is a sensible “home + directory†for the daemon while it is running. + + `umask` + :Default: ``0`` + + File access creation mask (“umaskâ€) to set for the process on + daemon start. + + Since a process inherits its umask from its parent process, + starting the daemon will reset the umask to this value so that + files are created by the daemon with access modes as it expects. + + `pidfile` + :Default: ``None`` + + Context manager for a PID lock file. When the daemon context opens + and closes, it enters and exits the `pidfile` context manager. + + `detach_process` + :Default: ``None`` + + If ``True``, detach the process context when opening the daemon + context; if ``False``, do not detach. + + If unspecified (``None``) during initialisation of the instance, + this will be set to ``True`` by default, and ``False`` only if + detaching the process is determined to be redundant; for example, + in the case when the process was started by `init`, by `initd`, or + by `inetd`. + + `signal_map` + :Default: system-dependent + + Mapping from operating system signals to callback actions. + + The mapping is used when the daemon context opens, and determines + the action for each signal's signal handler: + + * A value of ``None`` will ignore the signal (by setting the + signal action to ``signal.SIG_IGN``). + + * A string value will be used as the name of an attribute on the + ``DaemonContext`` instance. The attribute's value will be used + as the action for the signal handler. + + * Any other value will be used as the action for the + signal handler. 
See the ``signal.signal`` documentation + for details of the signal handler interface. + + The default value depends on which signals are defined on the + running system. Each item from the list below whose signal is + actually defined in the ``signal`` module will appear in the + default map: + + * ``signal.SIGTTIN``: ``None`` + + * ``signal.SIGTTOU``: ``None`` + + * ``signal.SIGTSTP``: ``None`` + + * ``signal.SIGTERM``: ``'terminate'`` + + Depending on how the program will interact with its child + processes, it may need to specify a signal map that + includes the ``signal.SIGCHLD`` signal (received when a + child process exits). See the specific operating system's + documentation for more detail on how to determine what + circumstances dictate the need for signal handlers. + + `uid` + :Default: ``os.getuid()`` + + `gid` + :Default: ``os.getgid()`` + + The user ID (“UIDâ€) value and group ID (“GIDâ€) value to switch + the process to on daemon start. + + The default values, the real UID and GID of the process, will + relinquish any effective privilege elevation inherited by the + process. + + `prevent_core` + :Default: ``True`` + + If true, prevents the generation of core files, in order to avoid + leaking sensitive information from daemons run as `root`. + + `stdin` + :Default: ``None`` + + `stdout` + :Default: ``None`` + + `stderr` + :Default: ``None`` + + Each of `stdin`, `stdout`, and `stderr` is a file-like object + which will be used as the new file for the standard I/O stream + `sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file + should therefore be open, with a minimum of mode 'r' in the case + of `stdin`, and mode 'w+' in the case of `stdout` and `stderr`. + + If the object has a `fileno()` method that returns a file + descriptor, the corresponding file will be excluded from being + closed during daemon start (that is, it will be treated as though + it were listed in `files_preserve`). + + If ``None``, the corresponding system stream is re-bound to the + file named by `os.devnull`. + + """ + + def __init__( + self, + chroot_directory=None, + working_directory='/', + umask=0, + uid=None, + gid=None, + prevent_core=True, + detach_process=None, + files_preserve=None, + pidfile=None, + stdin=None, + stdout=None, + stderr=None, + signal_map=None, + ): + """ Set up a new instance. """ + self.chroot_directory = chroot_directory + self.working_directory = working_directory + self.umask = umask + self.prevent_core = prevent_core + self.files_preserve = files_preserve + self.pidfile = pidfile + self.stdin = stdin + self.stdout = stdout + self.stderr = stderr + + if uid is None: + uid = os.getuid() + self.uid = uid + if gid is None: + gid = os.getgid() + self.gid = gid + + if detach_process is None: + detach_process = is_detach_process_context_required() + self.detach_process = detach_process + + if signal_map is None: + signal_map = make_default_signal_map() + self.signal_map = signal_map + + self._is_open = False + + @property + def is_open(self): + """ ``True`` if the instance is currently open. """ + return self._is_open + + def open(self): + """ Become a daemon process. + :Return: ``None`` + + Open the daemon context, turning the current program into a daemon + process. This performs the following steps: + + * If this instance's `is_open` property is true, return + immediately. This makes it safe to call `open` multiple times on + an instance. 
+ + * If the `prevent_core` attribute is true, set the resource limits + for the process to prevent any core dump from the process. + + * If the `chroot_directory` attribute is not ``None``, set the + effective root directory of the process to that directory (via + `os.chroot`). + + This allows running the daemon process inside a “chroot gaol†+ as a means of limiting the system's exposure to rogue behaviour + by the process. Note that the specified directory needs to + already be set up for this purpose. + + * Set the process UID and GID to the `uid` and `gid` attribute + values. + + * Close all open file descriptors. This excludes those listed in + the `files_preserve` attribute, and those that correspond to the + `stdin`, `stdout`, or `stderr` attributes. + + * Change current working directory to the path specified by the + `working_directory` attribute. + + * Reset the file access creation mask to the value specified by + the `umask` attribute. + + * If the `detach_process` option is true, detach the current + process into its own process group, and disassociate from any + controlling terminal. + + * Set signal handlers as specified by the `signal_map` attribute. + + * If any of the attributes `stdin`, `stdout`, `stderr` are not + ``None``, bind the system streams `sys.stdin`, `sys.stdout`, + and/or `sys.stderr` to the files represented by the + corresponding attributes. Where the attribute has a file + descriptor, the descriptor is duplicated (instead of re-binding + the name). + + * If the `pidfile` attribute is not ``None``, enter its context + manager. + + * Mark this instance as open (for the purpose of future `open` and + `close` calls). + + * Register the `close` method to be called during Python's exit + processing. + + When the function returns, the running program is a daemon + process. + + """ + if self.is_open: + return + + if self.chroot_directory is not None: + change_root_directory(self.chroot_directory) + + if self.prevent_core: + prevent_core_dump() + + change_file_creation_mask(self.umask) + change_working_directory(self.working_directory) + change_process_owner(self.uid, self.gid) + + if self.detach_process: + detach_process_context() + + signal_handler_map = self._make_signal_handler_map() + set_signal_handlers(signal_handler_map) + + exclude_fds = self._get_exclude_file_descriptors() + close_all_open_files(exclude=exclude_fds) + + redirect_stream(sys.stdin, self.stdin) + redirect_stream(sys.stdout, self.stdout) + redirect_stream(sys.stderr, self.stderr) + + if self.pidfile is not None: + self.pidfile.__enter__() + + self._is_open = True + + register_atexit_function(self.close) + + def __enter__(self): + """ Context manager entry point. """ + self.open() + return self + + def close(self): + """ Exit the daemon process context. + :Return: ``None`` + + Close the daemon context. This performs the following steps: + + * If this instance's `is_open` property is false, return + immediately. This makes it safe to call `close` multiple times + on an instance. + + * If the `pidfile` attribute is not ``None``, exit its context + manager. + + * Mark this instance as closed (for the purpose of future `open` + and `close` calls). + + """ + if not self.is_open: + return + + if self.pidfile is not None: + # Follow the interface for telling a context manager to exit, + # . + self.pidfile.__exit__(None, None, None) + + self._is_open = False + + def __exit__(self, exc_type, exc_value, traceback): + """ Context manager exit point. 
""" + self.close() + + def terminate(self, signal_number, stack_frame): + """ Signal handler for end-process signals. + :Return: ``None`` + + Signal handler for the ``signal.SIGTERM`` signal. Performs the + following step: + + * Raise a ``SystemExit`` exception explaining the signal. + + """ + exception = SystemExit( + "Terminating on signal %(signal_number)r" + % vars()) + raise exception + + def _get_exclude_file_descriptors(self): + """ Return the set of file descriptors to exclude closing. + + Returns a set containing the file descriptors for the + items in `files_preserve`, and also each of `stdin`, + `stdout`, and `stderr`: + + * If the item is ``None``, it is omitted from the return + set. + + * If the item has a ``fileno()`` method, that method's + return value is in the return set. + + * Otherwise, the item is in the return set verbatim. + + """ + files_preserve = self.files_preserve + if files_preserve is None: + files_preserve = [] + files_preserve.extend( + item for item in [self.stdin, self.stdout, self.stderr] + if hasattr(item, 'fileno')) + exclude_descriptors = set() + for item in files_preserve: + if item is None: + continue + if hasattr(item, 'fileno'): + exclude_descriptors.add(item.fileno()) + else: + exclude_descriptors.add(item) + return exclude_descriptors + + def _make_signal_handler(self, target): + """ Make the signal handler for a specified target object. + + If `target` is ``None``, returns ``signal.SIG_IGN``. If + `target` is a string, returns the attribute of this + instance named by that string. Otherwise, returns `target` + itself. + + """ + if target is None: + result = signal.SIG_IGN + elif isinstance(target, basestring): + name = target + result = getattr(self, name) + else: + result = target + + return result + + def _make_signal_handler_map(self): + """ Make the map from signals to handlers for this instance. + + Constructs a map from signal numbers to handlers for this + context instance, suitable for passing to + `set_signal_handlers`. + + """ + signal_handler_map = dict( + (signal_number, self._make_signal_handler(target)) + for (signal_number, target) in self.signal_map.items()) + return signal_handler_map + + +def change_working_directory(directory): + """ Change the working directory of this process. + """ + try: + os.chdir(directory) + except Exception, exc: + error = DaemonOSEnvironmentError( + "Unable to change working directory (%(exc)s)" + % vars()) + raise error + + +def change_root_directory(directory): + """ Change the root directory of this process. + + Sets the current working directory, then the process root + directory, to the specified `directory`. Requires appropriate + OS privileges for this process. + + """ + try: + os.chdir(directory) + os.chroot(directory) + except Exception, exc: + error = DaemonOSEnvironmentError( + "Unable to change root directory (%(exc)s)" + % vars()) + raise error + + +def change_file_creation_mask(mask): + """ Change the file creation mask for this process. + """ + try: + os.umask(mask) + except Exception, exc: + error = DaemonOSEnvironmentError( + "Unable to change file creation mask (%(exc)s)" + % vars()) + raise error + + +def change_process_owner(uid, gid): + """ Change the owning UID and GID of this process. + + Sets the GID then the UID of the process (in that order, to + avoid permission errors) to the specified `gid` and `uid` + values. Requires appropriate OS privileges for this process. 
+ + """ + try: + os.setgid(gid) + os.setuid(uid) + except Exception, exc: + error = DaemonOSEnvironmentError( + "Unable to change file creation mask (%(exc)s)" + % vars()) + raise error + + +def prevent_core_dump(): + """ Prevent this process from generating a core dump. + + Sets the soft and hard limits for core dump size to zero. On + Unix, this prevents the process from creating core dump + altogether. + + """ + core_resource = resource.RLIMIT_CORE + + try: + # Ensure the resource limit exists on this platform, by requesting + # its current value + core_limit_prev = resource.getrlimit(core_resource) + except ValueError, exc: + error = DaemonOSEnvironmentError( + "System does not support RLIMIT_CORE resource limit (%(exc)s)" + % vars()) + raise error + + # Set hard and soft limits to zero, i.e. no core dump at all + core_limit = (0, 0) + resource.setrlimit(core_resource, core_limit) + + +def detach_process_context(): + """ Detach the process context from parent and session. + + Detach from the parent process and session group, allowing the + parent to exit while this process continues running. + + Reference: “Advanced Programming in the Unix Environmentâ€, + section 13.3, by W. Richard Stevens, published 1993 by + Addison-Wesley. + + """ + + def fork_then_exit_parent(error_message): + """ Fork a child process, then exit the parent process. + + If the fork fails, raise a ``DaemonProcessDetachError`` + with ``error_message``. + + """ + try: + pid = os.fork() + if pid > 0: + os._exit(0) + except OSError, exc: + exc_errno = exc.errno + exc_strerror = exc.strerror + error = DaemonProcessDetachError( + "%(error_message)s: [%(exc_errno)d] %(exc_strerror)s" % vars()) + raise error + + fork_then_exit_parent(error_message="Failed first fork") + os.setsid() + fork_then_exit_parent(error_message="Failed second fork") + + +def is_process_started_by_init(): + """ Determine if the current process is started by `init`. + + The `init` process has the process ID of 1; if that is our + parent process ID, return ``True``, otherwise ``False``. + + """ + result = False + + init_pid = 1 + if os.getppid() == init_pid: + result = True + + return result + + +def is_socket(fd): + """ Determine if the file descriptor is a socket. + + Return ``False`` if querying the socket type of `fd` raises an + error; otherwise return ``True``. + + """ + result = False + + file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW) + + try: + socket_type = file_socket.getsockopt( + socket.SOL_SOCKET, socket.SO_TYPE) + except socket.error, exc: + exc_errno = exc.args[0] + if exc_errno == errno.ENOTSOCK: + # Socket operation on non-socket + pass + else: + # Some other socket error + result = True + else: + # No error getting socket type + result = True + + return result + + +def is_process_started_by_superserver(): + """ Determine if the current process is started by the superserver. + + The internet superserver creates a network socket, and + attaches it to the standard streams of the child process. If + that is the case for this process, return ``True``, otherwise + ``False``. + + """ + result = False + + stdin_fd = sys.__stdin__.fileno() + if is_socket(stdin_fd): + result = True + + return result + + +def is_detach_process_context_required(): + """ Determine whether detaching process context is required. + + Return ``True`` if the process environment indicates the + process is already detached: + + * Process was started by `init`; or + + * Process was started by `inetd`. 
+ + """ + result = True + if is_process_started_by_init() or is_process_started_by_superserver(): + result = False + + return result + + +def close_file_descriptor_if_open(fd): + """ Close a file descriptor if already open. + + Close the file descriptor `fd`, suppressing an error in the + case the file was not open. + + """ + try: + os.close(fd) + except OSError, exc: + if exc.errno == errno.EBADF: + # File descriptor was not open + pass + else: + error = DaemonOSEnvironmentError( + "Failed to close file descriptor %(fd)d" + " (%(exc)s)" + % vars()) + raise error + + +MAXFD = 2048 + +def get_maximum_file_descriptors(): + """ Return the maximum number of open file descriptors for this process. + + Return the process hard resource limit of maximum number of + open file descriptors. If the limit is “infinityâ€, a default + value of ``MAXFD`` is returned. + + """ + limits = resource.getrlimit(resource.RLIMIT_NOFILE) + result = limits[1] + if result == resource.RLIM_INFINITY: + result = MAXFD + return result + + +def close_all_open_files(exclude=set()): + """ Close all open file descriptors. + + Closes every file descriptor (if open) of this process. If + specified, `exclude` is a set of file descriptors to *not* + close. + + """ + maxfd = get_maximum_file_descriptors() + for fd in reversed(range(maxfd)): + if fd not in exclude: + close_file_descriptor_if_open(fd) + + +def redirect_stream(system_stream, target_stream): + """ Redirect a system stream to a specified file. + + `system_stream` is a standard system stream such as + ``sys.stdout``. `target_stream` is an open file object that + should replace the corresponding system stream object. + + If `target_stream` is ``None``, defaults to opening the + operating system's null device and using its file descriptor. + + """ + if target_stream is None: + target_fd = os.open(os.devnull, os.O_RDWR) + else: + target_fd = target_stream.fileno() + os.dup2(target_fd, system_stream.fileno()) + + +def make_default_signal_map(): + """ Make the default signal map for this system. + + The signals available differ by system. The map will not + contain any signals not defined on the running system. + + """ + name_map = { + 'SIGTSTP': None, + 'SIGTTIN': None, + 'SIGTTOU': None, + 'SIGTERM': 'terminate', + } + signal_map = dict( + (getattr(signal, name), target) + for (name, target) in name_map.items() + if hasattr(signal, name)) + + return signal_map + + +def set_signal_handlers(signal_handler_map): + """ Set the signal handlers as specified. + + The `signal_handler_map` argument is a map from signal number + to signal handler. See the `signal` module for details. + + """ + for (signal_number, handler) in signal_handler_map.items(): + signal.signal(signal_number, handler) + + +def register_atexit_function(func): + """ Register a function for processing at program exit. + + The function `func` is registered for a call with no arguments + at program exit. + + """ + atexit.register(func) diff --git a/vendor/python-daemon/daemon/pidlockfile.py b/vendor/python-daemon/daemon/pidlockfile.py new file mode 100644 index 000000000000..2eb334abb22b --- /dev/null +++ b/vendor/python-daemon/daemon/pidlockfile.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- + +# daemon/pidlockfile.py +# Part of python-daemon, an implementation of PEP 3143. 
+# +# Copyright © 2008–2010 Ben Finney +# +# This is free software: you may copy, modify, and/or distribute this work +# under the terms of the Python Software Foundation License, version 2 or +# later as published by the Python Software Foundation. +# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. + + +""" Lockfile behaviour implemented via Unix PID files. + """ + +import os +import errno + +from lockfile import ( + FileLock, + AlreadyLocked, LockFailed, + NotLocked, NotMyLock, + ) + + +class PIDFileError(Exception): + """ Abstract base class for errors specific to PID files. """ + +class PIDFileParseError(ValueError, PIDFileError): + """ Raised when parsing contents of PID file fails. """ + + +class PIDLockFile(FileLock, object): + """ Lockfile implemented as a Unix PID file. + + The PID file is named by the attribute `path`. When locked, + the file will be created with a single line of text, + containing the process ID (PID) of the process that acquired + the lock. + + The lock is acquired and maintained as per `LinkFileLock`. + + """ + + def read_pid(self): + """ Get the PID from the lock file. + """ + result = read_pid_from_pidfile(self.path) + return result + + def acquire(self, *args, **kwargs): + """ Acquire the lock. + + Locks the PID file then creates the PID file for this + lock. The `timeout` parameter is used as for the + `LinkFileLock` class. + + """ + super(PIDLockFile, self).acquire(*args, **kwargs) + try: + write_pid_to_pidfile(self.path) + except OSError, exc: + error = LockFailed("%(exc)s" % vars()) + raise error + + def release(self): + """ Release the lock. + + Removes the PID file then releases the lock, or raises an + error if the current process does not hold the lock. + + """ + if self.i_am_locking(): + remove_existing_pidfile(self.path) + super(PIDLockFile, self).release() + + def break_lock(self): + """ Break an existing lock. + + If the lock is held, breaks the lock and removes the PID + file. + + """ + super(PIDLockFile, self).break_lock() + remove_existing_pidfile(self.path) + + +class TimeoutPIDLockFile(PIDLockFile): + """ Lockfile with default timeout, implemented as a Unix PID file. + + This uses the ``PIDLockFile`` implementation, with the + following changes: + + * The `acquire_timeout` parameter to the initialiser will be + used as the default `timeout` parameter for the `acquire` + method. + + """ + + def __init__(self, path, acquire_timeout=None, *args, **kwargs): + """ Set up the parameters of a DaemonRunnerLock. """ + self.acquire_timeout = acquire_timeout + super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs) + + def acquire(self, timeout=None, *args, **kwargs): + """ Acquire the lock. """ + if timeout is None: + timeout = self.acquire_timeout + super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs) + + +def read_pid_from_pidfile(pidfile_path): + """ Read the PID recorded in the named PID file. + + Read and return the numeric PID recorded as text in the named + PID file. If the PID file does not exist, return ``None``. If + the content is not a valid PID, raise ``PIDFileParseError``. + + """ + pid = None + pidfile = None + try: + pidfile = open(pidfile_path, 'r') + except IOError, exc: + if exc.errno == errno.ENOENT: + pass + else: + raise + + if pidfile: + # According to the FHS 2.3 section on PID files in ‘/var/run’: + # + # The file must consist of the process identifier in + # ASCII-encoded decimal, followed by a newline character. 
… + # + # Programs that read PID files should be somewhat flexible + # in what they accept; i.e., they should ignore extra + # whitespace, leading zeroes, absence of the trailing + # newline, or additional lines in the PID file. + + line = pidfile.readline().strip() + try: + pid = int(line) + except ValueError: + raise PIDFileParseError( + "PID file %(pidfile_path)r contents invalid" % vars()) + pidfile.close() + + return pid + + +def write_pid_to_pidfile(pidfile_path): + """ Write the PID in the named PID file. + + Get the numeric process ID (“PIDâ€) of the current process + and write it to the named file as a line of text. + + """ + open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY) + open_mode = ( + ((os.R_OK | os.W_OK) << 6) | + ((os.R_OK) << 3) | + ((os.R_OK))) + pidfile_fd = os.open(pidfile_path, open_flags, open_mode) + pidfile = os.fdopen(pidfile_fd, 'w') + + # According to the FHS 2.3 section on PID files in ‘/var/run’: + # + # The file must consist of the process identifier in + # ASCII-encoded decimal, followed by a newline character. For + # example, if crond was process number 25, /var/run/crond.pid + # would contain three characters: two, five, and newline. + + pid = os.getpid() + line = "%(pid)d\n" % vars() + pidfile.write(line) + pidfile.close() + + +def remove_existing_pidfile(pidfile_path): + """ Remove the named PID file if it exists. + + Remove the named PID file. Ignore the condition if the file + does not exist, since that only means we are already in the + desired state. + + """ + try: + os.remove(pidfile_path) + except OSError, exc: + if exc.errno == errno.ENOENT: + pass + else: + raise diff --git a/vendor/python-daemon/daemon/runner.py b/vendor/python-daemon/daemon/runner.py new file mode 100644 index 000000000000..0642695b00b0 --- /dev/null +++ b/vendor/python-daemon/daemon/runner.py @@ -0,0 +1,229 @@ +# -*- coding: utf-8 -*- + +# daemon/runner.py +# Part of python-daemon, an implementation of PEP 3143. +# +# Copyright © 2009–2010 Ben Finney +# Copyright © 2007–2008 Robert Niederreiter, Jens Klein +# Copyright © 2003 Clark Evans +# Copyright © 2002 Noah Spurrier +# Copyright © 2001 Jürgen Hermann +# +# This is free software: you may copy, modify, and/or distribute this work +# under the terms of the Python Software Foundation License, version 2 or +# later as published by the Python Software Foundation. +# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. + +""" Daemon runner library. + """ + +import sys +import os +import signal +import errno + +import pidlockfile + +from daemon import DaemonContext + + +class DaemonRunnerError(Exception): + """ Abstract base class for errors from DaemonRunner. """ + +class DaemonRunnerInvalidActionError(ValueError, DaemonRunnerError): + """ Raised when specified action for DaemonRunner is invalid. """ + +class DaemonRunnerStartFailureError(RuntimeError, DaemonRunnerError): + """ Raised when failure starting DaemonRunner. """ + +class DaemonRunnerStopFailureError(RuntimeError, DaemonRunnerError): + """ Raised when failure stopping DaemonRunner. """ + + +class DaemonRunner(object): + """ Controller for a callable running in a separate background process. + + The first command-line argument is the action to take: + + * 'start': Become a daemon and call `app.run()`. + * 'stop': Exit the daemon process specified in the PID file. + * 'restart': Stop, then start. + + """ + + start_message = "started with pid %(pid)d" + + def __init__(self, app): + """ Set up the parameters of a new runner. 
+ + The `app` argument must have the following attributes: + + * `stdin_path`, `stdout_path`, `stderr_path`: Filesystem + paths to open and replace the existing `sys.stdin`, + `sys.stdout`, `sys.stderr`. + + * `pidfile_path`: Absolute filesystem path to a file that + will be used as the PID file for the daemon. If + ``None``, no PID file will be used. + + * `pidfile_timeout`: Used as the default acquisition + timeout value supplied to the runner's PID lock file. + + * `run`: Callable that will be invoked when the daemon is + started. + + """ + self.parse_args() + self.app = app + self.daemon_context = DaemonContext() + self.daemon_context.stdin = open(app.stdin_path, 'r') + self.daemon_context.stdout = open(app.stdout_path, 'w+') + self.daemon_context.stderr = open( + app.stderr_path, 'w+', buffering=0) + + self.pidfile = None + if app.pidfile_path is not None: + self.pidfile = make_pidlockfile( + app.pidfile_path, app.pidfile_timeout) + self.daemon_context.pidfile = self.pidfile + + def _usage_exit(self, argv): + """ Emit a usage message, then exit. + """ + progname = os.path.basename(argv[0]) + usage_exit_code = 2 + action_usage = "|".join(self.action_funcs.keys()) + message = "usage: %(progname)s %(action_usage)s" % vars() + emit_message(message) + sys.exit(usage_exit_code) + + def parse_args(self, argv=None): + """ Parse command-line arguments. + """ + if argv is None: + argv = sys.argv + + min_args = 2 + if len(argv) < min_args: + self._usage_exit(argv) + + self.action = argv[1] + if self.action not in self.action_funcs: + self._usage_exit(argv) + + def _start(self): + """ Open the daemon context and run the application. + """ + if is_pidfile_stale(self.pidfile): + self.pidfile.break_lock() + + try: + self.daemon_context.open() + except pidlockfile.AlreadyLocked: + pidfile_path = self.pidfile.path + raise DaemonRunnerStartFailureError( + "PID file %(pidfile_path)r already locked" % vars()) + + pid = os.getpid() + message = self.start_message % vars() + emit_message(message) + + self.app.run() + + def _terminate_daemon_process(self): + """ Terminate the daemon process specified in the current PID file. + """ + pid = self.pidfile.read_pid() + try: + os.kill(pid, signal.SIGTERM) + except OSError, exc: + raise DaemonRunnerStopFailureError( + "Failed to terminate %(pid)d: %(exc)s" % vars()) + + def _stop(self): + """ Exit the daemon process specified in the current PID file. + """ + if not self.pidfile.is_locked(): + pidfile_path = self.pidfile.path + raise DaemonRunnerStopFailureError( + "PID file %(pidfile_path)r not locked" % vars()) + + if is_pidfile_stale(self.pidfile): + self.pidfile.break_lock() + else: + self._terminate_daemon_process() + + def _restart(self): + """ Stop, then start. + """ + self._stop() + self._start() + + action_funcs = { + 'start': _start, + 'stop': _stop, + 'restart': _restart, + } + + def _get_action_func(self): + """ Return the function for the specified action. + + Raises ``DaemonRunnerInvalidActionError`` if the action is + unknown. + + """ + try: + func = self.action_funcs[self.action] + except KeyError: + raise DaemonRunnerInvalidActionError( + "Unknown action: %(action)r" % vars(self)) + return func + + def do_action(self): + """ Perform the requested action. + """ + func = self._get_action_func() + func(self) + + +def emit_message(message, stream=None): + """ Emit a message to the specified stream (default `sys.stderr`). 
""" + if stream is None: + stream = sys.stderr + stream.write("%(message)s\n" % vars()) + stream.flush() + + +def make_pidlockfile(path, acquire_timeout): + """ Make a PIDLockFile instance with the given filesystem path. """ + if not isinstance(path, basestring): + error = ValueError("Not a filesystem path: %(path)r" % vars()) + raise error + if not os.path.isabs(path): + error = ValueError("Not an absolute path: %(path)r" % vars()) + raise error + lockfile = pidlockfile.TimeoutPIDLockFile(path, acquire_timeout) + + return lockfile + + +def is_pidfile_stale(pidfile): + """ Determine whether a PID file is stale. + + Return ``True`` (“staleâ€) if the contents of the PID file are + valid but do not match the PID of a currently-running process; + otherwise return ``False``. + + """ + result = False + + pidfile_pid = pidfile.read_pid() + if pidfile_pid is not None: + try: + os.kill(pidfile_pid, signal.SIG_DFL) + except OSError, exc: + if exc.errno == errno.ESRCH: + # The specified PID does not exist + result = True + + return result diff --git a/vendor/python-daemon/daemon/version/__init__.py b/vendor/python-daemon/daemon/version/__init__.py new file mode 100644 index 000000000000..d2eafa6a99eb --- /dev/null +++ b/vendor/python-daemon/daemon/version/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# daemon/version/__init__.py +# Part of python-daemon, an implementation of PEP 3143. +# +# Copyright © 2008–2010 Ben Finney +# This is free software: you may copy, modify, and/or distribute this work +# under the terms of the Python Software Foundation License, version 2 or +# later as published by the Python Software Foundation. +# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. + +""" Version information for the python-daemon distribution. """ + +from version_info import version_info + +version_info['version_string'] = u"1.5.5" + +version_short = u"%(version_string)s" % version_info +version_full = u"%(version_string)s.r%(revno)s" % version_info +version = version_short + +author_name = u"Ben Finney" +author_email = u"ben+python@benfinney.id.au" +author = u"%(author_name)s <%(author_email)s>" % vars() + +copyright_year_begin = u"2001" +date = version_info['date'].split(' ', 1)[0] +copyright_year = date.split('-')[0] +copyright_year_range = copyright_year_begin +if copyright_year > copyright_year_begin: + copyright_year_range += u"–%(copyright_year)s" % vars() + +copyright = ( + u"Copyright © %(copyright_year_range)s %(author)s and others" + ) % vars() +license = u"PSF-2+" diff --git a/vendor/python-daemon/daemon/version/version_info.py b/vendor/python-daemon/daemon/version/version_info.py new file mode 100644 index 000000000000..cdbf280a8388 --- /dev/null +++ b/vendor/python-daemon/daemon/version/version_info.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +"""This file is automatically generated by generate_version_info +It uses the current working tree to determine the revision. +So don't edit it. 
:) +""" + +version_info = {'branch_nick': u'python-daemon.devel', + 'build_date': '2009-05-22 19:50:06 +1000', + 'clean': None, + 'date': '2009-05-22 19:47:30 +1000', + 'revision_id': 'ben+python@benfinney.id.au-20090522094730-p4vsa0reh7ktt4e1', + 'revno': 145} + +revisions = {} + +file_revisions = {} + + + +if __name__ == '__main__': + print 'revision: %(revno)d' % version_info + print 'nick: %(branch_nick)s' % version_info + print 'revision id: %(revision_id)s' % version_info diff --git a/vendor/python-daemon/python_daemon.egg-info/PKG-INFO b/vendor/python-daemon/python_daemon.egg-info/PKG-INFO new file mode 100644 index 000000000000..df8f5531b2cf --- /dev/null +++ b/vendor/python-daemon/python_daemon.egg-info/PKG-INFO @@ -0,0 +1,37 @@ +Metadata-Version: 1.0 +Name: python-daemon +Version: 1.5.5 +Summary: Library to implement a well-behaved Unix daemon process. +Home-page: http://pypi.python.org/pypi/python-daemon/ +Author: Ben Finney +Author-email: ben+python@benfinney.id.au +License: PSF-2+ +Description: This library implements the well-behaved daemon specification of + :pep:`3143`, "Standard daemon process library". + + A well-behaved Unix daemon process is tricky to get right, but the + required steps are much the same for every daemon program. A + `DaemonContext` instance holds the behaviour and configured + process environment for the program; use the instance as a context + manager to enter a daemon state. + + Simple example of usage:: + + import daemon + + from spam import do_main_program + + with daemon.DaemonContext(): + do_main_program() + + Customisation of the steps to become a daemon is available by + setting options on the `DaemonContext` instance; see the + documentation for that class for each option. +Keywords: daemon,fork,unix +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Intended Audience :: Developers +Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/vendor/python-daemon/python_daemon.egg-info/SOURCES.txt b/vendor/python-daemon/python_daemon.egg-info/SOURCES.txt new file mode 100644 index 000000000000..ab2b52368eaf --- /dev/null +++ b/vendor/python-daemon/python_daemon.egg-info/SOURCES.txt @@ -0,0 +1,22 @@ +ChangeLog +LICENSE.GPL-2 +LICENSE.PSF-2 +MANIFEST.in +setup.py +daemon/__init__.py +daemon/daemon.py +daemon/pidlockfile.py +daemon/runner.py +daemon/version/__init__.py +daemon/version/version_info.py +python_daemon.egg-info/PKG-INFO +python_daemon.egg-info/SOURCES.txt +python_daemon.egg-info/dependency_links.txt +python_daemon.egg-info/not-zip-safe +python_daemon.egg-info/requires.txt +python_daemon.egg-info/top_level.txt +test/__init__.py +test/scaffold.py +test/test_daemon.py +test/test_pidlockfile.py +test/test_runner.py \ No newline at end of file diff --git a/vendor/python-daemon/python_daemon.egg-info/dependency_links.txt b/vendor/python-daemon/python_daemon.egg-info/dependency_links.txt new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/vendor/python-daemon/python_daemon.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/vendor/python-daemon/python_daemon.egg-info/not-zip-safe b/vendor/python-daemon/python_daemon.egg-info/not-zip-safe new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/vendor/python-daemon/python_daemon.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git 
a/vendor/python-daemon/python_daemon.egg-info/requires.txt b/vendor/python-daemon/python_daemon.egg-info/requires.txt new file mode 100644 index 000000000000..1c7ae2166183 --- /dev/null +++ b/vendor/python-daemon/python_daemon.egg-info/requires.txt @@ -0,0 +1,2 @@ +setuptools +lockfile >=0.7 \ No newline at end of file diff --git a/vendor/python-daemon/python_daemon.egg-info/top_level.txt b/vendor/python-daemon/python_daemon.egg-info/top_level.txt new file mode 100644 index 000000000000..28e3ee0c0b11 --- /dev/null +++ b/vendor/python-daemon/python_daemon.egg-info/top_level.txt @@ -0,0 +1 @@ +daemon diff --git a/vendor/python-daemon/setup.cfg b/vendor/python-daemon/setup.cfg new file mode 100644 index 000000000000..861a9f554263 --- /dev/null +++ b/vendor/python-daemon/setup.cfg @@ -0,0 +1,5 @@ +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + diff --git a/vendor/python-daemon/setup.py b/vendor/python-daemon/setup.py new file mode 100644 index 000000000000..8570c8ae2af6 --- /dev/null +++ b/vendor/python-daemon/setup.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +# setup.py +# Part of python-daemon, an implementation of PEP 3143. +# +# Copyright © 2008–2010 Ben Finney +# Copyright © 2008 Robert Niederreiter, Jens Klein +# +# This is free software: you may copy, modify, and/or distribute this work +# under the terms of the Python Software Foundation License, version 2 or +# later as published by the Python Software Foundation. +# No warranty expressed or implied. See the file LICENSE.PSF-2 for details. + +""" Distribution setup for python-daemon library. + """ + +import textwrap +from setuptools import setup, find_packages + +distribution_name = "python-daemon" +main_module_name = 'daemon' +main_module = __import__(main_module_name, fromlist=['version']) +version = main_module.version + +short_description, long_description = ( + textwrap.dedent(d).strip() + for d in main_module.__doc__.split(u'\n\n', 1) + ) + + +setup( + name=distribution_name, + version=version.version, + packages=find_packages(exclude=["test"]), + + # setuptools metadata + zip_safe=False, + test_suite="test.suite", + tests_require=[ + "MiniMock >=1.2.2", + ], + install_requires=[ + "setuptools", + "lockfile >=0.7", + ], + + # PyPI metadata + author=version.author_name, + author_email=version.author_email, + description=short_description, + license=version.license, + keywords=u"daemon fork unix".split(), + url=main_module._url, + long_description=long_description, + classifiers=[ + # Reference: http://pypi.python.org/pypi?%3Aaction=list_classifiers + "Development Status :: 4 - Beta", + "License :: OSI Approved :: Python Software Foundation License", + "Operating System :: POSIX", + "Programming Language :: Python", + "Intended Audience :: Developers", + "Topic :: Software Development :: Libraries :: Python Modules", + ], + ) diff --git a/vendor/python-gflags/AUTHORS b/vendor/python-gflags/AUTHORS new file mode 100644 index 000000000000..ee92be88dcf1 --- /dev/null +++ b/vendor/python-gflags/AUTHORS @@ -0,0 +1,2 @@ +opensource@google.com + diff --git a/vendor/python-gflags/COPYING b/vendor/python-gflags/COPYING new file mode 100644 index 000000000000..d15b0c24134d --- /dev/null +++ b/vendor/python-gflags/COPYING @@ -0,0 +1,28 @@ +Copyright (c) 2006, Google Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/python-gflags/ChangeLog b/vendor/python-gflags/ChangeLog new file mode 100644 index 000000000000..16a5eca0d6ec --- /dev/null +++ b/vendor/python-gflags/ChangeLog @@ -0,0 +1,5 @@ +Mon Jan 4 18:46:29 2010 Tim 'mithro' Ansell + + * python-gflags: version 1.3 + * Fork from the C++ package (google-gflags 1.3) + * Add debian packaging diff --git a/vendor/python-gflags/README b/vendor/python-gflags/README new file mode 100644 index 000000000000..81daa7ab49aa --- /dev/null +++ b/vendor/python-gflags/README @@ -0,0 +1,23 @@ +This repository contains a python implementation of the Google commandline +flags module. + + GFlags defines a *distributed* command line system, replacing systems like + getopt(), optparse and manual argument processing. Rather than an application + having to define all flags in or near main(), each python module defines flags + that are useful to it. When one python module imports another, it gains + access to the other's flags. + + It includes the ability to define flag types (boolean, float, interger, list), + autogeneration of help (in both human and machine readable format) and reading + arguments from a file. It also includes the ability to automatically generate + man pages from the help flags. + +Documentation for implementation is at the top of gflags.py file. + +To install the python module, run + python ./setup.py install + +When you install this library, you also get a helper application, +gflags2man.py, installed into /usr/local/bin. You can run gflags2man.py to +create an instant man page, with all the commandline flags and their docs, for +any C++ or python program you've written using the gflags library. diff --git a/vendor/python-gflags/debian/README b/vendor/python-gflags/debian/README new file mode 100644 index 000000000000..57becfda757e --- /dev/null +++ b/vendor/python-gflags/debian/README @@ -0,0 +1,7 @@ +The list of files here isn't complete. 
For a step-by-step guide on +how to set this package up correctly, check out + http://www.debian.org/doc/maint-guide/ + +Most of the files that are in this directory are boilerplate. +However, you may need to change the list of binary-arch dependencies +in 'rules'. diff --git a/vendor/python-gflags/debian/changelog b/vendor/python-gflags/debian/changelog new file mode 100644 index 000000000000..6f9aa6c991b2 --- /dev/null +++ b/vendor/python-gflags/debian/changelog @@ -0,0 +1,11 @@ +python-gflags (1.3-2) unstable; urgency=low + + * Fixed man-page generation. + + -- Tim 'mithro' Ansell Mon, 07 Jan 2010 13:46:10 +1100 +python-gflags (1.3-1) unstable; urgency=low + + * Initial release. + * Packaging based on gflags 1.3 + + -- Tim 'mithro' Ansell Mon, 04 Jan 2010 18:46:10 -0800 diff --git a/vendor/python-gflags/debian/compat b/vendor/python-gflags/debian/compat new file mode 100644 index 000000000000..7ed6ff82de6b --- /dev/null +++ b/vendor/python-gflags/debian/compat @@ -0,0 +1 @@ +5 diff --git a/vendor/python-gflags/debian/control b/vendor/python-gflags/debian/control new file mode 100644 index 000000000000..fb502d3bba3b --- /dev/null +++ b/vendor/python-gflags/debian/control @@ -0,0 +1,26 @@ +Source: python-gflags +Section: python +XS-Python-Version: all +Priority: optional +Maintainer: Tim 'mithro' Ansell +Build-Depends-Indep: python-central (>= 0.5.6), python-setuptools (>= 0.6b3-1), python-all +Build-Depends: debhelper (>= 5.0.38) +Standards-Version: 3.7.2 + +Package: python-gflags +Architecture: all +Depends: ${python:Depends} +XB-Python-Version: ${python:Versions} +Description: A Python implementation of the Google commandline flags module + . + GFlags defines a *distributed* command line system, replacing systems like + getopt(), optparse and manual argument processing. Rather than an application + having to define all flags in or near main(), each Python module defines flags + that are useful to it. When one Python module imports another, it gains + access to the other's flags. + . + It includes the ability to define flag types (boolean, float, interger, list), + autogeneration of help (in both human and machine readable format) and reading + arguments from a file. It also includes the ability to automatically generate + man pages from the help flags. + diff --git a/vendor/python-gflags/debian/copyright b/vendor/python-gflags/debian/copyright new file mode 100644 index 000000000000..5100c02e6d8e --- /dev/null +++ b/vendor/python-gflags/debian/copyright @@ -0,0 +1,41 @@ +This package was debianized by Tim 'mithro' Ansell on +Thu, 12 Nov 2009 11:25:53 +1100. + +It was downloaded from http://code.google.com/p/google-gflags/downloads/list + +Upstream Author: Google Inc. +Copyright: Google Inc. + +License: + +Copyright (c) 2006, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The Debian packaging is (C) 2009, Tim 'mithro' Ansell and +is licensed under the above. diff --git a/vendor/python-gflags/debian/docs b/vendor/python-gflags/debian/docs new file mode 100644 index 000000000000..6f12db50845a --- /dev/null +++ b/vendor/python-gflags/debian/docs @@ -0,0 +1,2 @@ +AUTHORS +README diff --git a/vendor/python-gflags/debian/rules b/vendor/python-gflags/debian/rules new file mode 100755 index 000000000000..e29f98838155 --- /dev/null +++ b/vendor/python-gflags/debian/rules @@ -0,0 +1,62 @@ +#!/usr/bin/make -f +# -*- makefile -*- +# Sample debian/rules that uses debhelper. +# GNU copyright 1997 to 1999 by Joey Hess. + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 + +PYTHON := /usr/bin/python +#PYVER := $(shell $(PYTHON) -c 'import sys; print sys.version[:3]') +PYVERS = $(shell pyversions -vr) + +build: $(PYVERS:%=build-python%) + touch $@ + +build-python%: + dh_testdir + python$* setup.py build + touch $@ + +clean: + dh_testdir + dh_testroot + rm -f build-python* + rm -rf build + -find . -name '*.py[co]' | xargs rm -f + dh_clean + +install: build $(PYVERS:%=install-python%) + +install-python%: + dh_testdir + dh_testroot + dh_clean -k + dh_installdirs + python$* setup.py install --root=$(CURDIR)/debian/python-gflags + # Scripts should not have a .py on the end of them + mv $(CURDIR)/debian/python-gflags/usr/bin/gflags2man.py $(CURDIR)/debian/python-gflags/usr/bin/gflags2man + # Generate a man file for gflags2man + mkdir -p $(CURDIR)/debian/python-gflags/usr/share/man/man1 + PYTHONPATH=$(CURDIR)/debian/.. python$* gflags2man.py --dest_dir $(CURDIR)/debian/python-gflags/usr/share/man/man1 $(CURDIR)/debian/python-gflags/usr/bin/gflags2man + +# Build architecture-independent files here. +binary-indep: build install + dh_testdir + dh_testroot + dh_installchangelogs -k ChangeLog + dh_installdocs + dh_pycentral + dh_compress -X.py + dh_fixperms + dh_installdeb + dh_gencontrol + dh_md5sums + dh_builddeb + +# Build architecture-dependent files here. +binary-arch: build install +# We have nothing to do by default. + +binary: binary-indep binary-arch +.PHONY: build clean binary-indep binary-arch binary install configure diff --git a/vendor/python-gflags/gflags.py b/vendor/python-gflags/gflags.py new file mode 100644 index 000000000000..1e4659e328b0 --- /dev/null +++ b/vendor/python-gflags/gflags.py @@ -0,0 +1,2340 @@ +#!/usr/bin/env python + +# Copyright (c) 2007, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# --- +# Author: Chad Lester +# Design and style contributions by: +# Amit Patel, Bogdan Cocosel, Daniel Dulitz, Eric Tiedemann, +# Eric Veach, Laurence Gonsalves, Matthew Springer +# Code reorganized a bit by Craig Silverstein + +"""This module is used to define and parse command line flags. + +This module defines a *distributed* flag-definition policy: rather than +an application having to define all flags in or near main(), each python +module defines flags that are useful to it. When one python module +imports another, it gains access to the other's flags. (This is +implemented by having all modules share a common, global registry object +containing all the flag information.) + +Flags are defined through the use of one of the DEFINE_xxx functions. +The specific function used determines how the flag is parsed, checked, +and optionally type-converted, when it's seen on the command line. + + +IMPLEMENTATION: DEFINE_* creates a 'Flag' object and registers it with a +'FlagValues' object (typically the global FlagValues FLAGS, defined +here). The 'FlagValues' object can scan the command line arguments and +pass flag arguments to the corresponding 'Flag' objects for +value-checking and type conversion. The converted flag values are +available as attributes of the 'FlagValues' object. + +Code can access the flag through a FlagValues object, for instance +gflags.FLAGS.myflag. Typically, the __main__ module passes the +command line arguments to gflags.FLAGS for parsing. + +At bottom, this module calls getopt(), so getopt functionality is +supported, including short- and long-style flags, and the use of -- to +terminate flags. + +Methods defined by the flag module will throw 'FlagsError' exceptions. +The exception argument will be a human-readable string. + + +FLAG TYPES: This is a list of the DEFINE_*'s that you can do. All flags +take a name, default value, help-string, and optional 'short' name +(one-letter name). Some flags have other arguments, which are described +with the flag. + +DEFINE_string: takes any input, and interprets it as a string. + +DEFINE_bool or +DEFINE_boolean: typically does not take an argument: say --myflag to + set FLAGS.myflag to true, or --nomyflag to set + FLAGS.myflag to false. 
Alternately, you can say + --myflag=true or --myflag=t or --myflag=1 or + --myflag=false or --myflag=f or --myflag=0 + +DEFINE_float: takes an input and interprets it as a floating point + number. Takes optional args lower_bound and upper_bound; + if the number specified on the command line is out of + range, it will raise a FlagError. + +DEFINE_integer: takes an input and interprets it as an integer. Takes + optional args lower_bound and upper_bound as for floats. + +DEFINE_enum: takes a list of strings which represents legal values. If + the command-line value is not in this list, raise a flag + error. Otherwise, assign to FLAGS.flag as a string. + +DEFINE_list: Takes a comma-separated list of strings on the commandline. + Stores them in a python list object. + +DEFINE_spaceseplist: Takes a space-separated list of strings on the + commandline. Stores them in a python list object. + Example: --myspacesepflag "foo bar baz" + +DEFINE_multistring: The same as DEFINE_string, except the flag can be + specified more than once on the commandline. The + result is a python list object (list of strings), + even if the flag is only on the command line once. + +DEFINE_multi_int: The same as DEFINE_integer, except the flag can be + specified more than once on the commandline. The + result is a python list object (list of ints), even if + the flag is only on the command line once. + + +SPECIAL FLAGS: There are a few flags that have special meaning: + --help prints a list of all the flags in a human-readable fashion + --helpshort prints a list of all key flags (see below). + --helpxml prints a list of all flags, in XML format. DO NOT parse + the output of --help and --helpshort. Instead, parse + the output of --helpxml. As we add new flags, we may + add new XML elements. Hence, make sure your parser + does not crash when it encounters new XML elements. + --flagfile=foo read flags from foo. + --undefok=f1,f2 ignore unrecognized option errors for f1,f2. + For boolean flags, you should use --undefok=boolflag, and + --boolflag and --noboolflag will be accepted. Do not use + --undefok=noboolflag. + -- as in getopt(), terminates flag-processing + + +NOTE ON --flagfile: + +Flags may be loaded from text files in addition to being specified on +the commandline. + +Any flags you don't feel like typing, throw them in a file, one flag per +line, for instance: + --myflag=myvalue + --nomyboolean_flag +You then specify your file with the special flag '--flagfile=somefile'. +You CAN recursively nest flagfile= tokens OR use multiple files on the +command line. Lines beginning with a single hash '#' or a double slash +'//' are comments in your flagfile. + +Any flagfile= will be interpreted as having a relative path from +the current working directory rather than from the place the file was +included from: + myPythonScript.py --flagfile=config/somefile.cfg + +If somefile.cfg includes further --flagfile= directives, these will be +referenced relative to the original CWD, not from the directory the +including flagfile was found in! + +The caveat applies to people who are including a series of nested files +in a different dir than they are executing out of. Relative path names +are always from CWD, not from the directory of the parent include +flagfile. We do now support '~' expanded directory names. + +Absolute path names ALWAYS work! + + +EXAMPLE USAGE: + + import gflags + FLAGS = gflags.FLAGS + + # Flag names are globally defined! 
So in general, we need to be + # careful to pick names that are unlikely to be used by other libraries. + # If there is a conflict, we'll get an error at import time. + gflags.DEFINE_string('name', 'Mr. President', 'your name') + gflags.DEFINE_integer('age', None, 'your age in years', lower_bound=0) + gflags.DEFINE_boolean('debug', False, 'produces debugging output') + gflags.DEFINE_enum('gender', 'male', ['male', 'female'], 'your gender') + + def main(argv): + try: + argv = FLAGS(argv) # parse flags + except gflags.FlagsError, e: + print '%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS) + sys.exit(1) + if FLAGS.debug: print 'non-flag arguments:', argv + print 'Happy Birthday', FLAGS.name + if FLAGS.age is not None: + print 'You are a %s, who is %d years old' % (FLAGS.gender, FLAGS.age) + + if __name__ == '__main__': + main(sys.argv) + + +KEY FLAGS: + +As we already explained, each module gains access to all flags defined +by all the other modules it transitively imports. In the case of +non-trivial scripts, this means a lot of flags ... For documentation +purposes, it is good to identify the flags that are key (i.e., really +important) to a module. Clearly, the concept of "key flag" is a +subjective one. When trying to determine whether a flag is key to a +module or not, assume that you are trying to explain your module to a +potential user: which flags would you really like to mention first? + +We'll describe shortly how to declare which flags are key to a module. +For the moment, assume we know the set of key flags for each module. +Then, if you use the app.py module, you can use the --helpshort flag to +print only the help for the flags that are key to the main module, in a +human-readable format. + +NOTE: If you need to parse the flag help, do NOT use the output of +--help / --helpshort. That output is meant for human consumption, and +may be changed in the future. Instead, use --helpxml; flags that are +key for the main module are marked there with a yes element. + +The set of key flags for a module M is composed of: + +1. Flags defined by module M by calling a DEFINE_* function. + +2. Flags that module M explictly declares as key by using the function + + DECLARE_key_flag() + +3. Key flags of other modules that M specifies by using the function + + ADOPT_module_key_flags() + + This is a "bulk" declaration of key flags: each flag that is key for + becomes key for the current module too. + +Notice that if you do not use the functions described at points 2 and 3 +above, then --helpshort prints information only about the flags defined +by the main module of our script. In many cases, this behavior is good +enough. But if you move part of the main module code (together with the +related flags) into a different module, then it is nice to use +DECLARE_key_flag / ADOPT_module_key_flags and make sure --helpshort +lists all relevant flags (otherwise, your code refactoring may confuse +your users). + +Note: each of DECLARE_key_flag / ADOPT_module_key_flags has its own +pluses and minuses: DECLARE_key_flag is more targeted and may lead a +more focused --helpshort documentation. ADOPT_module_key_flags is good +for cases when an entire module is considered key to the current script. +Also, it does not require updates to client scripts when a new flag is +added to the module. 
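[Editor's note, illustrative only -- not part of the vendored gflags sources.] Before the key-flag example that follows, a brief hedged sketch of the DEFINE_* variants listed earlier (list, multi-value and bounded numeric flags) that the usage examples in this docstring do not exercise; the flag names below are invented for illustration:

    import sys
    import gflags

    FLAGS = gflags.FLAGS

    gflags.DEFINE_list('servers', 'alpha,beta', 'Comma-separated server names.')
    gflags.DEFINE_multistring('tag', [], 'May be repeated on the command line.')
    gflags.DEFINE_float('ratio', 0.5, 'A ratio between 0 and 1.',
                        lower_bound=0.0, upper_bound=1.0)

    if __name__ == '__main__':
      argv = FLAGS(sys.argv)     # also expands any --flagfile=FILE arguments
      print FLAGS.servers        # a python list, e.g. ['alpha', 'beta']
      print FLAGS.tag            # a list even if --tag appeared only once

    # Example invocation:
    #   myscript.py --servers=gamma,delta --tag=x --tag=y --ratio=0.25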
+ + +EXAMPLE USAGE 2 (WITH KEY FLAGS): + +Consider an application that contains the following three files (two +auxiliary modules and a main module): + +File libfoo.py: + + import gflags + + gflags.DEFINE_integer('num_replicas', 3, 'Number of replicas to start') + gflags.DEFINE_boolean('rpc2', True, 'Turn on the usage of RPC2.') + + ... some code ... + +File libbar.py: + + import gflags + + gflags.DEFINE_string('bar_gfs_path', '/gfs/path', + 'Path to the GFS files for libbar.') + gflags.DEFINE_string('email_for_bar_errors', 'bar-team@google.com', + 'Email address for bug reports about module libbar.') + gflags.DEFINE_boolean('bar_risky_hack', False, + 'Turn on an experimental and buggy optimization.') + + ... some code ... + +File myscript.py: + + import gflags + import libfoo + import libbar + + gflags.DEFINE_integer('num_iterations', 0, 'Number of iterations.') + + # Declare that all flags that are key for libfoo are + # key for this module too. + gflags.ADOPT_module_key_flags(libfoo) + + # Declare that the flag --bar_gfs_path (defined in libbar) is key + # for this module. + gflags.DECLARE_key_flag('bar_gfs_path') + + ... some code ... + +When myscript is invoked with the flag --helpshort, the resulted help +message lists information about all the key flags for myscript: +--num_iterations, --num_replicas, --rpc2, and --bar_gfs_path (in +addition to the special flags --help and --helpshort). + +Of course, myscript uses all the flags declared by it (in this case, +just --num_replicas) or by any of the modules it transitively imports +(e.g., the modules libfoo, libbar). E.g., it can access the value of +FLAGS.bar_risky_hack, even if --bar_risky_hack is not declared as a key +flag for myscript. +""" + +import cgi +import getopt +import os +import re +import string +import sys + +# Are we running at least python 2.2? +try: + if tuple(sys.version_info[:3]) < (2,2,0): + raise NotImplementedError("requires python 2.2.0 or later") +except AttributeError: # a very old python, that lacks sys.version_info + raise NotImplementedError("requires python 2.2.0 or later") + +# If we're not running at least python 2.2.1, define True, False, and bool. +# Thanks, Guido, for the code. +try: + True, False, bool +except NameError: + False = 0 + True = 1 + def bool(x): + if x: + return True + else: + return False + +# Are we running under pychecker? +_RUNNING_PYCHECKER = 'pychecker.python' in sys.modules + + +def _GetCallingModule(): + """Returns the name of the module that's calling into this module. + + We generally use this function to get the name of the module calling a + DEFINE_foo... function. + """ + # Walk down the stack to find the first globals dict that's not ours. + for depth in range(1, sys.getrecursionlimit()): + if not sys._getframe(depth).f_globals is globals(): + module_name = __GetModuleName(sys._getframe(depth).f_globals) + if module_name is not None: + return module_name + raise AssertionError("No module was found") + + +# module exceptions: +class FlagsError(Exception): + """The base class for all flags errors.""" + pass + + +class DuplicateFlag(FlagsError): + """Raised if there is a flag naming conflict.""" + pass + + +# A DuplicateFlagError conveys more information than a +# DuplicateFlag. Since there are external modules that create +# DuplicateFlags, the interface to DuplicateFlag shouldn't change. +class DuplicateFlagError(DuplicateFlag): + + def __init__(self, flagname, flag_values): + self.flagname = flagname + message = "The flag '%s' is defined twice." 
% self.flagname + flags_by_module = flag_values.FlagsByModuleDict() + for module in flags_by_module: + for flag in flags_by_module[module]: + if flag.name == flagname or flag.short_name == flagname: + message = message + " First from " + module + "," + break + message = message + " Second from " + _GetCallingModule() + DuplicateFlag.__init__(self, message) + + +class IllegalFlagValue(FlagsError): + """The flag command line argument is illegal.""" + pass + + +class UnrecognizedFlag(FlagsError): + """Raised if a flag is unrecognized.""" + pass + + +# An UnrecognizedFlagError conveys more information than an +# UnrecognizedFlag. Since there are external modules that create +# DuplicateFlags, the interface to DuplicateFlag shouldn't change. +class UnrecognizedFlagError(UnrecognizedFlag): + def __init__(self, flagname): + self.flagname = flagname + UnrecognizedFlag.__init__( + self, "Unknown command line flag '%s'" % flagname) + + +# Global variable used by expvar +_exported_flags = {} +_help_width = 80 # width of help output + + +def GetHelpWidth(): + """Returns: an integer, the width of help lines that is used in TextWrap.""" + return _help_width + + +def CutCommonSpacePrefix(text): + """Removes a common space prefix from the lines of a multiline text. + + If the first line does not start with a space, it is left as it is and + only in the remaining lines a common space prefix is being searched + for. That means the first line will stay untouched. This is especially + useful to turn doc strings into help texts. This is because some + people prefer to have the doc comment start already after the + apostrophy and then align the following lines while others have the + apostrophies on a seperately line. + + The function also drops trailing empty lines and ignores empty lines + following the initial content line while calculating the initial + common whitespace. + + Args: + text: text to work on + + Returns: + the resulting text + """ + text_lines = text.splitlines() + # Drop trailing empty lines + while text_lines and not text_lines[-1]: + text_lines = text_lines[:-1] + if text_lines: + # We got some content, is the first line starting with a space? + if text_lines[0] and text_lines[0][0].isspace(): + text_first_line = [] + else: + text_first_line = [text_lines.pop(0)] + # Calculate length of common leading whitesppace (only over content lines) + common_prefix = os.path.commonprefix([line for line in text_lines if line]) + space_prefix_len = len(common_prefix) - len(common_prefix.lstrip()) + # If we have a common space prefix, drop it from all lines + if space_prefix_len: + for index in xrange(len(text_lines)): + if text_lines[index]: + text_lines[index] = text_lines[index][space_prefix_len:] + return '\n'.join(text_first_line + text_lines) + return '' + + +def TextWrap(text, length=None, indent='', firstline_indent=None, tabs=' '): + """Wraps a given text to a maximum line length and returns it. + + We turn lines that only contain whitespaces into empty lines. We keep + new lines and tabs (e.g., we do not treat tabs as spaces). 
+ + Args: + text: text to wrap + length: maximum length of a line, includes indentation + if this is None then use GetHelpWidth() + indent: indent for all but first line + firstline_indent: indent for first line; if None, fall back to indent + tabs: replacement for tabs + + Returns: + wrapped text + + Raises: + FlagsError: if indent not shorter than length + FlagsError: if firstline_indent not shorter than length + """ + # Get defaults where callee used None + if length is None: + length = GetHelpWidth() + if indent is None: + indent = '' + if len(indent) >= length: + raise FlagsError('Indent must be shorter than length') + # In line we will be holding the current line which is to be started + # with indent (or firstline_indent if available) and then appended + # with words. + if firstline_indent is None: + firstline_indent = '' + line = indent + else: + line = firstline_indent + if len(firstline_indent) >= length: + raise FlagsError('First iline indent must be shorter than length') + + # If the callee does not care about tabs we simply convert them to + # spaces If callee wanted tabs to be single space then we do that + # already here. + if not tabs or tabs == ' ': + text = text.replace('\t', ' ') + else: + tabs_are_whitespace = not tabs.strip() + + line_regex = re.compile('([ ]*)(\t*)([^ \t]+)', re.MULTILINE) + + # Split the text into lines and the lines with the regex above. The + # resulting lines are collected in result[]. For each split we get the + # spaces, the tabs and the next non white space (e.g. next word). + result = [] + for text_line in text.splitlines(): + # Store result length so we can find out whether processing the next + # line gave any new content + old_result_len = len(result) + # Process next line with line_regex. For optimization we do an rstrip(). + # - process tabs (changes either line or word, see below) + # - process word (first try to squeeze on line, then wrap or force wrap) + # Spaces found on the line are ignored, they get added while wrapping as + # needed. + for spaces, current_tabs, word in line_regex.findall(text_line.rstrip()): + # If tabs weren't converted to spaces, handle them now + if current_tabs: + # If the last thing we added was a space anyway then drop + # it. But let's not get rid of the indentation. + if (((result and line != indent) or + (not result and line != firstline_indent)) and line[-1] == ' '): + line = line[:-1] + # Add the tabs, if that means adding whitespace, just add it at + # the line, the rstrip() code while shorten the line down if + # necessary + if tabs_are_whitespace: + line += tabs * len(current_tabs) + else: + # if not all tab replacement is whitespace we prepend it to the word + word = tabs * len(current_tabs) + word + # Handle the case where word cannot be squeezed onto current last line + if len(line) + len(word) > length and len(indent) + len(word) <= length: + result.append(line.rstrip()) + line = indent + word + word = '' + # No space left on line or can we append a space? + if len(line) + 1 >= length: + result.append(line.rstrip()) + line = indent + else: + line += ' ' + # Add word and shorten it up to allowed line length. Restart next + # line with indent and repeat, or add a space if we're done (word + # finished) This deals with words that caanot fit on one line + # (e.g. indent + word longer than allowed line length). 
+ while len(line) + len(word) >= length: + line += word + result.append(line[:length]) + word = line[length:] + line = indent + # Default case, simply append the word and a space + if word: + line += word + ' ' + # End of input line. If we have content we finish the line. If the + # current line is just the indent but we had content in during this + # original line then we need to add an emoty line. + if (result and line != indent) or (not result and line != firstline_indent): + result.append(line.rstrip()) + elif len(result) == old_result_len: + result.append('') + line = indent + + return '\n'.join(result) + + +def DocToHelp(doc): + """Takes a __doc__ string and reformats it as help.""" + + # Get rid of starting and ending white space. Using lstrip() or even + # strip() could drop more than maximum of first line and right space + # of last line. + doc = doc.strip() + + # Get rid of all empty lines + whitespace_only_line = re.compile('^[ \t]+$', re.M) + doc = whitespace_only_line.sub('', doc) + + # Cut out common space at line beginnings + doc = CutCommonSpacePrefix(doc) + + # Just like this module's comment, comments tend to be aligned somehow. + # In other words they all start with the same amount of white space + # 1) keep double new lines + # 2) keep ws after new lines if not empty line + # 3) all other new lines shall be changed to a space + # Solution: Match new lines between non white space and replace with space. + doc = re.sub('(?<=\S)\n(?=\S)', ' ', doc, re.M) + + return doc + + +def __GetModuleName(globals_dict): + """Given a globals dict, returns the name of the module that defines it. + + Args: + globals_dict: A dictionary that should correspond to an environment + providing the values of the globals. + + Returns: + A string (the name of the module) or None (if the module could not + be identified. + """ + for name, module in sys.modules.iteritems(): + if getattr(module, '__dict__', None) is globals_dict: + if name == '__main__': + return sys.argv[0] + return name + return None + + +def _GetMainModule(): + """Returns the name of the module from which execution started.""" + for depth in range(1, sys.getrecursionlimit()): + try: + globals_of_main = sys._getframe(depth).f_globals + except ValueError: + return __GetModuleName(globals_of_main) + raise AssertionError("No module was found") + + +class FlagValues: + """Registry of 'Flag' objects. + + A 'FlagValues' can then scan command line arguments, passing flag + arguments through to the 'Flag' objects that it owns. It also + provides easy access to the flag values. Typically only one + 'FlagValues' object is needed by an application: gflags.FLAGS + + This class is heavily overloaded: + + 'Flag' objects are registered via __setitem__: + FLAGS['longname'] = x # register a new flag + + The .value attribute of the registered 'Flag' objects can be accessed + as attributes of this 'FlagValues' object, through __getattr__. Both + the long and short name of the original 'Flag' objects can be used to + access its value: + FLAGS.longname # parsed flag value + FLAGS.x # parsed flag value (short name) + + Command line arguments are scanned and passed to the registered 'Flag' + objects through the __call__ method. Unparsed arguments, including + argv[0] (e.g. the program name) are returned. 
+ argv = FLAGS(sys.argv) # scan command line arguments + + The original registered Flag objects can be retrieved through the use + of the dictionary-like operator, __getitem__: + x = FLAGS['longname'] # access the registered Flag object + + The str() operator of a 'FlagValues' object provides help for all of + the registered 'Flag' objects. + """ + + def __init__(self): + # Since everything in this class is so heavily overloaded, the only + # way of defining and using fields is to access __dict__ directly. + + # Dictionary: flag name (string) -> Flag object. + self.__dict__['__flags'] = {} + # Dictionary: module name (string) -> list of Flag objects that are defined + # by that module. + self.__dict__['__flags_by_module'] = {} + # Dictionary: module name (string) -> list of Flag objects that are + # key for that module. + self.__dict__['__key_flags_by_module'] = {} + + # Set if we should use new style gnu_getopt rather than getopt when parsing + # the args. Only possible with Python 2.3+ + self.UseGnuGetOpt(False) + + def UseGnuGetOpt(self, use_gnu_getopt=True): + self.__dict__['__use_gnu_getopt'] = use_gnu_getopt + + def IsGnuGetOpt(self): + return self.__dict__['__use_gnu_getopt'] + + def FlagDict(self): + return self.__dict__['__flags'] + + def FlagsByModuleDict(self): + """Returns the dictionary of module_name -> list of defined flags. + + Returns: + A dictionary. Its keys are module names (strings). Its values + are lists of Flag objects. + """ + return self.__dict__['__flags_by_module'] + + def KeyFlagsByModuleDict(self): + """Returns the dictionary of module_name -> list of key flags. + + Returns: + A dictionary. Its keys are module names (strings). Its values + are lists of Flag objects. + """ + return self.__dict__['__key_flags_by_module'] + + def _RegisterFlagByModule(self, module_name, flag): + """Records the module that defines a specific flag. + + We keep track of which flag is defined by which module so that we + can later sort the flags by module. + + Args: + module_name: A string, the name of a Python module. + flag: A Flag object, a flag that is key to the module. + """ + flags_by_module = self.FlagsByModuleDict() + flags_by_module.setdefault(module_name, []).append(flag) + + def _RegisterKeyFlagForModule(self, module_name, flag): + """Specifies that a flag is a key flag for a module. + + Args: + module_name: A string, the name of a Python module. + flag: A Flag object, a flag that is key to the module. + """ + key_flags_by_module = self.KeyFlagsByModuleDict() + # The list of key flags for the module named module_name. + key_flags = key_flags_by_module.setdefault(module_name, []) + # Add flag, but avoid duplicates. + if flag not in key_flags: + key_flags.append(flag) + + def _GetFlagsDefinedByModule(self, module): + """Returns the list of flags defined by a module. + + Args: + module: A module object or a module name (a string). + + Returns: + A new list of Flag objects. Caller may update this list as he + wishes: none of those changes will affect the internals of this + FlagValue object. + """ + if not isinstance(module, str): + module = module.__name__ + + return list(self.FlagsByModuleDict().get(module, [])) + + def _GetKeyFlagsForModule(self, module): + """Returns the list of key flags for a module. + + Args: + module: A module object or a module name (a string) + + Returns: + A new list of Flag objects. Caller may update this list as he + wishes: none of those changes will affect the internals of this + FlagValue object. 
+ """ + if not isinstance(module, str): + module = module.__name__ + + # Any flag is a key flag for the module that defined it. NOTE: + # key_flags is a fresh list: we can update it without affecting the + # internals of this FlagValues object. + key_flags = self._GetFlagsDefinedByModule(module) + + # Take into account flags explicitly declared as key for a module. + for flag in self.KeyFlagsByModuleDict().get(module, []): + if flag not in key_flags: + key_flags.append(flag) + return key_flags + + def AppendFlagValues(self, flag_values): + """Appends flags registered in another FlagValues instance. + + Args: + flag_values: registry to copy from + """ + for flag_name, flag in flag_values.FlagDict().iteritems(): + # Each flags with shortname appears here twice (once under its + # normal name, and again with its short name). To prevent + # problems (DuplicateFlagError) with double flag registration, we + # perform a check to make sure that the entry we're looking at is + # for its normal name. + if flag_name == flag.name: + self[flag_name] = flag + + def __setitem__(self, name, flag): + """Registers a new flag variable.""" + fl = self.FlagDict() + if not isinstance(flag, Flag): + raise IllegalFlagValue(flag) + if not isinstance(name, type("")): + raise FlagsError("Flag name must be a string") + if len(name) == 0: + raise FlagsError("Flag name cannot be empty") + # If running under pychecker, duplicate keys are likely to be + # defined. Disable check for duplicate keys when pycheck'ing. + if (fl.has_key(name) and not flag.allow_override and + not fl[name].allow_override and not _RUNNING_PYCHECKER): + raise DuplicateFlagError(name, self) + short_name = flag.short_name + if short_name is not None: + if (fl.has_key(short_name) and not flag.allow_override and + not fl[short_name].allow_override and not _RUNNING_PYCHECKER): + raise DuplicateFlagError(short_name, self) + fl[short_name] = flag + fl[name] = flag + global _exported_flags + _exported_flags[name] = flag + + def __getitem__(self, name): + """Retrieves the Flag object for the flag --name.""" + return self.FlagDict()[name] + + def __getattr__(self, name): + """Retrieves the 'value' attribute of the flag --name.""" + fl = self.FlagDict() + if not fl.has_key(name): + raise AttributeError(name) + return fl[name].value + + def __setattr__(self, name, value): + """Sets the 'value' attribute of the flag --name.""" + fl = self.FlagDict() + fl[name].value = value + return value + + def _FlagIsRegistered(self, flag_obj): + """Checks whether a Flag object is registered under some name. + + Note: this is non trivial: in addition to its normal name, a flag + may have a short name too. In self.FlagDict(), both the normal and + the short name are mapped to the same flag object. E.g., calling + only "del FLAGS.short_name" is not unregistering the corresponding + Flag object (it is still registered under the longer name). + + Args: + flag_obj: A Flag object. + + Returns: + A boolean: True iff flag_obj is registered under some name. + """ + flag_dict = self.FlagDict() + # Check whether flag_obj is registered under its long name. + name = flag_obj.name + if flag_dict.get(name, None) == flag_obj: + return True + # Check whether flag_obj is registered under its short name. + short_name = flag_obj.short_name + if (short_name is not None and + flag_dict.get(short_name, None) == flag_obj): + return True + # The flag cannot be registered under any other name, so we do not + # need to do a full search through the values of self.FlagDict(). 
+ return False + + def __delattr__(self, flag_name): + """Deletes a previously-defined flag from a flag object. + + This method makes sure we can delete a flag by using + + del flag_values_object. + + E.g., + + flags.DEFINE_integer('foo', 1, 'Integer flag.') + del flags.FLAGS.foo + + Args: + flag_name: A string, the name of the flag to be deleted. + + Raises: + AttributeError: When there is no registered flag named flag_name. + """ + fl = self.FlagDict() + if flag_name not in fl: + raise AttributeError(flag_name) + + flag_obj = fl[flag_name] + del fl[flag_name] + + if not self._FlagIsRegistered(flag_obj): + # If the Flag object indicated by flag_name is no longer + # registered (please see the docstring of _FlagIsRegistered), then + # we delete the occurences of the flag object in all our internal + # dictionaries. + self.__RemoveFlagFromDictByModule(self.FlagsByModuleDict(), flag_obj) + self.__RemoveFlagFromDictByModule(self.KeyFlagsByModuleDict(), flag_obj) + + def __RemoveFlagFromDictByModule(self, flags_by_module_dict, flag_obj): + """Removes a flag object from a module -> list of flags dictionary. + + Args: + flags_by_module_dict: A dictionary that maps module names to lists of + flags. + flag_obj: A flag object. + """ + for unused_module, flags_in_module in flags_by_module_dict.iteritems(): + # while (as opposed to if) takes care of multiple occurences of a + # flag in the list for the same module. + while flag_obj in flags_in_module: + flags_in_module.remove(flag_obj) + + def SetDefault(self, name, value): + """Changes the default value of the named flag object.""" + fl = self.FlagDict() + if not fl.has_key(name): + raise AttributeError(name) + fl[name].SetDefault(value) + + def __contains__(self, name): + """Returns True if name is a value (flag) in the dict.""" + return name in self.FlagDict() + + has_key = __contains__ # a synonym for __contains__() + + def __iter__(self): + return self.FlagDict().iterkeys() + + def __call__(self, argv): + """Parses flags from argv; stores parsed flags into this FlagValues object. + + All unparsed arguments are returned. Flags are parsed using the GNU + Program Argument Syntax Conventions, using getopt: + + http://www.gnu.org/software/libc/manual/html_mono/libc.html#Getopt + + Args: + argv: argument list. Can be of any type that may be converted to a list. + + Returns: + The list of arguments not parsed as options, including argv[0] + + Raises: + FlagsError: on any parsing error + """ + # Support any sequence type that can be converted to a list + argv = list(argv) + + shortopts = "" + longopts = [] + + fl = self.FlagDict() + + # This pre parses the argv list for --flagfile=<> options. + argv = argv[:1] + self.ReadFlagsFromFiles(argv[1:], force_gnu=False) + + # Correct the argv to support the google style of passing boolean + # parameters. Boolean parameters may be passed by using --mybool, + # --nomybool, --mybool=(true|false|1|0). getopt does not support + # having options that may or may not have a parameter. We replace + # instances of the short form --mybool and --nomybool with their + # full forms: --mybool=(true|false). 
+ original_argv = list(argv) # list() makes a copy + shortest_matches = None + for name, flag in fl.items(): + if not flag.boolean: + continue + if shortest_matches is None: + # Determine the smallest allowable prefix for all flag names + shortest_matches = self.ShortestUniquePrefixes(fl) + no_name = 'no' + name + prefix = shortest_matches[name] + no_prefix = shortest_matches[no_name] + + # Replace all occurences of this boolean with extended forms + for arg_idx in range(1, len(argv)): + arg = argv[arg_idx] + if arg.find('=') >= 0: continue + if arg.startswith('--'+prefix) and ('--'+name).startswith(arg): + argv[arg_idx] = ('--%s=true' % name) + elif arg.startswith('--'+no_prefix) and ('--'+no_name).startswith(arg): + argv[arg_idx] = ('--%s=false' % name) + + # Loop over all of the flags, building up the lists of short options + # and long options that will be passed to getopt. Short options are + # specified as a string of letters, each letter followed by a colon + # if it takes an argument. Long options are stored in an array of + # strings. Each string ends with an '=' if it takes an argument. + for name, flag in fl.items(): + longopts.append(name + "=") + if len(name) == 1: # one-letter option: allow short flag type also + shortopts += name + if not flag.boolean: + shortopts += ":" + + longopts.append('undefok=') + undefok_flags = [] + + # In case --undefok is specified, loop to pick up unrecognized + # options one by one. + unrecognized_opts = [] + args = argv[1:] + while True: + try: + if self.__dict__['__use_gnu_getopt']: + optlist, unparsed_args = getopt.gnu_getopt(args, shortopts, longopts) + else: + optlist, unparsed_args = getopt.getopt(args, shortopts, longopts) + break + except getopt.GetoptError, e: + if not e.opt or e.opt in fl: + # Not an unrecognized option, reraise the exception as a FlagsError + raise FlagsError(e) + # Handle an unrecognized option. + unrecognized_opts.append(e.opt) + # Remove offender from args and try again + for arg_index in range(len(args)): + if ((args[arg_index] == '--' + e.opt) or + (args[arg_index] == '-' + e.opt) or + args[arg_index].startswith('--' + e.opt + '=')): + args = args[0:arg_index] + args[arg_index+1:] + break + else: + # We should have found the option, so we don't expect to get + # here. We could assert, but raising the original exception + # might work better. + raise FlagsError(e) + + for name, arg in optlist: + if name == '--undefok': + flag_names = arg.split(',') + undefok_flags.extend(flag_names) + # For boolean flags, if --undefok=boolflag is specified, then we should + # also accept --noboolflag, in addition to --boolflag. + # Since we don't know the type of the undefok'd flag, this will affect + # non-boolean flags as well. + # NOTE: You shouldn't use --undefok=noboolflag, because then we will + # accept --nonoboolflag here. We are choosing not to do the conversion + # from noboolflag -> boolflag because of the ambiguity that flag names + # can start with 'no'. + undefok_flags.extend('no' + name for name in flag_names) + continue + if name.startswith('--'): + # long option + name = name[2:] + short_option = 0 + else: + # short option + name = name[1:] + short_option = 1 + if fl.has_key(name): + flag = fl[name] + if flag.boolean and short_option: arg = 1 + flag.Parse(arg) + + # If there were unrecognized options, raise an exception unless + # the options were named via --undefok. 
+ for opt in unrecognized_opts: + if opt not in undefok_flags: + raise UnrecognizedFlagError(opt) + + if unparsed_args: + if self.__dict__['__use_gnu_getopt']: + # if using gnu_getopt just return the program name + remainder of argv. + return argv[:1] + unparsed_args + else: + # unparsed_args becomes the first non-flag detected by getopt to + # the end of argv. Because argv may have been modified above, + # return original_argv for this region. + return argv[:1] + original_argv[-len(unparsed_args):] + else: + return argv[:1] + + def Reset(self): + """Resets the values to the point before FLAGS(argv) was called.""" + for f in self.FlagDict().values(): + f.Unparse() + + def RegisteredFlags(self): + """Returns: a list of the names and short names of all registered flags.""" + return self.FlagDict().keys() + + def FlagValuesDict(self): + """Returns: a dictionary that maps flag names to flag values.""" + flag_values = {} + + for flag_name in self.RegisteredFlags(): + flag = self.FlagDict()[flag_name] + flag_values[flag_name] = flag.value + + return flag_values + + def __str__(self): + """Generates a help string for all known flags.""" + return self.GetHelp() + + def GetHelp(self, prefix=''): + """Generates a help string for all known flags.""" + helplist = [] + + flags_by_module = self.FlagsByModuleDict() + if flags_by_module: + + modules = flags_by_module.keys() + modules.sort() + + # Print the help for the main module first, if possible. + main_module = _GetMainModule() + if main_module in modules: + modules.remove(main_module) + modules = [main_module] + modules + + for module in modules: + self.__RenderOurModuleFlags(module, helplist) + + self.__RenderModuleFlags('gflags', + _SPECIAL_FLAGS.FlagDict().values(), + helplist) + + else: + # Just print one long list of flags. + self.__RenderFlagList( + self.FlagDict().values() + _SPECIAL_FLAGS.FlagDict().values(), + helplist, prefix) + + return '\n'.join(helplist) + + def __RenderModuleFlags(self, module, flags, output_lines, prefix=""): + """Generates a help string for a given module.""" + if not isinstance(module, str): + module = module.__name__ + output_lines.append('\n%s%s:' % (prefix, module)) + self.__RenderFlagList(flags, output_lines, prefix + " ") + + def __RenderOurModuleFlags(self, module, output_lines, prefix=""): + """Generates a help string for a given module.""" + flags = self._GetFlagsDefinedByModule(module) + if flags: + self.__RenderModuleFlags(module, flags, output_lines, prefix) + + def __RenderOurModuleKeyFlags(self, module, output_lines, prefix=""): + """Generates a help string for the key flags of a given module. + + Args: + module: A module object or a module name (a string). + output_lines: A list of strings. The generated help message + lines will be appended to this list. + prefix: A string that is prepended to each generated help line. + """ + key_flags = self._GetKeyFlagsForModule(module) + if key_flags: + self.__RenderModuleFlags(module, key_flags, output_lines, prefix) + + def ModuleHelp(self, module): + """Describe the key flags of a module. + + Args: + module: A module object or a module name (a string). + + Returns: + string describing the key flags of a module. + """ + helplist = [] + self.__RenderOurModuleKeyFlags(module, helplist) + return '\n'.join(helplist) + + def MainModuleHelp(self): + """Describe the key flags of the main module. + + Returns: + string describing the key flags of a module. 
+ """ + return self.ModuleHelp(_GetMainModule()) + + def __RenderFlagList(self, flaglist, output_lines, prefix=" "): + fl = self.FlagDict() + special_fl = _SPECIAL_FLAGS.FlagDict() + flaglist = [(flag.name, flag) for flag in flaglist] + flaglist.sort() + flagset = {} + for (name, flag) in flaglist: + # It's possible this flag got deleted or overridden since being + # registered in the per-module flaglist. Check now against the + # canonical source of current flag information, the FlagDict. + if fl.get(name, None) != flag and special_fl.get(name, None) != flag: + # a different flag is using this name now + continue + # only print help once + if flagset.has_key(flag): continue + flagset[flag] = 1 + flaghelp = "" + if flag.short_name: flaghelp += "-%s," % flag.short_name + if flag.boolean: + flaghelp += "--[no]%s" % flag.name + ":" + else: + flaghelp += "--%s" % flag.name + ":" + flaghelp += " " + if flag.help: + flaghelp += flag.help + flaghelp = TextWrap(flaghelp, indent=prefix+" ", + firstline_indent=prefix) + if flag.default_as_str: + flaghelp += "\n" + flaghelp += TextWrap("(default: %s)" % flag.default_as_str, + indent=prefix+" ") + if flag.parser.syntactic_help: + flaghelp += "\n" + flaghelp += TextWrap("(%s)" % flag.parser.syntactic_help, + indent=prefix+" ") + output_lines.append(flaghelp) + + def get(self, name, default): + """Returns the value of a flag (if not None) or a default value. + + Args: + name: A string, the name of a flag. + default: Default value to use if the flag value is None. + """ + + value = self.__getattr__(name) + if value is not None: # Can't do if not value, b/c value might be '0' or "" + return value + else: + return default + + def ShortestUniquePrefixes(self, fl): + """Returns: dictionary; maps flag names to their shortest unique prefix.""" + # Sort the list of flag names + sorted_flags = [] + for name, flag in fl.items(): + sorted_flags.append(name) + if flag.boolean: + sorted_flags.append('no%s' % name) + sorted_flags.sort() + + # For each name in the sorted list, determine the shortest unique + # prefix by comparing itself to the next name and to the previous + # name (the latter check uses cached info from the previous loop). + shortest_matches = {} + prev_idx = 0 + for flag_idx in range(len(sorted_flags)): + curr = sorted_flags[flag_idx] + if flag_idx == (len(sorted_flags) - 1): + next = None + else: + next = sorted_flags[flag_idx+1] + next_len = len(next) + for curr_idx in range(len(curr)): + if (next is None + or curr_idx >= next_len + or curr[curr_idx] != next[curr_idx]): + # curr longer than next or no more chars in common + shortest_matches[curr] = curr[:max(prev_idx, curr_idx) + 1] + prev_idx = curr_idx + break + else: + # curr shorter than (or equal to) next + shortest_matches[curr] = curr + prev_idx = curr_idx + 1 # next will need at least one more char + return shortest_matches + + def __IsFlagFileDirective(self, flag_string): + """Checks whether flag_string contain a --flagfile= directive.""" + if isinstance(flag_string, type("")): + if flag_string.startswith('--flagfile='): + return 1 + elif flag_string == '--flagfile': + return 1 + elif flag_string.startswith('-flagfile='): + return 1 + elif flag_string == '-flagfile': + return 1 + else: + return 0 + return 0 + + def ExtractFilename(self, flagfile_str): + """Returns filename from a flagfile_str of form -[-]flagfile=filename. + + The cases of --flagfile foo and -flagfile foo shouldn't be hitting + this function, as they are dealt with in the level above this + function. 
+ """ + if flagfile_str.startswith('--flagfile='): + return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip()) + elif flagfile_str.startswith('-flagfile='): + return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip()) + else: + raise FlagsError('Hit illegal --flagfile type: %s' % flagfile_str) + + def __GetFlagFileLines(self, filename, parsed_file_list): + """Returns the useful (!=comments, etc) lines from a file with flags. + + Args: + filename: A string, the name of the flag file. + parsed_file_list: A list of the names of the files we have + already read. MUTATED BY THIS FUNCTION. + + Returns: + List of strings. See the note below. + + NOTE(springer): This function checks for a nested --flagfile= + tag and handles the lower file recursively. It returns a list of + all the lines that _could_ contain command flags. This is + EVERYTHING except whitespace lines and comments (lines starting + with '#' or '//'). + """ + line_list = [] # All line from flagfile. + flag_line_list = [] # Subset of lines w/o comments, blanks, flagfile= tags. + try: + file_obj = open(filename, 'r') + except IOError, e_msg: + print e_msg + print 'ERROR:: Unable to open flagfile: %s' % (filename) + return flag_line_list + + line_list = file_obj.readlines() + file_obj.close() + parsed_file_list.append(filename) + + # This is where we check each line in the file we just read. + for line in line_list: + if line.isspace(): + pass + # Checks for comment (a line that starts with '#'). + elif line.startswith('#') or line.startswith('//'): + pass + # Checks for a nested "--flagfile=" flag in the current file. + # If we find one, recursively parse down into that file. + elif self.__IsFlagFileDirective(line): + sub_filename = self.ExtractFilename(line) + # We do a little safety check for reparsing a file we've already done. + if not sub_filename in parsed_file_list: + included_flags = self.__GetFlagFileLines(sub_filename, + parsed_file_list) + flag_line_list.extend(included_flags) + else: # Case of hitting a circularly included file. + print >>sys.stderr, ('Warning: Hit circular flagfile dependency: %s' + % sub_filename) + else: + # Any line that's not a comment or a nested flagfile should get + # copied into 2nd position. This leaves earlier arguements + # further back in the list, thus giving them higher priority. + flag_line_list.append(line.strip()) + return flag_line_list + + def ReadFlagsFromFiles(self, argv, force_gnu=True): + """Processes command line args, but also allow args to be read from file. + Args: + argv: A list of strings, usually sys.argv[1:], which may contain one or + more flagfile directives of the form --flagfile="./filename". + Note that the name of the program (sys.argv[0]) should be omitted. + force_gnu: If False, --flagfile parsing obeys normal flag semantics. + If True, --flagfile parsing instead follows gnu_getopt semantics. + *** WARNING *** force_gnu=False may become the future default! + + Returns: + + A new list which has the original list combined with what we read + from any flagfile(s). + + References: Global gflags.FLAG class instance. + + This function should be called before the normal FLAGS(argv) call. + This function scans the input list for a flag that looks like: + --flagfile=. Then it opens , reads all valid key + and value pairs and inserts them into the input list between the + first item of the list and any subsequent items in the list. + + Note that your application's flags are still defined the usual way + using gflags DEFINE_flag() type functions. 
+ + Notes (assuming we're getting a commandline of some sort as our input): + --> Flags from the command line argv _should_ always take precedence! + --> A further "--flagfile=" CAN be nested in a flagfile. + It will be processed after the parent flag file is done. + --> For duplicate flags, first one we hit should "win". + --> In a flagfile, a line beginning with # or // is a comment. + --> Entirely blank lines _should_ be ignored. + """ + parsed_file_list = [] + rest_of_args = argv + new_argv = [] + while rest_of_args: + current_arg = rest_of_args[0] + rest_of_args = rest_of_args[1:] + if self.__IsFlagFileDirective(current_arg): + # This handles the case of -(-)flagfile foo. In this case the + # next arg really is part of this one. + if current_arg == '--flagfile' or current_arg == '-flagfile': + if not rest_of_args: + raise IllegalFlagValue('--flagfile with no argument') + flag_filename = os.path.expanduser(rest_of_args[0]) + rest_of_args = rest_of_args[1:] + else: + # This handles the case of (-)-flagfile=foo. + flag_filename = self.ExtractFilename(current_arg) + new_argv[0:0] = self.__GetFlagFileLines(flag_filename, parsed_file_list) + else: + new_argv.append(current_arg) + # Stop parsing after '--', like getopt and gnu_getopt. + if current_arg == '--': + break + # Stop parsing after a non-flag, like getopt. + if not current_arg.startswith('-'): + if not force_gnu and not self.__dict__['__use_gnu_getopt']: + break + + if rest_of_args: + new_argv.extend(rest_of_args) + + return new_argv + + def FlagsIntoString(self): + """Returns a string with the flags assignments from this FlagValues object. + + This function ignores flags whose value is None. Each flag + assignment is separated by a newline. + + NOTE: MUST mirror the behavior of the C++ function + CommandlineFlagsIntoString from google3/base/commandlineflags.cc. + """ + s = '' + for flag in self.FlagDict().values(): + if flag.value is not None: + s += flag.Serialize() + '\n' + return s + + def AppendFlagsIntoFile(self, filename): + """Appends all flags assignments from this FlagInfo object to a file. + + Output will be in the format of a flagfile. + + NOTE: MUST mirror the behavior of the C++ version of + AppendFlagsIntoFile from google3/base/commandlineflags.cc. + """ + out_file = open(filename, 'a') + out_file.write(self.FlagsIntoString()) + out_file.close() + + def WriteHelpInXMLFormat(self, outfile=None): + """Outputs flag documentation in XML format. + + NOTE: We use element names that are consistent with those used by + the C++ command-line flag library, from + google3/base/commandlineflags_reporting.cc. We also use a few new + elements (e.g., ), but we do not interfere / overlap with + existing XML elements used by the C++ library. Please maintain this + consistency. + + Args: + outfile: File object we write to. Default None means sys.stdout. + """ + outfile = outfile or sys.stdout + + outfile.write('\n') + outfile.write('\n') + indent = ' ' + _WriteSimpleXMLElement(outfile, 'program', os.path.basename(sys.argv[0]), + indent) + + usage_doc = sys.modules['__main__'].__doc__ + if not usage_doc: + usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0] + else: + usage_doc = usage_doc.replace('%s', sys.argv[0]) + _WriteSimpleXMLElement(outfile, 'usage', usage_doc, indent) + + # Get list of key flags for the main module. + key_flags = self._GetKeyFlagsForModule(_GetMainModule()) + + # Sort flags by declaring module name and next by flag name. 
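As a quick illustration of the flagfile plumbing above (the paths and the flag name are hypothetical; this sketch is not part of the vendored file): ReadFlagsFromFiles() expands --flagfile directives into a flat argument list before the normal FLAGS() call, while FlagsIntoString()/AppendFlagsIntoFile() emit the current assignments in the same one-flag-per-line format.

    import sys
    import gflags

    gflags.DEFINE_string('sql_connection', 'sqlite:///nova.db', 'database URI')

    # Expand any --flagfile directives first (program name omitted, as the
    # ReadFlagsFromFiles docstring above requires), then parse as usual.
    args = gflags.FLAGS.ReadFlagsFromFiles(['--flagfile=/etc/nova/nova.flags'])
    gflags.FLAGS([sys.argv[0]] + args)

    # The reverse direction: persist whatever is currently set.
    gflags.FLAGS.AppendFlagsIntoFile('/tmp/saved.flags')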
+ flags_by_module = self.FlagsByModuleDict() + all_module_names = list(flags_by_module.keys()) + all_module_names.sort() + for module_name in all_module_names: + flag_list = [(f.name, f) for f in flags_by_module[module_name]] + flag_list.sort() + for unused_flag_name, flag in flag_list: + is_key = flag in key_flags + flag.WriteInfoInXMLFormat(outfile, module_name, + is_key=is_key, indent=indent) + + outfile.write('\n') + outfile.flush() +# end of FlagValues definition + + +# The global FlagValues instance +FLAGS = FlagValues() + + +def _MakeXMLSafe(s): + """Escapes <, >, and & from s, and removes XML 1.0-illegal chars.""" + s = cgi.escape(s) # Escape <, >, and & + # Remove characters that cannot appear in an XML 1.0 document + # (http://www.w3.org/TR/REC-xml/#charsets). + # + # NOTE: if there are problems with current solution, one may move to + # XML 1.1, which allows such chars, if they're entity-escaped (&#xHH;). + s = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f]', '', s) + return s + + +def _WriteSimpleXMLElement(outfile, name, value, indent): + """Writes a simple XML element. + + Args: + outfile: File object we write the XML element to. + name: A string, the name of XML element. + value: A Python object, whose string representation will be used + as the value of the XML element. + indent: A string, prepended to each line of generated output. + """ + value_str = str(value) + if isinstance(value, bool): + # Display boolean values as the C++ flag library does: no caps. + value_str = value_str.lower() + outfile.write('%s<%s>%s\n' % + (indent, name, _MakeXMLSafe(value_str), name)) + + +class Flag: + """Information about a command-line flag. + + 'Flag' objects define the following fields: + .name - the name for this flag + .default - the default value for this flag + .default_as_str - default value as repr'd string, e.g., "'true'" (or None) + .value - the most recent parsed value of this flag; set by Parse() + .help - a help string or None if no help is available + .short_name - the single letter alias for this flag (or None) + .boolean - if 'true', this flag does not accept arguments + .present - true if this flag was parsed from command line flags. + .parser - an ArgumentParser object + .serializer - an ArgumentSerializer object + .allow_override - the flag may be redefined without raising an error + + The only public method of a 'Flag' object is Parse(), but it is + typically only called by a 'FlagValues' object. The Parse() method is + a thin wrapper around the 'ArgumentParser' Parse() method. The parsed + value is saved in .value, and the .present attribute is updated. If + this flag was already present, a FlagsError is raised. + + Parse() is also called during __init__ to parse the default value and + initialize the .value attribute. This enables other python modules to + safely use flags even if the __main__ module neglects to parse the + command line arguments. The .present attribute is cleared after + __init__ parsing. If the default value is set to None, then the + __init__ parsing step is skipped and the .value attribute is + initialized to None. + + Note: The default value is also presented to the user in the help + string, so it is important that it be a legal value for this flag. 
+ """ + + def __init__(self, parser, serializer, name, default, help_string, + short_name=None, boolean=0, allow_override=0): + self.name = name + + if not help_string: + help_string = '(no help available)' + + self.help = help_string + self.short_name = short_name + self.boolean = boolean + self.present = 0 + self.parser = parser + self.serializer = serializer + self.allow_override = allow_override + self.value = None + + self.SetDefault(default) + + def __GetParsedValueAsString(self, value): + if value is None: + return None + if self.serializer: + return repr(self.serializer.Serialize(value)) + if self.boolean: + if value: + return repr('true') + else: + return repr('false') + return repr(str(value)) + + def Parse(self, argument): + try: + self.value = self.parser.Parse(argument) + except ValueError, e: # recast ValueError as IllegalFlagValue + raise IllegalFlagValue("flag --%s=%s: %s" % (self.name, argument, e)) + self.present += 1 + + def Unparse(self): + if self.default is None: + self.value = None + else: + self.Parse(self.default) + self.present = 0 + + def Serialize(self): + if self.value is None: + return '' + if self.boolean: + if self.value: + return "--%s" % self.name + else: + return "--no%s" % self.name + else: + if not self.serializer: + raise FlagsError("Serializer not present for flag %s" % self.name) + return "--%s=%s" % (self.name, self.serializer.Serialize(self.value)) + + def SetDefault(self, value): + """Changes the default value (and current value too) for this Flag.""" + # We can't allow a None override because it may end up not being + # passed to C++ code when we're overriding C++ flags. So we + # cowardly bail out until someone fixes the semantics of trying to + # pass None to a C++ flag. See swig_flags.Init() for details on + # this behavior. + if value is None and self.allow_override: + raise DuplicateFlag(self.name) + + self.default = value + self.Unparse() + self.default_as_str = self.__GetParsedValueAsString(self.value) + + def Type(self): + """Returns: a string that describes the type of this Flag.""" + # NOTE: we use strings, and not the types.*Type constants because + # our flags can have more exotic types, e.g., 'comma separated list + # of strings', 'whitespace separated list of strings', etc. + return self.parser.Type() + + def WriteInfoInXMLFormat(self, outfile, module_name, is_key=False, indent=''): + """Writes common info about this flag, in XML format. + + This is information that is relevant to all flags (e.g., name, + meaning, etc.). If you defined a flag that has some other pieces of + info, then please override _WriteCustomInfoInXMLFormat. + + Please do NOT override this method. + + Args: + outfile: File object we write to. + module_name: A string, the name of the module that defines this flag. + is_key: A boolean, True iff this flag is key for main module. + indent: A string that is prepended to each generated line. + """ + outfile.write(indent + '\n') + inner_indent = indent + ' ' + if is_key: + _WriteSimpleXMLElement(outfile, 'key', 'yes', inner_indent) + _WriteSimpleXMLElement(outfile, 'file', module_name, inner_indent) + # Print flag features that are relevant for all flags. 
+ _WriteSimpleXMLElement(outfile, 'name', self.name, inner_indent) + if self.short_name: + _WriteSimpleXMLElement(outfile, 'short_name', self.short_name, + inner_indent) + if self.help: + _WriteSimpleXMLElement(outfile, 'meaning', self.help, inner_indent) + _WriteSimpleXMLElement(outfile, 'default', self.default, inner_indent) + _WriteSimpleXMLElement(outfile, 'current', self.value, inner_indent) + _WriteSimpleXMLElement(outfile, 'type', self.Type(), inner_indent) + # Print extra flag features this flag may have. + self._WriteCustomInfoInXMLFormat(outfile, inner_indent) + outfile.write(indent + '\n') + + def _WriteCustomInfoInXMLFormat(self, outfile, indent): + """Writes extra info about this flag, in XML format. + + "Extra" means "not already printed by WriteInfoInXMLFormat above." + + Args: + outfile: File object we write to. + indent: A string that is prepended to each generated line. + """ + # Usually, the parser knows the extra details about the flag, so + # we just forward the call to it. + self.parser.WriteCustomInfoInXMLFormat(outfile, indent) +# End of Flag definition + + +class ArgumentParser: + """Base class used to parse and convert arguments. + + The Parse() method checks to make sure that the string argument is a + legal value and convert it to a native type. If the value cannot be + converted, it should throw a 'ValueError' exception with a human + readable explanation of why the value is illegal. + + Subclasses should also define a syntactic_help string which may be + presented to the user to describe the form of the legal values. + """ + syntactic_help = "" + + def Parse(self, argument): + """Default implementation: always returns its argument unmodified.""" + return argument + + def Type(self): + return 'string' + + def WriteCustomInfoInXMLFormat(self, outfile, indent): + pass + + +class ArgumentSerializer: + """Base class for generating string representations of a flag value.""" + + def Serialize(self, value): + return str(value) + + +class ListSerializer(ArgumentSerializer): + + def __init__(self, list_sep): + self.list_sep = list_sep + + def Serialize(self, value): + return self.list_sep.join([str(x) for x in value]) + + +# The DEFINE functions are explained in mode details in the module doc string. + + +def DEFINE(parser, name, default, help, flag_values=FLAGS, serializer=None, + **args): + """Registers a generic Flag object. + + NOTE: in the docstrings of all DEFINE* functions, "registers" is short + for "creates a new flag and registers it". + + Auxiliary function: clients should use the specialized DEFINE_ + function instead. + + Args: + parser: ArgumentParser that is used to parse the flag arguments. + name: A string, the flag name. + default: The default value of the flag. + help: A help string. + flag_values: FlagValues object the flag will be registered with. + serializer: ArgumentSerializer that serializes the flag value. + args: Dictionary with extra keyword args that are passes to the + Flag __init__. + """ + DEFINE_flag(Flag(parser, serializer, name, default, help, **args), + flag_values) + + +def DEFINE_flag(flag, flag_values=FLAGS): + """Registers a 'Flag' object with a 'FlagValues' object. + + By default, the global FLAGS 'FlagValue' object is used. + + Typical users will use one of the more specialized DEFINE_xxx + functions, such as DEFINE_string or DEFINE_integer. But developers + who need to create Flag objects themselves should use this function + to register their flags. 
+ """ + # copying the reference to flag_values prevents pychecker warnings + fv = flag_values + fv[flag.name] = flag + # Tell flag_values who's defining the flag. + if isinstance(flag_values, FlagValues): + # Regarding the above isinstance test: some users pass funny + # values of flag_values (e.g., {}) in order to avoid the flag + # registration (in the past, there used to be a flag_values == + # FLAGS test here) and redefine flags with the same name (e.g., + # debug). To avoid breaking their code, we perform the + # registration only if flag_values is a real FlagValues object. + flag_values._RegisterFlagByModule(_GetCallingModule(), flag) + + +def _InternalDeclareKeyFlags(flag_names, flag_values=FLAGS): + """Declares a flag as key for the calling module. + + Internal function. User code should call DECLARE_key_flag or + ADOPT_module_key_flags instead. + + Args: + flag_names: A list of strings that are names of already-registered + Flag objects. + flag_values: A FlagValues object. This should almost never need + to be overridden. + + Raises: + UnrecognizedFlagError: when we refer to a flag that was not + defined yet. + """ + module = _GetCallingModule() + + for flag_name in flag_names: + if flag_name not in flag_values: + raise UnrecognizedFlagError(flag_name) + flag = flag_values.FlagDict()[flag_name] + flag_values._RegisterKeyFlagForModule(module, flag) + + +def DECLARE_key_flag(flag_name, flag_values=FLAGS): + """Declares one flag as key to the current module. + + Key flags are flags that are deemed really important for a module. + They are important when listing help messages; e.g., if the + --helpshort command-line flag is used, then only the key flags of the + main module are listed (instead of all flags, as in the case of + --help). + + Sample usage: + + flags.DECLARED_key_flag('flag_1') + + Args: + flag_name: A string, the name of an already declared flag. + (Redeclaring flags as key, including flags implicitly key + because they were declared in this module, is a no-op.) + flag_values: A FlagValues object. This should almost never + need to be overridden. + """ + _InternalDeclareKeyFlags([flag_name], flag_values=flag_values) + + +def ADOPT_module_key_flags(module, flag_values=FLAGS): + """Declares that all flags key to a module are key to the current module. + + Args: + module: A module object. + flag_values: A FlagValues object. This should almost never need + to be overridden. + + Raises: + FlagsError: When given an argument that is a module name (a + string), instead of a module object. + """ + # NOTE(salcianu): an even better test would be if not + # isinstance(module, types.ModuleType) but I didn't want to import + # types for such a tiny use. + if isinstance(module, str): + raise FlagsError('Received module name %s; expected a module object.' + % module) + _InternalDeclareKeyFlags( + [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)], + flag_values=flag_values) + + +# +# STRING FLAGS +# + + +def DEFINE_string(name, default, help, flag_values=FLAGS, **args): + """Registers a flag whose value can be any string.""" + parser = ArgumentParser() + serializer = ArgumentSerializer() + DEFINE(parser, name, default, help, flag_values, serializer, **args) + + +# +# BOOLEAN FLAGS +# +# and the special HELP flags. 
+ +class BooleanParser(ArgumentParser): + """Parser of boolean values.""" + + def Convert(self, argument): + """Converts the argument to a boolean; raise ValueError on errors.""" + if type(argument) == str: + if argument.lower() in ['true', 't', '1']: + return True + elif argument.lower() in ['false', 'f', '0']: + return False + + bool_argument = bool(argument) + if argument == bool_argument: + # The argument is a valid boolean (True, False, 0, or 1), and not just + # something that always converts to bool (list, string, int, etc.). + return bool_argument + + raise ValueError('Non-boolean argument to boolean flag', argument) + + def Parse(self, argument): + val = self.Convert(argument) + return val + + def Type(self): + return 'bool' + + +class BooleanFlag(Flag): + """Basic boolean flag. + + Boolean flags do not take any arguments, and their value is either + True (1) or False (0). The false value is specified on the command + line by prepending the word 'no' to either the long or the short flag + name. + + For example, if a Boolean flag was created whose long name was + 'update' and whose short name was 'x', then this flag could be + explicitly unset through either --noupdate or --nox. + """ + + def __init__(self, name, default, help, short_name=None, **args): + p = BooleanParser() + Flag.__init__(self, p, None, name, default, help, short_name, 1, **args) + if not self.help: self.help = "a boolean value" + + +def DEFINE_boolean(name, default, help, flag_values=FLAGS, **args): + """Registers a boolean flag. + + Such a boolean flag does not take an argument. If a user wants to + specify a false value explicitly, the long option beginning with 'no' + must be used: i.e. --noflag + + This flag will have a value of None, True or False. None is possible + if default=None and the user does not specify the flag on the command + line. + """ + DEFINE_flag(BooleanFlag(name, default, help, **args), flag_values) + +# Match C++ API to unconfuse C++ people. +DEFINE_bool = DEFINE_boolean + +class HelpFlag(BooleanFlag): + """ + HelpFlag is a special boolean flag that prints usage information and + raises a SystemExit exception if it is ever found in the command + line arguments. Note this is called with allow_override=1, so other + apps can define their own --help flag, replacing this one, if they want. + """ + def __init__(self): + BooleanFlag.__init__(self, "help", 0, "show this help", + short_name="?", allow_override=1) + def Parse(self, arg): + if arg: + doc = sys.modules["__main__"].__doc__ + flags = str(FLAGS) + print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]) + if flags: + print "flags:" + print flags + sys.exit(1) + + +class HelpXMLFlag(BooleanFlag): + """Similar to HelpFlag, but generates output in XML format.""" + + def __init__(self): + BooleanFlag.__init__(self, 'helpxml', False, + 'like --help, but generates XML output', + allow_override=1) + + def Parse(self, arg): + if arg: + FLAGS.WriteHelpInXMLFormat(sys.stdout) + sys.exit(1) + + +class HelpshortFlag(BooleanFlag): + """ + HelpshortFlag is a special boolean flag that prints usage + information for the "main" module, and rasies a SystemExit exception + if it is ever found in the command line arguments. Note this is + called with allow_override=1, so other apps can define their own + --helpshort flag, replacing this one, if they want. 
+ """ + def __init__(self): + BooleanFlag.__init__(self, "helpshort", 0, + "show usage only for this module", allow_override=1) + def Parse(self, arg): + if arg: + doc = sys.modules["__main__"].__doc__ + flags = FLAGS.MainModuleHelp() + print doc or ("\nUSAGE: %s [flags]\n" % sys.argv[0]) + if flags: + print "flags:" + print flags + sys.exit(1) + + +# +# FLOAT FLAGS +# + +class FloatParser(ArgumentParser): + """Parser of floating point values. + + Parsed value may be bounded to a given upper and lower bound. + """ + number_article = "a" + number_name = "number" + syntactic_help = " ".join((number_article, number_name)) + + def __init__(self, lower_bound=None, upper_bound=None): + self.lower_bound = lower_bound + self.upper_bound = upper_bound + sh = self.syntactic_help + if lower_bound != None and upper_bound != None: + sh = ("%s in the range [%s, %s]" % (sh, lower_bound, upper_bound)) + elif lower_bound == 1: + sh = "a positive %s" % self.number_name + elif upper_bound == -1: + sh = "a negative %s" % self.number_name + elif lower_bound == 0: + sh = "a non-negative %s" % self.number_name + elif upper_bound != None: + sh = "%s <= %s" % (self.number_name, upper_bound) + elif lower_bound != None: + sh = "%s >= %s" % (self.number_name, lower_bound) + self.syntactic_help = sh + + def Convert(self, argument): + """Converts argument to a float; raises ValueError on errors.""" + return float(argument) + + def Parse(self, argument): + val = self.Convert(argument) + if ((self.lower_bound != None and val < self.lower_bound) or + (self.upper_bound != None and val > self.upper_bound)): + raise ValueError("%s is not %s" % (val, self.syntactic_help)) + return val + + def Type(self): + return 'float' + + def WriteCustomInfoInXMLFormat(self, outfile, indent): + if self.lower_bound is not None: + _WriteSimpleXMLElement(outfile, 'lower_bound', self.lower_bound, indent) + if self.upper_bound is not None: + _WriteSimpleXMLElement(outfile, 'upper_bound', self.upper_bound, indent) +# End of FloatParser + + +def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None, + flag_values=FLAGS, **args): + """Registers a flag whose value must be a float. + + If lower_bound or upper_bound are set, then this flag must be + within the given range. + """ + parser = FloatParser(lower_bound, upper_bound) + serializer = ArgumentSerializer() + DEFINE(parser, name, default, help, flag_values, serializer, **args) + + +# +# INTEGER FLAGS +# + + +class IntegerParser(FloatParser): + """Parser of an integer value. + + Parsed value may be bounded to a given upper and lower bound. + """ + number_article = "an" + number_name = "integer" + syntactic_help = " ".join((number_article, number_name)) + + def Convert(self, argument): + __pychecker__ = 'no-returnvalues' + if type(argument) == str: + base = 10 + if len(argument) > 2 and argument[0] == "0" and argument[1] == "x": + base = 16 + try: + return int(argument, base) + # ValueError is thrown when argument is a string, and overflows an int. + except ValueError: + return long(argument, base) + else: + try: + return int(argument) + # OverflowError is thrown when argument is numeric, and overflows an int. + except OverflowError: + return long(argument) + + def Type(self): + return 'int' + + +def DEFINE_integer(name, default, help, lower_bound=None, upper_bound=None, + flag_values=FLAGS, **args): + """Registers a flag whose value must be an integer. + + If lower_bound, or upper_bound are set, then this flag must be + within the given range. 
+ """ + parser = IntegerParser(lower_bound, upper_bound) + serializer = ArgumentSerializer() + DEFINE(parser, name, default, help, flag_values, serializer, **args) + + +# +# ENUM FLAGS +# + + +class EnumParser(ArgumentParser): + """Parser of a string enum value (a string value from a given set). + + If enum_values (see below) is not specified, any string is allowed. + """ + + def __init__(self, enum_values=None): + self.enum_values = enum_values + + def Parse(self, argument): + if self.enum_values and argument not in self.enum_values: + raise ValueError("value should be one of <%s>" % + "|".join(self.enum_values)) + return argument + + def Type(self): + return 'string enum' + + +class EnumFlag(Flag): + """Basic enum flag; its value can be any string from list of enum_values.""" + + def __init__(self, name, default, help, enum_values=None, + short_name=None, **args): + enum_values = enum_values or [] + p = EnumParser(enum_values) + g = ArgumentSerializer() + Flag.__init__(self, p, g, name, default, help, short_name, **args) + if not self.help: self.help = "an enum string" + self.help = "<%s>: %s" % ("|".join(enum_values), self.help) + + def _WriteCustomInfoInXMLFormat(self, outfile, indent): + for enum_value in self.parser.enum_values: + _WriteSimpleXMLElement(outfile, 'enum_value', enum_value, indent) + + +def DEFINE_enum(name, default, enum_values, help, flag_values=FLAGS, + **args): + """Registers a flag whose value can be any string from enum_values.""" + DEFINE_flag(EnumFlag(name, default, help, enum_values, ** args), + flag_values) + + +# +# LIST FLAGS +# + + +class BaseListParser(ArgumentParser): + """Base class for a parser of lists of strings. + + To extend, inherit from this class; from the subclass __init__, call + + BaseListParser.__init__(self, token, name) + + where token is a character used to tokenize, and name is a description + of the separator. 
+ """ + + def __init__(self, token=None, name=None): + assert name + self._token = token + self._name = name + self.syntactic_help = "a %s separated list" % self._name + + def Parse(self, argument): + if isinstance(argument, list): + return argument + elif argument == '': + return [] + else: + return [s.strip() for s in argument.split(self._token)] + + def Type(self): + return '%s separated list of strings' % self._name + + +class ListParser(BaseListParser): + """Parser for a comma-separated list of strings.""" + + def __init__(self): + BaseListParser.__init__(self, ',', 'comma') + + def WriteCustomInfoInXMLFormat(self, outfile, indent): + BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent) + _WriteSimpleXMLElement(outfile, 'list_separator', repr(','), indent) + + +class WhitespaceSeparatedListParser(BaseListParser): + """Parser for a whitespace-separated list of strings.""" + + def __init__(self): + BaseListParser.__init__(self, None, 'whitespace') + + def WriteCustomInfoInXMLFormat(self, outfile, indent): + BaseListParser.WriteCustomInfoInXMLFormat(self, outfile, indent) + separators = list(string.whitespace) + separators.sort() + for ws_char in string.whitespace: + _WriteSimpleXMLElement(outfile, 'list_separator', repr(ws_char), indent) + + +def DEFINE_list(name, default, help, flag_values=FLAGS, **args): + """Registers a flag whose value is a comma-separated list of strings.""" + parser = ListParser() + serializer = ListSerializer(',') + DEFINE(parser, name, default, help, flag_values, serializer, **args) + + +def DEFINE_spaceseplist(name, default, help, flag_values=FLAGS, **args): + """Registers a flag whose value is a whitespace-separated list of strings. + + Any whitespace can be used as a separator. + """ + parser = WhitespaceSeparatedListParser() + serializer = ListSerializer(' ') + DEFINE(parser, name, default, help, flag_values, serializer, **args) + + +# +# MULTI FLAGS +# + + +class MultiFlag(Flag): + """A flag that can appear multiple time on the command-line. + + The value of such a flag is a list that contains the individual values + from all the appearances of that flag on the command-line. + + See the __doc__ for Flag for most behavior of this class. Only + differences in behavior are described here: + + * The default value may be either a single value or a list of values. + A single value is interpreted as the [value] singleton list. + + * The value of the flag is always a list, even if the option was + only supplied once, and even if the default value is a single + value + """ + + def __init__(self, *args, **kwargs): + Flag.__init__(self, *args, **kwargs) + self.help += ';\n repeat this option to specify a list of values' + + def Parse(self, arguments): + """Parses one or more arguments with the installed parser. + + Args: + arguments: a single argument or a list of arguments (typically a + list of default values); a single argument is converted + internally into a list containing one item. + """ + if not isinstance(arguments, list): + # Default value may be a list of values. Most other arguments + # will not be, so convert them into a single-item list to make + # processing simpler below. 
+ arguments = [arguments] + + if self.present: + # keep a backup reference to list of previously supplied option values + values = self.value + else: + # "erase" the defaults with an empty list + values = [] + + for item in arguments: + # have Flag superclass parse argument, overwriting self.value reference + Flag.Parse(self, item) # also increments self.present + values.append(self.value) + + # put list of option values back in the 'value' attribute + self.value = values + + def Serialize(self): + if not self.serializer: + raise FlagsError("Serializer not present for flag %s" % self.name) + if self.value is None: + return '' + + s = '' + + multi_value = self.value + + for self.value in multi_value: + if s: s += ' ' + s += Flag.Serialize(self) + + self.value = multi_value + + return s + + def Type(self): + return 'multi ' + self.parser.Type() + + +def DEFINE_multi(parser, serializer, name, default, help, flag_values=FLAGS, + **args): + """Registers a generic MultiFlag that parses its args with a given parser. + + Auxiliary function. Normal users should NOT use it directly. + + Developers who need to create their own 'Parser' classes for options + which can appear multiple times can call this module function to + register their flags. + """ + DEFINE_flag(MultiFlag(parser, serializer, name, default, help, **args), + flag_values) + + +def DEFINE_multistring(name, default, help, flag_values=FLAGS, **args): + """Registers a flag whose value can be a list of any strings. + + Use the flag on the command line multiple times to place multiple + string values into the list. The 'default' may be a single string + (which will be converted into a single-element list) or a list of + strings. + """ + parser = ArgumentParser() + serializer = ArgumentSerializer() + DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) + + +def DEFINE_multi_int(name, default, help, lower_bound=None, upper_bound=None, + flag_values=FLAGS, **args): + """Registers a flag whose value can be a list of arbitrary integers. + + Use the flag on the command line multiple times to place multiple + integer values into the list. The 'default' may be a single integer + (which will be converted into a single-element list) or a list of + integers. + """ + parser = IntegerParser(lower_bound, upper_bound) + serializer = ArgumentSerializer() + DEFINE_multi(parser, serializer, name, default, help, flag_values, **args) + + +# Now register the flags that we want to exist in all applications. +# These are all defined with allow_override=1, so user-apps can use +# these flagnames for their own purposes, if they want. +DEFINE_flag(HelpFlag()) +DEFINE_flag(HelpshortFlag()) +DEFINE_flag(HelpXMLFlag()) + +# Define special flags here so that help may be generated for them. +_SPECIAL_FLAGS = FlagValues() + + +DEFINE_string( + 'flagfile', "", + "Insert flag definitions from the given file into the command line.", + _SPECIAL_FLAGS) + +DEFINE_string( + 'undefok', "", + "comma-separated list of flag names that it is okay to specify " + "on the command line even if the program does not define a flag " + "with that name. IMPORTANT: flags in this list that have " + "arguments MUST use the --flag=value format.", _SPECIAL_FLAGS) diff --git a/vendor/python-gflags/gflags2man.py b/vendor/python-gflags/gflags2man.py new file mode 100755 index 000000000000..f346564301e9 --- /dev/null +++ b/vendor/python-gflags/gflags2man.py @@ -0,0 +1,536 @@ +#!/usr/bin/env python +# +# Copyright (c) 2007, Google Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""gflags2man runs a Google flags base program and generates a man page. + +Run the program, parse the output, and then format that into a man +page. + +Usage: + gflags2man [program] ... +""" + +# TODO(csilvers): work with windows paths (\) as well as unix (/) + +# This may seem a bit of an end run, but it: doesn't bloat flags, can +# support python/java/C++, supports older executables, and can be +# extended to other document formats. +# Inspired by help2man. + +__author__ = 'Dan Christian' + +import os +import re +import sys +import stat +import time + +import gflags + +_VERSION = '0.1' + + +def _GetDefaultDestDir(): + home = os.environ.get('HOME', '') + homeman = os.path.join(home, 'man', 'man1') + if home and os.path.exists(homeman): + return homeman + else: + return os.environ.get('TMPDIR', '/tmp') + +FLAGS = gflags.FLAGS +gflags.DEFINE_string('dest_dir', _GetDefaultDestDir(), + 'Directory to write resulting manpage to.' + ' Specify \'-\' for stdout') +gflags.DEFINE_string('help_flag', '--help', + 'Option to pass to target program in to get help') +gflags.DEFINE_integer('v', 0, 'verbosity level to use for output') + +_MIN_VALID_USAGE_MSG = 9 # if fewer lines than this, help is suspect + + +class Logging: + """A super-simple logging class""" + def error(self, msg): print >>sys.stderr, "ERROR: ", msg + def warn(self, msg): print >>sys.stderr, "WARNING: ", msg + def info(self, msg): print msg + def debug(self, msg): self.vlog(1, msg) + def vlog(self, level, msg): + if FLAGS.v >= level: print msg +logging = Logging() + + +def GetRealPath(filename): + """Given an executable filename, find in the PATH or find absolute path. + Args: + filename An executable filename (string) + Returns: + Absolute version of filename. 
+ None if filename could not be found locally, absolutely, or in PATH + """ + if os.path.isabs(filename): # already absolute + return filename + + if filename.startswith('./') or filename.startswith('../'): # relative + return os.path.abspath(filename) + + path = os.getenv('PATH', '') + for directory in path.split(':'): + tryname = os.path.join(directory, filename) + if os.path.exists(tryname): + if not os.path.isabs(directory): # relative directory + return os.path.abspath(tryname) + return tryname + if os.path.exists(filename): + return os.path.abspath(filename) + return None # could not determine + +class Flag(object): + """The information about a single flag.""" + + def __init__(self, flag_desc, help): + """Create the flag object. + Args: + flag_desc The command line forms this could take. (string) + help The help text (string) + """ + self.desc = flag_desc # the command line forms + self.help = help # the help text + self.default = '' # default value + self.tips = '' # parsing/syntax tips + + +class ProgramInfo(object): + """All the information gleaned from running a program with --help.""" + + # Match a module block start, for python scripts --help + # "goopy.logging:" + module_py_re = re.compile(r'(\S.+):$') + # match the start of a flag listing + # " -v,--verbosity: Logging verbosity" + flag_py_re = re.compile(r'\s+(-\S+):\s+(.*)$') + # " (default: '0')" + flag_default_py_re = re.compile(r'\s+\(default:\s+\'(.*)\'\)$') + # " (an integer)" + flag_tips_py_re = re.compile(r'\s+\((.*)\)$') + + # Match a module block start, for c++ programs --help + # "google/base/commandlineflags" + module_c_re = re.compile(r'\s+Flags from (\S.+):$') + # match the start of a flag listing + # " -v,--verbosity: Logging verbosity" + flag_c_re = re.compile(r'\s+(-\S+)\s+(.*)$') + + # Match a module block start, for java programs --help + # "com.google.common.flags" + module_java_re = re.compile(r'\s+Flags for (\S.+):$') + # match the start of a flag listing + # " -v,--verbosity: Logging verbosity" + flag_java_re = re.compile(r'\s+(-\S+)\s+(.*)$') + + def __init__(self, executable): + """Create object with executable. + Args: + executable Program to execute (string) + """ + self.long_name = executable + self.name = os.path.basename(executable) # name + # Get name without extension (PAR files) + (self.short_name, self.ext) = os.path.splitext(self.name) + self.executable = GetRealPath(executable) # name of the program + self.output = [] # output from the program. List of lines. + self.desc = [] # top level description. List of lines + self.modules = {} # { section_name(string), [ flags ] } + self.module_list = [] # list of module names in their original order + self.date = time.localtime(time.time()) # default date info + + def Run(self): + """Run it and collect output. + + Returns: + 1 (true) If everything went well. + 0 (false) If there were problems. + """ + if not self.executable: + logging.error('Could not locate "%s"' % self.long_name) + return 0 + + finfo = os.stat(self.executable) + self.date = time.localtime(finfo[stat.ST_MTIME]) + + logging.info('Running: %s %s &1' + % (self.executable, FLAGS.help_flag)) + # --help output is often routed to stderr, so we combine with stdout. + # Re-direct stdin to /dev/null to encourage programs that + # don't understand --help to exit. 
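Taken together, the steps in this class are what main() at the bottom of this script performs for each argument; a condensed sketch (the program name is illustrative, and this is not part of the vendored file):

    import gflags2man

    info = gflags2man.ProgramInfo('nova-compute')   # wraps "<prog> --help"
    if info.Run():                                  # run it and capture the output
      info.Parse()                                  # split output into modules/flags
      info.Filter()                                 # derive the short description
      gflags2man.GenerateMan(info, gflags2man.FLAGS.dest_dir).Output()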
+ (child_stdin, child_stdout_and_stderr) = os.popen4( + [self.executable, FLAGS.help_flag]) + child_stdin.close() # ' start_line+1 + and '' == self.output[start_line+1].rstrip()): + start_line += 2 + logging.debug('Flags start (python): %s' % line) + return (start_line, 'python') + # SWIG flags just have the module name followed by colon. + if exec_mod_start == line: + logging.debug('Flags start (swig): %s' % line) + return (start_line, 'python') + # C++ flags begin after a blank line and with a constant string + if after_blank and line.startswith(' Flags from '): + logging.debug('Flags start (c): %s' % line) + return (start_line, 'c') + # java flags begin with a constant string + if line == 'where flags are': + logging.debug('Flags start (java): %s' % line) + start_line += 2 # skip "Standard flags:" + return (start_line, 'java') + + logging.debug('Desc: %s' % line) + self.desc.append(line) + after_blank = (line == '') + else: + logging.warn('Never found the start of the flags section for "%s"!' + % self.long_name) + return (-1, '') + + def ParsePythonFlags(self, start_line=0): + """Parse python/swig style flags.""" + modname = None # name of current module + modlist = [] + flag = None + for line_num in range(start_line, len(self.output)): # collect flags + line = self.output[line_num].rstrip() + if not line: # blank + continue + + mobj = self.module_py_re.match(line) + if mobj: # start of a new module + modname = mobj.group(1) + logging.debug('Module: %s' % line) + if flag: + modlist.append(flag) + self.module_list.append(modname) + self.modules.setdefault(modname, []) + modlist = self.modules[modname] + flag = None + continue + + mobj = self.flag_py_re.match(line) + if mobj: # start of a new flag + if flag: + modlist.append(flag) + logging.debug('Flag: %s' % line) + flag = Flag(mobj.group(1), mobj.group(2)) + continue + + if not flag: # continuation of a flag + logging.error('Flag info, but no current flag "%s"' % line) + mobj = self.flag_default_py_re.match(line) + if mobj: # (default: '...') + flag.default = mobj.group(1) + logging.debug('Fdef: %s' % line) + continue + mobj = self.flag_tips_py_re.match(line) + if mobj: # (tips) + flag.tips = mobj.group(1) + logging.debug('Ftip: %s' % line) + continue + if flag and flag.help: + flag.help += line # multiflags tack on an extra line + else: + logging.info('Extra: %s' % line) + if flag: + modlist.append(flag) + + def ParseCFlags(self, start_line=0): + """Parse C style flags.""" + modname = None # name of current module + modlist = [] + flag = None + for line_num in range(start_line, len(self.output)): # collect flags + line = self.output[line_num].rstrip() + if not line: # blank lines terminate flags + if flag: # save last flag + modlist.append(flag) + flag = None + continue + + mobj = self.module_c_re.match(line) + if mobj: # start of a new module + modname = mobj.group(1) + logging.debug('Module: %s' % line) + if flag: + modlist.append(flag) + self.module_list.append(modname) + self.modules.setdefault(modname, []) + modlist = self.modules[modname] + flag = None + continue + + mobj = self.flag_c_re.match(line) + if mobj: # start of a new flag + if flag: # save last flag + modlist.append(flag) + logging.debug('Flag: %s' % line) + flag = Flag(mobj.group(1), mobj.group(2)) + continue + + # append to flag help. 
type and default are part of the main text + if flag: + flag.help += ' ' + line.strip() + else: + logging.info('Extra: %s' % line) + if flag: + modlist.append(flag) + + def ParseJavaFlags(self, start_line=0): + """Parse Java style flags (com.google.common.flags).""" + # The java flags prints starts with a "Standard flags" "module" + # that doesn't follow the standard module syntax. + modname = 'Standard flags' # name of current module + self.module_list.append(modname) + self.modules.setdefault(modname, []) + modlist = self.modules[modname] + flag = None + + for line_num in range(start_line, len(self.output)): # collect flags + line = self.output[line_num].rstrip() + logging.vlog(2, 'Line: "%s"' % line) + if not line: # blank lines terminate module + if flag: # save last flag + modlist.append(flag) + flag = None + continue + + mobj = self.module_java_re.match(line) + if mobj: # start of a new module + modname = mobj.group(1) + logging.debug('Module: %s' % line) + if flag: + modlist.append(flag) + self.module_list.append(modname) + self.modules.setdefault(modname, []) + modlist = self.modules[modname] + flag = None + continue + + mobj = self.flag_java_re.match(line) + if mobj: # start of a new flag + if flag: # save last flag + modlist.append(flag) + logging.debug('Flag: %s' % line) + flag = Flag(mobj.group(1), mobj.group(2)) + continue + + # append to flag help. type and default are part of the main text + if flag: + flag.help += ' ' + line.strip() + else: + logging.info('Extra: %s' % line) + if flag: + modlist.append(flag) + + def Filter(self): + """Filter parsed data to create derived fields.""" + if not self.desc: + self.short_desc = '' + return + + for i in range(len(self.desc)): # replace full path with name + if self.desc[i].find(self.executable) >= 0: + self.desc[i] = self.desc[i].replace(self.executable, self.name) + + self.short_desc = self.desc[0] + word_list = self.short_desc.split(' ') + all_names = [ self.name, self.short_name, ] + # Since the short_desc is always listed right after the name, + # trim it from the short_desc + while word_list and (word_list[0] in all_names + or word_list[0].lower() in all_names): + del word_list[0] + self.short_desc = '' # signal need to reconstruct + if not self.short_desc and word_list: + self.short_desc = ' '.join(word_list) + + +class GenerateDoc(object): + """Base class to output flags information.""" + + def __init__(self, proginfo, directory='.'): + """Create base object. + Args: + proginfo A ProgramInfo object + directory Directory to write output into + """ + self.info = proginfo + self.dirname = directory + + def Output(self): + """Output all sections of the page.""" + self.Open() + self.Header() + self.Body() + self.Footer() + + def Open(self): raise NotImplementedError # define in subclass + def Header(self): raise NotImplementedError # define in subclass + def Body(self): raise NotImplementedError # define in subclass + def Footer(self): raise NotImplementedError # define in subclass + + +class GenerateMan(GenerateDoc): + """Output a man page.""" + + def __init__(self, proginfo, directory='.'): + """Create base object. 
+ Args: + proginfo A ProgramInfo object + directory Directory to write output into + """ + GenerateDoc.__init__(self, proginfo, directory) + + def Open(self): + if self.dirname == '-': + logging.info('Writing to stdout') + self.fp = sys.stdout + else: + self.file_path = '%s.1' % os.path.join(self.dirname, self.info.name) + logging.info('Writing: %s' % self.file_path) + self.fp = open(self.file_path, 'w') + + def Header(self): + self.fp.write( + '.\\" DO NOT MODIFY THIS FILE! It was generated by gflags2man %s\n' + % _VERSION) + self.fp.write( + '.TH %s "1" "%s" "%s" "User Commands"\n' + % (self.info.name, time.strftime('%x', self.info.date), self.info.name)) + self.fp.write( + '.SH NAME\n%s \\- %s\n' % (self.info.name, self.info.short_desc)) + self.fp.write( + '.SH SYNOPSIS\n.B %s\n[\\fIFLAGS\\fR]...\n' % self.info.name) + + def Body(self): + self.fp.write( + '.SH DESCRIPTION\n.\\" Add any additional description here\n.PP\n') + for ln in self.info.desc: + self.fp.write('%s\n' % ln) + self.fp.write( + '.SH OPTIONS\n') + # This shows flags in the original order + for modname in self.info.module_list: + if modname.find(self.info.executable) >= 0: + mod = modname.replace(self.info.executable, self.info.name) + else: + mod = modname + self.fp.write('\n.P\n.I %s\n' % mod) + for flag in self.info.modules[modname]: + help_string = flag.help + if flag.default or flag.tips: + help_string += '\n.br\n' + if flag.default: + help_string += ' (default: \'%s\')' % flag.default + if flag.tips: + help_string += ' (%s)' % flag.tips + self.fp.write( + '.TP\n%s\n%s\n' % (flag.desc, help_string)) + + def Footer(self): + self.fp.write( + '.SH COPYRIGHT\nCopyright \(co %s Google.\n' + % time.strftime('%Y', self.info.date)) + self.fp.write('Gflags2man created this page from "%s %s" output.\n' + % (self.info.name, FLAGS.help_flag)) + self.fp.write('\nGflags2man was written by Dan Christian. ' + ' Note that the date on this' + ' page is the modification date of %s.\n' % self.info.name) + + +def main(argv): + argv = FLAGS(argv) # handles help as well + if len(argv) <= 1: + print >>sys.stderr, __doc__ + print >>sys.stderr, "flags:" + print >>sys.stderr, str(FLAGS) + return 1 + + for arg in argv[1:]: + prog = ProgramInfo(arg) + if not prog.Run(): + continue + prog.Parse() + prog.Filter() + doc = GenerateMan(prog, FLAGS.dest_dir) + doc.Output() + return 0 + +if __name__ == '__main__': + main(sys.argv) diff --git a/vendor/python-gflags/gflags_helpxml_test.py b/vendor/python-gflags/gflags_helpxml_test.py new file mode 100755 index 000000000000..aea2ffbdb254 --- /dev/null +++ b/vendor/python-gflags/gflags_helpxml_test.py @@ -0,0 +1,563 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for the XML-format help generated by the gflags.py module.""" + +__author__ = 'Alex Salcianu' + + +import string +import StringIO +import sys +import unittest +import xml.dom.minidom +import xml.sax.saxutils + +# We use the name 'flags' internally in this test, for historical reasons. +# Don't do this yourself! :-) Just do 'import gflags; FLAGS=gflags.FLAGS; etc' +import gflags as flags + +# For historic reasons, we use the name module_bar instead of test_module_bar +import test_module_bar as module_bar + +def MultiLineEqual(expected_help, help): + """Returns True if expected_help == help. Otherwise returns False + and logs the difference in a human-readable way. + """ + if help == expected_help: + return True + + print "Error: FLAGS.MainModuleHelp() didn't return the expected result." + print "Got:" + print help + print "[End of got]" + + help_lines = help.split('\n') + expected_help_lines = expected_help.split('\n') + + num_help_lines = len(help_lines) + num_expected_help_lines = len(expected_help_lines) + + if num_help_lines != num_expected_help_lines: + print "Number of help lines = %d, expected %d" % ( + num_help_lines, num_expected_help_lines) + + num_to_match = min(num_help_lines, num_expected_help_lines) + + for i in range(num_to_match): + if help_lines[i] != expected_help_lines[i]: + print "One discrepancy: Got:" + print help_lines[i] + print "Expected:" + print expected_help_lines[i] + break + else: + # If we got here, found no discrepancy, print first new line. + if num_help_lines > num_expected_help_lines: + print "New help line:" + print help_lines[num_expected_help_lines] + elif num_expected_help_lines > num_help_lines: + print "Missing expected help line:" + print expected_help_lines[num_help_lines] + else: + print "Bug in this test -- discrepancy detected but not found." + + return False + + +class _MakeXMLSafeTest(unittest.TestCase): + + def _Check(self, s, expected_output): + self.assertEqual(flags._MakeXMLSafe(s), expected_output) + + def testMakeXMLSafe(self): + self._Check('plain text', 'plain text') + self._Check('(x < y) && (a >= b)', + '(x < y) && (a >= b)') + # Some characters with ASCII code < 32 are illegal in XML 1.0 and + # are removed by us. However, '\n', '\t', and '\r' are legal. + self._Check('\x09\x0btext \x02 with\x0dsome \x08 good & bad chars', + '\ttext with\rsome good & bad chars') + + +def _ListSeparatorsInXMLFormat(separators, indent=''): + """Generates XML encoding of a list of list separators. + + Args: + separators: A list of list separators. Usually, this should be a + string whose characters are the valid list separators, e.g., ',' + means that both comma (',') and space (' ') are valid list + separators. 
+ indent: A string that is added at the beginning of each generated + XML element. + + Returns: + A string. + """ + result = '' + separators = list(separators) + separators.sort() + for sep_char in separators: + result += ('%s%s\n' % + (indent, repr(sep_char))) + return result + + +class WriteFlagHelpInXMLFormatTest(unittest.TestCase): + """Test the XML-format help for a single flag at a time. + + There is one test* method for each kind of DEFINE_* declaration. + """ + + def setUp(self): + # self.fv is a FlagValues object, just like flags.FLAGS. Each + # test registers one flag with this FlagValues. + self.fv = flags.FlagValues() + + def assertMultiLineEqual(self, expected, actual): + self.assert_(MultiLineEqual(expected, actual)) + + def _CheckFlagHelpInXML(self, flag_name, module_name, + expected_output, is_key=False): + # StringIO.StringIO is a file object that writes into a memory string. + sio = StringIO.StringIO() + flag_obj = self.fv[flag_name] + flag_obj.WriteInfoInXMLFormat(sio, module_name, is_key=is_key, indent=' ') + self.assertMultiLineEqual(sio.getvalue(), expected_output) + sio.close() + + def testFlagHelpInXML_Int(self): + flags.DEFINE_integer('index', 17, 'An integer flag', flag_values=self.fv) + expected_output_pattern = ( + ' \n' + ' module.name\n' + ' index\n' + ' An integer flag\n' + ' 17\n' + ' %d\n' + ' int\n' + ' \n') + self._CheckFlagHelpInXML('index', 'module.name', + expected_output_pattern % 17) + # Check that the output is correct even when the current value of + # a flag is different from the default one. + self.fv['index'].value = 20 + self._CheckFlagHelpInXML('index', 'module.name', + expected_output_pattern % 20) + + def testFlagHelpInXML_IntWithBounds(self): + flags.DEFINE_integer('nb_iters', 17, 'An integer flag', + lower_bound=5, upper_bound=27, + flag_values=self.fv) + expected_output = ( + ' \n' + ' yes\n' + ' module.name\n' + ' nb_iters\n' + ' An integer flag\n' + ' 17\n' + ' 17\n' + ' int\n' + ' 5\n' + ' 27\n' + ' \n') + self._CheckFlagHelpInXML('nb_iters', 'module.name', + expected_output, is_key=True) + + def testFlagHelpInXML_String(self): + flags.DEFINE_string('file_path', '/path/to/my/dir', 'A test string flag.', + flag_values=self.fv) + expected_output = ( + ' \n' + ' simple_module\n' + ' file_path\n' + ' A test string flag.\n' + ' /path/to/my/dir\n' + ' /path/to/my/dir\n' + ' string\n' + ' \n') + self._CheckFlagHelpInXML('file_path', 'simple_module', + expected_output) + + def testFlagHelpInXML_StringWithXMLIllegalChars(self): + flags.DEFINE_string('file_path', '/path/to/\x08my/dir', + 'A test string flag.', flag_values=self.fv) + # '\x08' is not a legal character in XML 1.0 documents. Our + # current code purges such characters from the generated XML. 
+ expected_output = ( + ' \n' + ' simple_module\n' + ' file_path\n' + ' A test string flag.\n' + ' /path/to/my/dir\n' + ' /path/to/my/dir\n' + ' string\n' + ' \n') + self._CheckFlagHelpInXML('file_path', 'simple_module', + expected_output) + + def testFlagHelpInXML_Boolean(self): + flags.DEFINE_boolean('use_hack', False, 'Use performance hack', + flag_values=self.fv) + expected_output = ( + ' \n' + ' yes\n' + ' a_module\n' + ' use_hack\n' + ' Use performance hack\n' + ' false\n' + ' false\n' + ' bool\n' + ' \n') + self._CheckFlagHelpInXML('use_hack', 'a_module', + expected_output, is_key=True) + + def testFlagHelpInXML_Enum(self): + flags.DEFINE_enum('cc_version', 'stable', ['stable', 'experimental'], + 'Compiler version to use.', flag_values=self.fv) + expected_output = ( + ' \n' + ' tool\n' + ' cc_version\n' + ' <stable|experimental>: ' + 'Compiler version to use.\n' + ' stable\n' + ' stable\n' + ' string enum\n' + ' stable\n' + ' experimental\n' + ' \n') + self._CheckFlagHelpInXML('cc_version', 'tool', expected_output) + + def testFlagHelpInXML_CommaSeparatedList(self): + flags.DEFINE_list('files', 'a.cc,a.h,archive/old.zip', + 'Files to process.', flag_values=self.fv) + expected_output = ( + ' \n' + ' tool\n' + ' files\n' + ' Files to process.\n' + ' a.cc,a.h,archive/old.zip\n' + ' [\'a.cc\', \'a.h\', \'archive/old.zip\']\n' + ' comma separated list of strings\n' + ' \',\'\n' + ' \n') + self._CheckFlagHelpInXML('files', 'tool', expected_output) + + def testFlagHelpInXML_SpaceSeparatedList(self): + flags.DEFINE_spaceseplist('dirs', 'src libs bin', + 'Directories to search.', flag_values=self.fv) + expected_output = ( + ' \n' + ' tool\n' + ' dirs\n' + ' Directories to search.\n' + ' src libs bin\n' + ' [\'src\', \'libs\', \'bin\']\n' + ' whitespace separated list of strings\n' + 'LIST_SEPARATORS' + ' \n').replace('LIST_SEPARATORS', + _ListSeparatorsInXMLFormat(string.whitespace, + indent=' ')) + self._CheckFlagHelpInXML('dirs', 'tool', expected_output) + + def testFlagHelpInXML_MultiString(self): + flags.DEFINE_multistring('to_delete', ['a.cc', 'b.h'], + 'Files to delete', flag_values=self.fv) + expected_output = ( + ' \n' + ' tool\n' + ' to_delete\n' + ' Files to delete;\n ' + 'repeat this option to specify a list of values\n' + ' [\'a.cc\', \'b.h\']\n' + ' [\'a.cc\', \'b.h\']\n' + ' multi string\n' + ' \n') + self._CheckFlagHelpInXML('to_delete', 'tool', expected_output) + + def testFlagHelpInXML_MultiInt(self): + flags.DEFINE_multi_int('cols', [5, 7, 23], + 'Columns to select', flag_values=self.fv) + expected_output = ( + ' \n' + ' tool\n' + ' cols\n' + ' Columns to select;\n ' + 'repeat this option to specify a list of values\n' + ' [5, 7, 23]\n' + ' [5, 7, 23]\n' + ' multi int\n' + ' \n') + self._CheckFlagHelpInXML('cols', 'tool', expected_output) + + +# The next EXPECTED_HELP_XML_* constants are parts of a template for +# the expected XML output from WriteHelpInXMLFormatTest below. When +# we assemble these parts into a single big string, we'll take into +# account the ordering between the name of the main module and the +# name of module_bar. Next, we'll fill in the docstring for this +# module (%(usage_doc)s), the name of the main module +# (%(main_module_name)s) and the name of the module module_bar +# (%(module_bar_name)s). See WriteHelpInXMLFormatTest below. +# +# NOTE: given the current implementation of _GetMainModule(), we +# already know the ordering between the main module and module_bar. 
+# However, there is no guarantee that _GetMainModule will never be +# changed in the future (especially since it's far from perfect). +EXPECTED_HELP_XML_START = """\ + + + gflags_helpxml_test.py + %(usage_doc)s +""" + +EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE = """\ + + yes + %(main_module_name)s + cc_version + <stable|experimental>: Compiler version to use. + stable + stable + string enum + stable + experimental + + + yes + %(main_module_name)s + cols + Columns to select; + repeat this option to specify a list of values + [5, 7, 23] + [5, 7, 23] + multi int + + + yes + %(main_module_name)s + dirs + Directories to create. + src libs bins + ['src', 'libs', 'bins'] + whitespace separated list of strings +%(whitespace_separators)s + + yes + %(main_module_name)s + file_path + A test string flag. + /path/to/my/dir + /path/to/my/dir + string + + + yes + %(main_module_name)s + files + Files to process. + a.cc,a.h,archive/old.zip + ['a.cc', 'a.h', 'archive/old.zip'] + comma separated list of strings + \',\' + + + yes + %(main_module_name)s + index + An integer flag + 17 + 17 + int + + + yes + %(main_module_name)s + nb_iters + An integer flag + 17 + 17 + int + 5 + 27 + + + yes + %(main_module_name)s + to_delete + Files to delete; + repeat this option to specify a list of values + ['a.cc', 'b.h'] + ['a.cc', 'b.h'] + multi string + + + yes + %(main_module_name)s + use_hack + Use performance hack + false + false + bool + +""" + +EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR = """\ + + %(module_bar_name)s + tmod_bar_t + Sample int flag. + 4 + 4 + int + + + yes + %(module_bar_name)s + tmod_bar_u + Sample int flag. + 5 + 5 + int + + + %(module_bar_name)s + tmod_bar_v + Sample int flag. + 6 + 6 + int + + + %(module_bar_name)s + tmod_bar_x + Boolean flag. + true + true + bool + + + %(module_bar_name)s + tmod_bar_y + String flag. + default + default + string + + + yes + %(module_bar_name)s + tmod_bar_z + Another boolean flag from module bar. + false + false + bool + +""" + +EXPECTED_HELP_XML_END = """\ + +""" + + +class WriteHelpInXMLFormatTest(unittest.TestCase): + """Big test of FlagValues.WriteHelpInXMLFormat, with several flags.""" + + def assertMultiLineEqual(self, expected, actual): + self.assert_(MultiLineEqual(expected, actual)) + + def testWriteHelpInXMLFormat(self): + fv = flags.FlagValues() + # Since these flags are defined by the top module, they are all key. + flags.DEFINE_integer('index', 17, 'An integer flag', flag_values=fv) + flags.DEFINE_integer('nb_iters', 17, 'An integer flag', + lower_bound=5, upper_bound=27, flag_values=fv) + flags.DEFINE_string('file_path', '/path/to/my/dir', 'A test string flag.', + flag_values=fv) + flags.DEFINE_boolean('use_hack', False, 'Use performance hack', + flag_values=fv) + flags.DEFINE_enum('cc_version', 'stable', ['stable', 'experimental'], + 'Compiler version to use.', flag_values=fv) + flags.DEFINE_list('files', 'a.cc,a.h,archive/old.zip', + 'Files to process.', flag_values=fv) + flags.DEFINE_spaceseplist('dirs', 'src libs bins', + 'Directories to create.', flag_values=fv) + flags.DEFINE_multistring('to_delete', ['a.cc', 'b.h'], + 'Files to delete', flag_values=fv) + flags.DEFINE_multi_int('cols', [5, 7, 23], + 'Columns to select', flag_values=fv) + # Define a few flags in a different module. + module_bar.DefineFlags(flag_values=fv) + # And declare only a few of them to be key. This way, we have + # different kinds of flags, defined in different modules, and not + # all of them are key flags. 
+ flags.DECLARE_key_flag('tmod_bar_z', flag_values=fv) + flags.DECLARE_key_flag('tmod_bar_u', flag_values=fv) + + # Generate flag help in XML format in the StringIO sio. + sio = StringIO.StringIO() + fv.WriteHelpInXMLFormat(sio) + + # Check that we got the expected result. + expected_output_template = EXPECTED_HELP_XML_START + main_module_name = flags._GetMainModule() + module_bar_name = module_bar.__name__ + + if main_module_name < module_bar_name: + expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE + expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR + else: + expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR + expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE + + expected_output_template += EXPECTED_HELP_XML_END + + # XML representation of the whitespace list separators. + whitespace_separators = _ListSeparatorsInXMLFormat(string.whitespace, + indent=' ') + expected_output = ( + expected_output_template % + {'usage_doc': sys.modules['__main__'].__doc__, + 'main_module_name': main_module_name, + 'module_bar_name': module_bar_name, + 'whitespace_separators': whitespace_separators}) + + actual_output = sio.getvalue() + self.assertMultiLineEqual(actual_output, expected_output) + + # Also check that our result is valid XML. minidom.parseString + # throws an xml.parsers.expat.ExpatError in case of an error. + xml.dom.minidom.parseString(actual_output) + + +if __name__ == '__main__': + unittest.main() diff --git a/vendor/python-gflags/gflags_unittest.py b/vendor/python-gflags/gflags_unittest.py new file mode 100755 index 000000000000..07420c03f721 --- /dev/null +++ b/vendor/python-gflags/gflags_unittest.py @@ -0,0 +1,1679 @@ +#!/usr/bin/env python + +# Copyright (c) 2007, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"Unittest for gflags.py module" + +__pychecker__ = "no-local" # for unittest + + +import sys +import os +import shutil +import unittest + +# We use the name 'flags' internally in this test, for historical reasons. +# Don't do this yourself! 
:-) Just do 'import gflags; FLAGS=gflags.FLAGS; etc' +import gflags as flags +FLAGS=flags.FLAGS + +# For historic reasons, we use the name module_foo instead of +# test_module_foo, and module_bar instead of test_module_bar. +import test_module_foo as module_foo +import test_module_bar as module_bar +import test_module_baz as module_baz + +def MultiLineEqual(expected_help, help): + """Returns True if expected_help == help. Otherwise returns False + and logs the difference in a human-readable way. + """ + if help == expected_help: + return True + + print "Error: FLAGS.MainModuleHelp() didn't return the expected result." + print "Got:" + print help + print "[End of got]" + + help_lines = help.split('\n') + expected_help_lines = expected_help.split('\n') + + num_help_lines = len(help_lines) + num_expected_help_lines = len(expected_help_lines) + + if num_help_lines != num_expected_help_lines: + print "Number of help lines = %d, expected %d" % ( + num_help_lines, num_expected_help_lines) + + num_to_match = min(num_help_lines, num_expected_help_lines) + + for i in range(num_to_match): + if help_lines[i] != expected_help_lines[i]: + print "One discrepancy: Got:" + print help_lines[i] + print "Expected:" + print expected_help_lines[i] + break + else: + # If we got here, found no discrepancy, print first new line. + if num_help_lines > num_expected_help_lines: + print "New help line:" + print help_lines[num_expected_help_lines] + elif num_expected_help_lines > num_help_lines: + print "Missing expected help line:" + print expected_help_lines[num_help_lines] + else: + print "Bug in this test -- discrepancy detected but not found." + + return False + + +class FlagsUnitTest(unittest.TestCase): + "Flags Unit Test" + + def setUp(self): + # make sure we are using the old, stupid way of parsing flags. + FLAGS.UseGnuGetOpt(False) + + def assertListEqual(self, list1, list2): + """Asserts that, when sorted, list1 and list2 are identical.""" + sorted_list1 = list1[:] + sorted_list2 = list2[:] + sorted_list1.sort() + sorted_list2.sort() + self.assertEqual(sorted_list1, sorted_list2) + + def assertMultiLineEqual(self, expected, actual): + self.assert_(MultiLineEqual(expected, actual)) + + + def test_flags(self): + + ############################################## + # Test normal usage with no (expected) errors. + + # Define flags + number_test_framework_flags = len(FLAGS.RegisteredFlags()) + repeatHelp = "how many times to repeat (0-5)" + flags.DEFINE_integer("repeat", 4, repeatHelp, + lower_bound=0, short_name='r') + flags.DEFINE_string("name", "Bob", "namehelp") + flags.DEFINE_boolean("debug", 0, "debughelp") + flags.DEFINE_boolean("q", 1, "quiet mode") + flags.DEFINE_boolean("quack", 0, "superstring of 'q'") + flags.DEFINE_boolean("noexec", 1, "boolean flag with no as prefix") + flags.DEFINE_integer("x", 3, "how eXtreme to be") + flags.DEFINE_integer("l", 0x7fffffff00000000L, "how long to be") + flags.DEFINE_list('letters', 'a,b,c', "a list of letters") + flags.DEFINE_list('numbers', [1, 2, 3], "a list of numbers") + flags.DEFINE_enum("kwery", None, ['who', 'what', 'why', 'where', 'when'], + "?") + + # Specify number of flags defined above. The short_name defined + # for 'repeat' counts as an extra flag. 
+ number_defined_flags = 11 + 1 + self.assertEqual(len(FLAGS.RegisteredFlags()), + number_defined_flags + number_test_framework_flags) + + assert FLAGS.repeat == 4, "integer default values not set:" + FLAGS.repeat + assert FLAGS.name == 'Bob', "default values not set:" + FLAGS.name + assert FLAGS.debug == 0, "boolean default values not set:" + FLAGS.debug + assert FLAGS.q == 1, "boolean default values not set:" + FLAGS.q + assert FLAGS.x == 3, "integer default values not set:" + FLAGS.x + assert FLAGS.l == 0x7fffffff00000000L, ("integer default values not set:" + + FLAGS.l) + assert FLAGS.letters == ['a', 'b', 'c'], ("list default values not set:" + + FLAGS.letters) + assert FLAGS.numbers == [1, 2, 3], ("list default values not set:" + + FLAGS.numbers) + assert FLAGS.kwery is None, ("enum default None value not set:" + + FLAGS.kwery) + + flag_values = FLAGS.FlagValuesDict() + assert flag_values['repeat'] == 4 + assert flag_values['name'] == 'Bob' + assert flag_values['debug'] == 0 + assert flag_values['r'] == 4 # short for of repeat + assert flag_values['q'] == 1 + assert flag_values['quack'] == 0 + assert flag_values['x'] == 3 + assert flag_values['l'] == 0x7fffffff00000000L + assert flag_values['letters'] == ['a', 'b', 'c'] + assert flag_values['numbers'] == [1, 2, 3] + assert flag_values['kwery'] is None + + # Verify string form of defaults + assert FLAGS['repeat'].default_as_str == "'4'" + assert FLAGS['name'].default_as_str == "'Bob'" + assert FLAGS['debug'].default_as_str == "'false'" + assert FLAGS['q'].default_as_str == "'true'" + assert FLAGS['quack'].default_as_str == "'false'" + assert FLAGS['noexec'].default_as_str == "'true'" + assert FLAGS['x'].default_as_str == "'3'" + assert FLAGS['l'].default_as_str == "'9223372032559808512'" + assert FLAGS['letters'].default_as_str == "'a,b,c'" + assert FLAGS['numbers'].default_as_str == "'1,2,3'" + + # Verify that the iterator for flags yields all the keys + keys = list(FLAGS) + keys.sort() + reg_flags = FLAGS.RegisteredFlags() + reg_flags.sort() + self.assertEqual(keys, reg_flags) + + # Parse flags + # .. empty command line + argv = ('./program',) + argv = FLAGS(argv) + assert len(argv) == 1, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + + # .. 
non-empty command line + argv = ('./program', '--debug', '--name=Bob', '-q', '--x=8') + argv = FLAGS(argv) + assert len(argv) == 1, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert FLAGS['debug'].present == 1 + FLAGS['debug'].present = 0 # Reset + assert FLAGS['name'].present == 1 + FLAGS['name'].present = 0 # Reset + assert FLAGS['q'].present == 1 + FLAGS['q'].present = 0 # Reset + assert FLAGS['x'].present == 1 + FLAGS['x'].present = 0 # Reset + + # Flags list + self.assertEqual(len(FLAGS.RegisteredFlags()), + number_defined_flags + number_test_framework_flags) + assert 'name' in FLAGS.RegisteredFlags() + assert 'debug' in FLAGS.RegisteredFlags() + assert 'repeat' in FLAGS.RegisteredFlags() + assert 'r' in FLAGS.RegisteredFlags() + assert 'q' in FLAGS.RegisteredFlags() + assert 'quack' in FLAGS.RegisteredFlags() + assert 'x' in FLAGS.RegisteredFlags() + assert 'l' in FLAGS.RegisteredFlags() + assert 'letters' in FLAGS.RegisteredFlags() + assert 'numbers' in FLAGS.RegisteredFlags() + + # has_key + assert FLAGS.has_key('name') + assert not FLAGS.has_key('name2') + assert 'name' in FLAGS + assert 'name2' not in FLAGS + + # try deleting a flag + del FLAGS.r + self.assertEqual(len(FLAGS.RegisteredFlags()), + number_defined_flags - 1 + number_test_framework_flags) + assert not 'r' in FLAGS.RegisteredFlags() + + # .. command line with extra stuff + argv = ('./program', '--debug', '--name=Bob', 'extra') + argv = FLAGS(argv) + assert len(argv) == 2, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert argv[1]=='extra', "extra argument not preserved" + assert FLAGS['debug'].present == 1 + FLAGS['debug'].present = 0 # Reset + assert FLAGS['name'].present == 1 + FLAGS['name'].present = 0 # Reset + + # Test reset + argv = ('./program', '--debug') + argv = FLAGS(argv) + assert len(argv) == 1, "wrong number of arguments pulled" + assert argv[0] == './program', "program name not preserved" + assert FLAGS['debug'].present == 1 + assert FLAGS['debug'].value + FLAGS.Reset() + assert FLAGS['debug'].present == 0 + assert not FLAGS['debug'].value + + # Test that reset restores default value when default value is None. 
+ argv = ('./program', '--kwery=who') + argv = FLAGS(argv) + assert len(argv) == 1, "wrong number of arguments pulled" + assert argv[0] == './program', "program name not preserved" + assert FLAGS['kwery'].present == 1 + assert FLAGS['kwery'].value == 'who' + FLAGS.Reset() + assert FLAGS['kwery'].present == 0 + assert FLAGS['kwery'].value == None + + # Test integer argument passing + argv = ('./program', '--x', '0x12345') + argv = FLAGS(argv) + self.assertEquals(FLAGS.x, 0x12345) + self.assertEquals(type(FLAGS.x), int) + + argv = ('./program', '--x', '0x1234567890ABCDEF1234567890ABCDEF') + argv = FLAGS(argv) + self.assertEquals(FLAGS.x, 0x1234567890ABCDEF1234567890ABCDEF) + self.assertEquals(type(FLAGS.x), long) + + # Treat 0-prefixed parameters as base-10, not base-8 + argv = ('./program', '--x', '012345') + argv = FLAGS(argv) + self.assertEquals(FLAGS.x, 12345) + self.assertEquals(type(FLAGS.x), int) + + argv = ('./program', '--x', '0123459') + argv = FLAGS(argv) + self.assertEquals(FLAGS.x, 123459) + self.assertEquals(type(FLAGS.x), int) + + argv = ('./program', '--x', '0x123efg') + try: + argv = FLAGS(argv) + raise AssertionError("failed to detect invalid hex argument") + except flags.IllegalFlagValue: + pass + + argv = ('./program', '--x', '0X123efg') + try: + argv = FLAGS(argv) + raise AssertionError("failed to detect invalid hex argument") + except flags.IllegalFlagValue: + pass + + # Test boolean argument parsing + flags.DEFINE_boolean("test0", None, "test boolean parsing") + argv = ('./program', '--notest0') + argv = FLAGS(argv) + assert FLAGS.test0 == 0 + + flags.DEFINE_boolean("test1", None, "test boolean parsing") + argv = ('./program', '--test1') + argv = FLAGS(argv) + assert FLAGS.test1 == 1 + + FLAGS.test0 = None + argv = ('./program', '--test0=false') + argv = FLAGS(argv) + assert FLAGS.test0 == 0 + + FLAGS.test1 = None + argv = ('./program', '--test1=true') + argv = FLAGS(argv) + assert FLAGS.test1 == 1 + + FLAGS.test0 = None + argv = ('./program', '--test0=0') + argv = FLAGS(argv) + assert FLAGS.test0 == 0 + + FLAGS.test1 = None + argv = ('./program', '--test1=1') + argv = FLAGS(argv) + assert FLAGS.test1 == 1 + + # Test booleans that already have 'no' as a prefix + FLAGS.noexec = None + argv = ('./program', '--nonoexec', '--name', 'Bob') + argv = FLAGS(argv) + assert FLAGS.noexec == 0 + + FLAGS.noexec = None + argv = ('./program', '--name', 'Bob', '--noexec') + argv = FLAGS(argv) + assert FLAGS.noexec == 1 + + # Test unassigned booleans + flags.DEFINE_boolean("testnone", None, "test boolean parsing") + argv = ('./program',) + argv = FLAGS(argv) + assert FLAGS.testnone == None + + # Test get with default + flags.DEFINE_boolean("testget1", None, "test parsing with defaults") + flags.DEFINE_boolean("testget2", None, "test parsing with defaults") + flags.DEFINE_boolean("testget3", None, "test parsing with defaults") + flags.DEFINE_integer("testget4", None, "test parsing with defaults") + argv = ('./program','--testget1','--notestget2') + argv = FLAGS(argv) + assert FLAGS.get('testget1', 'foo') == 1 + assert FLAGS.get('testget2', 'foo') == 0 + assert FLAGS.get('testget3', 'foo') == 'foo' + assert FLAGS.get('testget4', 'foo') == 'foo' + + # test list code + lists = [['hello','moo','boo','1'], + [],] + + flags.DEFINE_list('testlist', '', 'test lists parsing') + flags.DEFINE_spaceseplist('testspacelist', '', 'tests space lists parsing') + + for name, sep in (('testlist', ','), ('testspacelist', ' '), + ('testspacelist', '\n')): + for lst in lists: + argv = ('./program', '--%s=%s' 
% (name, sep.join(lst))) + argv = FLAGS(argv) + self.assertEquals(getattr(FLAGS, name), lst) + + # Test help text + flagsHelp = str(FLAGS) + assert flagsHelp.find("repeat") != -1, "cannot find flag in help" + assert flagsHelp.find(repeatHelp) != -1, "cannot find help string in help" + + # Test flag specified twice + argv = ('./program', '--repeat=4', '--repeat=2', '--debug', '--nodebug') + argv = FLAGS(argv) + self.assertEqual(FLAGS.get('repeat', None), 2) + self.assertEqual(FLAGS.get('debug', None), 0) + + # Test MultiFlag with single default value + flags.DEFINE_multistring('s_str', 'sing1', + 'string option that can occur multiple times', + short_name='s') + self.assertEqual(FLAGS.get('s_str', None), [ 'sing1', ]) + + # Test MultiFlag with list of default values + multi_string_defs = [ 'def1', 'def2', ] + flags.DEFINE_multistring('m_str', multi_string_defs, + 'string option that can occur multiple times', + short_name='m') + self.assertEqual(FLAGS.get('m_str', None), multi_string_defs) + + # Test flag specified multiple times with a MultiFlag + argv = ('./program', '--m_str=str1', '-m', 'str2') + argv = FLAGS(argv) + self.assertEqual(FLAGS.get('m_str', None), [ 'str1', 'str2', ]) + + # Test single-letter flags; should support both single and double dash + argv = ('./program', '-q', '-x8') + argv = FLAGS(argv) + self.assertEqual(FLAGS.get('q', None), 1) + self.assertEqual(FLAGS.get('x', None), 8) + + argv = ('./program', '--q', '--x', '9', '--noqu') + argv = FLAGS(argv) + self.assertEqual(FLAGS.get('q', None), 1) + self.assertEqual(FLAGS.get('x', None), 9) + # --noqu should match '--noquack since it's a unique prefix + self.assertEqual(FLAGS.get('quack', None), 0) + + argv = ('./program', '--noq', '--x=10', '--qu') + argv = FLAGS(argv) + self.assertEqual(FLAGS.get('q', None), 0) + self.assertEqual(FLAGS.get('x', None), 10) + self.assertEqual(FLAGS.get('quack', None), 1) + + #################################### + # Test flag serialization code: + + oldtestlist = FLAGS.testlist + oldtestspacelist = FLAGS.testspacelist + + argv = ('./program', + FLAGS['test0'].Serialize(), + FLAGS['test1'].Serialize(), + FLAGS['testnone'].Serialize(), + FLAGS['s_str'].Serialize()) + argv = FLAGS(argv) + self.assertEqual(FLAGS['test0'].Serialize(), '--notest0') + self.assertEqual(FLAGS['test1'].Serialize(), '--test1') + self.assertEqual(FLAGS['testnone'].Serialize(), '') + self.assertEqual(FLAGS['s_str'].Serialize(), '--s_str=sing1') + + testlist1 = ['aa', 'bb'] + testspacelist1 = ['aa', 'bb', 'cc'] + FLAGS.testlist = list(testlist1) + FLAGS.testspacelist = list(testspacelist1) + argv = ('./program', + FLAGS['testlist'].Serialize(), + FLAGS['testspacelist'].Serialize()) + argv = FLAGS(argv) + self.assertEqual(FLAGS.testlist, testlist1) + self.assertEqual(FLAGS.testspacelist, testspacelist1) + + testlist1 = ['aa some spaces', 'bb'] + testspacelist1 = ['aa', 'bb,some,commas,', 'cc'] + FLAGS.testlist = list(testlist1) + FLAGS.testspacelist = list(testspacelist1) + argv = ('./program', + FLAGS['testlist'].Serialize(), + FLAGS['testspacelist'].Serialize()) + argv = FLAGS(argv) + self.assertEqual(FLAGS.testlist, testlist1) + self.assertEqual(FLAGS.testspacelist, testspacelist1) + + FLAGS.testlist = oldtestlist + FLAGS.testspacelist = oldtestspacelist + + #################################### + # Test flag-update: + + def ArgsString(): + flagnames = FLAGS.RegisteredFlags() + + flagnames.sort() + nonbool_flags = ['--%s %s' % (name, FLAGS.get(name, None)) + for name in flagnames + if not isinstance(FLAGS[name], 
flags.BooleanFlag)] + + truebool_flags = ['--%s' % (name) + for name in flagnames + if isinstance(FLAGS[name], flags.BooleanFlag) and + FLAGS.get(name, None)] + falsebool_flags = ['--no%s' % (name) + for name in flagnames + if isinstance(FLAGS[name], flags.BooleanFlag) and + not FLAGS.get(name, None)] + return ' '.join(nonbool_flags + truebool_flags + falsebool_flags) + + argv = ('./program', '--repeat=3', '--name=giants', '--nodebug') + + FLAGS(argv) + self.assertEqual(FLAGS.get('repeat', None), 3) + self.assertEqual(FLAGS.get('name', None), 'giants') + self.assertEqual(FLAGS.get('debug', None), 0) + self.assertEqual(ArgsString(), + "--kwery None " + "--l 9223372032559808512 " + "--letters ['a', 'b', 'c'] " + "--m ['str1', 'str2'] --m_str ['str1', 'str2'] " + "--name giants " + "--numbers [1, 2, 3] " + "--repeat 3 " + "--s ['sing1'] --s_str ['sing1'] " + "--testget4 None --testlist [] " + "--testspacelist [] --x 10 " + "--noexec --quack " + "--test1 " + "--testget1 --tmod_baz_x --no? --nodebug --nohelp --nohelpshort --nohelpxml " + "--noq --notest0 --notestget2 " + "--notestget3 --notestnone") + + argv = ('./program', '--debug', '--m_str=upd1', '-s', 'upd2') + FLAGS(argv) + self.assertEqual(FLAGS.get('repeat', None), 3) + self.assertEqual(FLAGS.get('name', None), 'giants') + self.assertEqual(FLAGS.get('debug', None), 1) + + # items appended to existing non-default value lists for --m/--m_str + # new value overwrites default value (not appended to it) for --s/--s_str + self.assertEqual(ArgsString(), + "--kwery None " + "--l 9223372032559808512 " + "--letters ['a', 'b', 'c'] " + "--m ['str1', 'str2', 'upd1'] " + "--m_str ['str1', 'str2', 'upd1'] " + "--name giants " + "--numbers [1, 2, 3] " + "--repeat 3 " + "--s ['upd2'] --s_str ['upd2'] " + "--testget4 None --testlist [] " + "--testspacelist [] --x 10 " + "--debug --noexec --quack " + "--test1 " + "--testget1 --tmod_baz_x --no? --nohelp --nohelpshort --nohelpxml " + "--noq --notest0 --notestget2 " + "--notestget3 --notestnone") + + + #################################### + # Test all kind of error conditions. + + # Duplicate flag detection + try: + flags.DEFINE_boolean("run", 0, "runhelp", short_name='q') + raise AssertionError("duplicate flag detection failed") + except flags.DuplicateFlag, e: + pass + + # Duplicate short flag detection + try: + flags.DEFINE_boolean("zoom1", 0, "runhelp z1", short_name='z') + flags.DEFINE_boolean("zoom2", 0, "runhelp z2", short_name='z') + raise AssertionError("duplicate short flag detection failed") + except flags.DuplicateFlag, e: + self.assertTrue("The flag 'z' is defined twice. " in e.args[0]) + self.assertTrue("First from" in e.args[0]) + self.assertTrue(", Second from" in e.args[0]) + + # Duplicate mixed flag detection + try: + flags.DEFINE_boolean("short1", 0, "runhelp s1", short_name='s') + flags.DEFINE_boolean("s", 0, "runhelp s2") + raise AssertionError("duplicate mixed flag detection failed") + except flags.DuplicateFlag, e: + self.assertTrue("The flag 's' is defined twice. 
" in e.args[0]) + self.assertTrue("First from" in e.args[0]) + self.assertTrue(", Second from" in e.args[0]) + + # Make sure allow_override works + try: + flags.DEFINE_boolean("dup1", 0, "runhelp d11", short_name='u', + allow_override=0) + flag = FLAGS.FlagDict()['dup1'] + self.assertEqual(flag.default, 0) + + flags.DEFINE_boolean("dup1", 1, "runhelp d12", short_name='u', + allow_override=1) + flag = FLAGS.FlagDict()['dup1'] + self.assertEqual(flag.default, 1) + except flags.DuplicateFlag, e: + raise AssertionError("allow_override did not permit a flag duplication") + + # Make sure allow_override works + try: + flags.DEFINE_boolean("dup2", 0, "runhelp d21", short_name='u', + allow_override=1) + flag = FLAGS.FlagDict()['dup2'] + self.assertEqual(flag.default, 0) + + flags.DEFINE_boolean("dup2", 1, "runhelp d22", short_name='u', + allow_override=0) + flag = FLAGS.FlagDict()['dup2'] + self.assertEqual(flag.default, 1) + except flags.DuplicateFlag, e: + raise AssertionError("allow_override did not permit a flag duplication") + + # Make sure allow_override doesn't work with None default + try: + flags.DEFINE_boolean("dup3", 0, "runhelp d31", short_name='u', + allow_override=0) + flag = FLAGS.FlagDict()['dup3'] + self.assertEqual(flag.default, 0) + + flags.DEFINE_boolean("dup3", None, "runhelp d32", short_name='u', + allow_override=1) + raise AssertionError('Cannot override a flag with a default of None') + except flags.DuplicateFlag, e: + pass + + # Make sure that when we override, the help string gets updated correctly + flags.DEFINE_boolean("dup3", 0, "runhelp d31", short_name='u', + allow_override=1) + flags.DEFINE_boolean("dup3", 1, "runhelp d32", short_name='u', + allow_override=1) + self.assert_(str(FLAGS).find('runhelp d31') == -1) + self.assert_(str(FLAGS).find('runhelp d32') != -1) + + # Make sure AppendFlagValues works + new_flags = flags.FlagValues() + flags.DEFINE_boolean("new1", 0, "runhelp n1", flag_values=new_flags) + flags.DEFINE_boolean("new2", 0, "runhelp n2", flag_values=new_flags) + self.assertEqual(len(new_flags.FlagDict()), 2) + old_len = len(FLAGS.FlagDict()) + FLAGS.AppendFlagValues(new_flags) + self.assertEqual(len(FLAGS.FlagDict())-old_len, 2) + self.assertEqual("new1" in FLAGS.FlagDict(), True) + self.assertEqual("new2" in FLAGS.FlagDict(), True) + + # Make sure AppendFlagValues works with flags with shortnames. 
+ new_flags = flags.FlagValues() + flags.DEFINE_boolean("new3", 0, "runhelp n3", flag_values=new_flags) + flags.DEFINE_boolean("new4", 0, "runhelp n4", flag_values=new_flags, + short_name="n4") + self.assertEqual(len(new_flags.FlagDict()), 3) + old_len = len(FLAGS.FlagDict()) + FLAGS.AppendFlagValues(new_flags) + self.assertEqual(len(FLAGS.FlagDict())-old_len, 3) + self.assertTrue("new3" in FLAGS.FlagDict()) + self.assertTrue("new4" in FLAGS.FlagDict()) + self.assertTrue("n4" in FLAGS.FlagDict()) + self.assertEqual(FLAGS.FlagDict()['n4'], FLAGS.FlagDict()['new4']) + + # Make sure AppendFlagValues fails on duplicates + flags.DEFINE_boolean("dup4", 0, "runhelp d41") + new_flags = flags.FlagValues() + flags.DEFINE_boolean("dup4", 0, "runhelp d42", flag_values=new_flags) + try: + FLAGS.AppendFlagValues(new_flags) + raise AssertionError("ignore_copy was not set but caused no exception") + except flags.DuplicateFlag, e: + pass + + # Integer out of bounds + try: + argv = ('./program', '--repeat=-4') + FLAGS(argv) + raise AssertionError('integer bounds exception not raised:' + + str(FLAGS.repeat)) + except flags.IllegalFlagValue: + pass + + # Non-integer + try: + argv = ('./program', '--repeat=2.5') + FLAGS(argv) + raise AssertionError("malformed integer value exception not raised") + except flags.IllegalFlagValue: + pass + + # Missing required arugment + try: + argv = ('./program', '--name') + FLAGS(argv) + raise AssertionError("Flag argument required exception not raised") + except flags.FlagsError: + pass + + # Non-boolean arguments for boolean + try: + argv = ('./program', '--debug=goofup') + FLAGS(argv) + raise AssertionError("Illegal flag value exception not raised") + except flags.IllegalFlagValue: + pass + + try: + argv = ('./program', '--debug=42') + FLAGS(argv) + raise AssertionError("Illegal flag value exception not raised") + except flags.IllegalFlagValue: + pass + + + # Non-numeric argument for integer flag --repeat + try: + argv = ('./program', '--repeat', 'Bob', 'extra') + FLAGS(argv) + raise AssertionError("Illegal flag value exception not raised") + except flags.IllegalFlagValue: + pass + + ################################################ + # Code to test the flagfile=<> loading behavior + ################################################ + def _SetupTestFiles(self): + """ Creates and sets up some dummy flagfile files with bogus flags""" + + # Figure out where to create temporary files + tmp_path = '/tmp/flags_unittest' + if os.path.exists(tmp_path): + shutil.rmtree(tmp_path) + os.makedirs(tmp_path) + + try: + tmp_flag_file_1 = open((tmp_path + '/UnitTestFile1.tst'), 'w') + tmp_flag_file_2 = open((tmp_path + '/UnitTestFile2.tst'), 'w') + tmp_flag_file_3 = open((tmp_path + '/UnitTestFile3.tst'), 'w') + except IOError, e_msg: + print e_msg + print 'FAIL\n File Creation problem in Unit Test' + sys.exit(1) + + # put some dummy flags in our test files + tmp_flag_file_1.write('#A Fake Comment\n') + tmp_flag_file_1.write('--UnitTestMessage1=tempFile1!\n') + tmp_flag_file_1.write('\n') + tmp_flag_file_1.write('--UnitTestNumber=54321\n') + tmp_flag_file_1.write('--noUnitTestBoolFlag\n') + file_list = [tmp_flag_file_1.name] + # this one includes test file 1 + tmp_flag_file_2.write('//A Different Fake Comment\n') + tmp_flag_file_2.write('--flagfile=%s\n' % tmp_flag_file_1.name) + tmp_flag_file_2.write('--UnitTestMessage2=setFromTempFile2\n') + tmp_flag_file_2.write('\t\t\n') + tmp_flag_file_2.write('--UnitTestNumber=6789a\n') + file_list.append(tmp_flag_file_2.name) + # this file points to 
itself + tmp_flag_file_3.write('--flagfile=%s\n' % tmp_flag_file_3.name) + tmp_flag_file_3.write('--UnitTestMessage1=setFromTempFile3\n') + tmp_flag_file_3.write('#YAFC\n') + tmp_flag_file_3.write('--UnitTestBoolFlag\n') + file_list.append(tmp_flag_file_3.name) + + tmp_flag_file_1.close() + tmp_flag_file_2.close() + tmp_flag_file_3.close() + + return file_list # these are just the file names + # end SetupFiles def + + def _RemoveTestFiles(self, tmp_file_list): + """Closes the files we just created. tempfile deletes them for us """ + for file_name in tmp_file_list: + try: + os.remove(file_name) + except OSError, e_msg: + print '%s\n, Problem deleting test file' % e_msg + #end RemoveTestFiles def + + def __DeclareSomeFlags(self): + flags.DEFINE_string('UnitTestMessage1', 'Foo!', 'You Add Here.') + flags.DEFINE_string('UnitTestMessage2', 'Bar!', 'Hello, Sailor!') + flags.DEFINE_boolean('UnitTestBoolFlag', 0, 'Some Boolean thing') + flags.DEFINE_integer('UnitTestNumber', 12345, 'Some integer', + lower_bound=0) + flags.DEFINE_list('UnitTestList', "1,2,3", 'Some list') + + def _UndeclareSomeFlags(self): + FLAGS.__delattr__('UnitTestMessage1') + FLAGS.__delattr__('UnitTestMessage2') + FLAGS.__delattr__('UnitTestBoolFlag') + FLAGS.__delattr__('UnitTestNumber') + FLAGS.__delattr__('UnitTestList') + + def _ReadFlagsFromFiles(self, argv, force_gnu): + return argv[:1] + FLAGS.ReadFlagsFromFiles(argv[1:], force_gnu=force_gnu) + + #### Flagfile Unit Tests #### + def testMethod_flagfiles_1(self): + """ Test trivial case with no flagfile based options. """ + self.__DeclareSomeFlags() + try: + fake_cmd_line = 'fooScript --UnitTestBoolFlag' + fake_argv = fake_cmd_line.split(' ') + FLAGS(fake_argv) + self.assertEqual( FLAGS.UnitTestBoolFlag, 1) + self.assertEqual( fake_argv, self._ReadFlagsFromFiles(fake_argv, False)) + finally: + self._UndeclareSomeFlags() + # end testMethodOne + + def testMethod_flagfiles_2(self): + """Tests parsing one file + arguments off simulated argv""" + self.__DeclareSomeFlags() + try: + tmp_files = self._SetupTestFiles() + # specify our temp file on the fake cmd line + fake_cmd_line = 'fooScript --q --flagfile=%s' % tmp_files[0] + fake_argv = fake_cmd_line.split(' ') + + # We should see the original cmd line with the file's contents spliced in. + # Note that these will be in REVERSE order from order encountered in file + # This is done so arguements we encounter sooner will have priority. 
+ expected_results = ['fooScript', + '--UnitTestMessage1=tempFile1!', + '--UnitTestNumber=54321', + '--noUnitTestBoolFlag', + '--q'] + test_results = self._ReadFlagsFromFiles(fake_argv, False) + self.assertEqual(expected_results, test_results) + finally: + self._RemoveTestFiles(tmp_files) + self._UndeclareSomeFlags() + # end testTwo def + + def testMethod_flagfiles_3(self): + """Tests parsing nested files + arguments of simulated argv""" + self.__DeclareSomeFlags() + try: + tmp_files = self._SetupTestFiles() + # specify our temp file on the fake cmd line + fake_cmd_line = ('fooScript --UnitTestNumber=77 --flagfile=%s' + % tmp_files[1]) + fake_argv = fake_cmd_line.split(' ') + + expected_results = ['fooScript', + '--UnitTestMessage1=tempFile1!', + '--UnitTestNumber=54321', + '--noUnitTestBoolFlag', + '--UnitTestMessage2=setFromTempFile2', + '--UnitTestNumber=6789a', + '--UnitTestNumber=77'] + test_results = self._ReadFlagsFromFiles(fake_argv, False) + self.assertEqual(expected_results, test_results) + finally: + self._RemoveTestFiles(tmp_files) + self._UndeclareSomeFlags() + # end testThree def + + def testMethod_flagfiles_4(self): + """Tests parsing self-referential files + arguments of simulated argv. + This test should print a warning to stderr of some sort. + """ + self.__DeclareSomeFlags() + try: + tmp_files = self._SetupTestFiles() + # specify our temp file on the fake cmd line + fake_cmd_line = ('fooScript --flagfile=%s --noUnitTestBoolFlag' + % tmp_files[2]) + fake_argv = fake_cmd_line.split(' ') + expected_results = ['fooScript', + '--UnitTestMessage1=setFromTempFile3', + '--UnitTestBoolFlag', + '--noUnitTestBoolFlag' ] + + test_results = self._ReadFlagsFromFiles(fake_argv, False) + self.assertEqual(expected_results, test_results) + finally: + self._RemoveTestFiles(tmp_files) + self._UndeclareSomeFlags() + + def testMethod_flagfiles_5(self): + """Test that --flagfile parsing respects the '--' end-of-options marker.""" + self.__DeclareSomeFlags() + try: + tmp_files = self._SetupTestFiles() + # specify our temp file on the fake cmd line + fake_cmd_line = 'fooScript --SomeFlag -- --flagfile=%s' % tmp_files[0] + fake_argv = fake_cmd_line.split(' ') + expected_results = ['fooScript', + '--SomeFlag', + '--', + '--flagfile=%s' % tmp_files[0]] + + test_results = self._ReadFlagsFromFiles(fake_argv, False) + self.assertEqual(expected_results, test_results) + finally: + self._RemoveTestFiles(tmp_files) + self._UndeclareSomeFlags() + + def testMethod_flagfiles_6(self): + """Test that --flagfile parsing stops at non-options (non-GNU behavior).""" + self.__DeclareSomeFlags() + try: + tmp_files = self._SetupTestFiles() + # specify our temp file on the fake cmd line + fake_cmd_line = ('fooScript --SomeFlag some_arg --flagfile=%s' + % tmp_files[0]) + fake_argv = fake_cmd_line.split(' ') + expected_results = ['fooScript', + '--SomeFlag', + 'some_arg', + '--flagfile=%s' % tmp_files[0]] + + test_results = self._ReadFlagsFromFiles(fake_argv, False) + self.assertEqual(expected_results, test_results) + finally: + self._RemoveTestFiles(tmp_files) + self._UndeclareSomeFlags() + + def testMethod_flagfiles_7(self): + """Test that --flagfile parsing skips over a non-option (GNU behavior).""" + self.__DeclareSomeFlags() + try: + FLAGS.UseGnuGetOpt() + tmp_files = self._SetupTestFiles() + # specify our temp file on the fake cmd line + fake_cmd_line = ('fooScript --SomeFlag some_arg --flagfile=%s' + % tmp_files[0]) + fake_argv = fake_cmd_line.split(' ') + expected_results = ['fooScript', + 
'--UnitTestMessage1=tempFile1!', + '--UnitTestNumber=54321', + '--noUnitTestBoolFlag', + '--SomeFlag', + 'some_arg'] + + test_results = self._ReadFlagsFromFiles(fake_argv, False) + self.assertEqual(expected_results, test_results) + finally: + self._RemoveTestFiles(tmp_files) + self._UndeclareSomeFlags() + + def testMethod_flagfiles_8(self): + """Test that --flagfile parsing respects force_gnu=True.""" + self.__DeclareSomeFlags() + try: + tmp_files = self._SetupTestFiles() + # specify our temp file on the fake cmd line + fake_cmd_line = ('fooScript --SomeFlag some_arg --flagfile=%s' + % tmp_files[0]) + fake_argv = fake_cmd_line.split(' ') + expected_results = ['fooScript', + '--UnitTestMessage1=tempFile1!', + '--UnitTestNumber=54321', + '--noUnitTestBoolFlag', + '--SomeFlag', + 'some_arg'] + + test_results = self._ReadFlagsFromFiles(fake_argv, True) + self.assertEqual(expected_results, test_results) + finally: + self._RemoveTestFiles(tmp_files) + self._UndeclareSomeFlags() + + def test_flagfiles_user_path_expansion(self): + """Test that user directory referenced paths (ie. ~/foo) are correctly + expanded. This test depends on whatever account's running the unit test + to have read/write access to their own home directory, otherwise it'll + FAIL. + """ + self.__DeclareSomeFlags() + fake_flagfile_item_style_1 = '--flagfile=~/foo.file' + fake_flagfile_item_style_2 = '-flagfile=~/foo.file' + + expected_results = os.path.expanduser('~/foo.file') + + test_results = FLAGS.ExtractFilename(fake_flagfile_item_style_1) + self.assertEqual(expected_results, test_results) + + test_results = FLAGS.ExtractFilename(fake_flagfile_item_style_2) + self.assertEqual(expected_results, test_results) + + self._UndeclareSomeFlags() + + # end testFour def + + def test_no_touchy_non_flags(self): + """ + Test that the flags parser does not mutilate arguments which are + not supposed to be flags + """ + self.__DeclareSomeFlags() + fake_argv = ['fooScript', '--UnitTestBoolFlag', + 'command', '--command_arg1', '--UnitTestBoom', '--UnitTestB'] + argv = FLAGS(fake_argv) + self.assertEqual(argv, fake_argv[:1] + fake_argv[2:]) + self._UndeclareSomeFlags() + + def test_parse_flags_after_args_if_using_gnu_getopt(self): + """ + Test that flags given after arguments are parsed if using gnu_getopt. + """ + self.__DeclareSomeFlags() + FLAGS.UseGnuGetOpt() + fake_argv = ['fooScript', '--UnitTestBoolFlag', + 'command', '--UnitTestB'] + argv = FLAGS(fake_argv) + self.assertEqual(argv, ['fooScript', 'command']) + self._UndeclareSomeFlags() + + def test_SetDefault(self): + """ + Test changing flag defaults. + """ + self.__DeclareSomeFlags() + # Test that SetDefault changes both the default and the value, + # and that the value is changed when one is given as an option. + FLAGS['UnitTestMessage1'].SetDefault('New value') + self.assertEqual(FLAGS.UnitTestMessage1, 'New value') + self.assertEqual(FLAGS['UnitTestMessage1'].default_as_str,"'New value'") + FLAGS([ 'dummyscript', '--UnitTestMessage1=Newer value' ]) + self.assertEqual(FLAGS.UnitTestMessage1, 'Newer value') + + # Test that setting the default to None works correctly. + FLAGS['UnitTestNumber'].SetDefault(None) + self.assertEqual(FLAGS.UnitTestNumber, None) + self.assertEqual(FLAGS['UnitTestNumber'].default_as_str, None) + FLAGS([ 'dummyscript', '--UnitTestNumber=56' ]) + self.assertEqual(FLAGS.UnitTestNumber, 56) + + # Test that setting the default to zero works correctly. 
+ FLAGS['UnitTestNumber'].SetDefault(0) + self.assertEqual(FLAGS.UnitTestNumber, 0) + self.assertEqual(FLAGS['UnitTestNumber'].default_as_str, "'0'") + FLAGS([ 'dummyscript', '--UnitTestNumber=56' ]) + self.assertEqual(FLAGS.UnitTestNumber, 56) + + # Test that setting the default to "" works correctly. + FLAGS['UnitTestMessage1'].SetDefault("") + self.assertEqual(FLAGS.UnitTestMessage1, "") + self.assertEqual(FLAGS['UnitTestMessage1'].default_as_str, "''") + FLAGS([ 'dummyscript', '--UnitTestMessage1=fifty-six' ]) + self.assertEqual(FLAGS.UnitTestMessage1, "fifty-six") + + # Test that setting the default to false works correctly. + FLAGS['UnitTestBoolFlag'].SetDefault(False) + self.assertEqual(FLAGS.UnitTestBoolFlag, False) + self.assertEqual(FLAGS['UnitTestBoolFlag'].default_as_str, "'false'") + FLAGS([ 'dummyscript', '--UnitTestBoolFlag=true' ]) + self.assertEqual(FLAGS.UnitTestBoolFlag, True) + + # Test that setting a list default works correctly. + FLAGS['UnitTestList'].SetDefault('4,5,6') + self.assertEqual(FLAGS.UnitTestList, ['4', '5', '6']) + self.assertEqual(FLAGS['UnitTestList'].default_as_str, "'4,5,6'") + FLAGS([ 'dummyscript', '--UnitTestList=7,8,9' ]) + self.assertEqual(FLAGS.UnitTestList, ['7', '8', '9']) + + # Test that setting invalid defaults raises exceptions + self.assertRaises(flags.IllegalFlagValue, + FLAGS['UnitTestNumber'].SetDefault, 'oops') + self.assertRaises(flags.IllegalFlagValue, + FLAGS['UnitTestNumber'].SetDefault, -1) + self.assertRaises(flags.IllegalFlagValue, + FLAGS['UnitTestBoolFlag'].SetDefault, 'oops') + + self._UndeclareSomeFlags() + + def testMethod_ShortestUniquePrefixes(self): + """ + Test FlagValues.ShortestUniquePrefixes + """ + flags.DEFINE_string('a', '', '') + flags.DEFINE_string('abc', '', '') + flags.DEFINE_string('common_a_string', '', '') + flags.DEFINE_boolean('common_b_boolean', 0, '') + flags.DEFINE_boolean('common_c_boolean', 0, '') + flags.DEFINE_boolean('common', 0, '') + flags.DEFINE_integer('commonly', 0, '') + flags.DEFINE_boolean('zz', 0, '') + flags.DEFINE_integer('nozz', 0, '') + + shorter_flags = FLAGS.ShortestUniquePrefixes(FLAGS.FlagDict()) + + expected_results = {'nocommon_b_boolean': 'nocommon_b', + 'common_c_boolean': 'common_c', + 'common_b_boolean': 'common_b', + 'a': 'a', + 'abc': 'ab', + 'zz': 'z', + 'nozz': 'nozz', + 'common_a_string': 'common_a', + 'commonly': 'commonl', + 'nocommon_c_boolean': 'nocommon_c', + 'nocommon': 'nocommon', + 'common': 'common'} + + for name, shorter in expected_results.iteritems(): + self.assertEquals(shorter_flags[name], shorter) + + FLAGS.__delattr__('a') + FLAGS.__delattr__('abc') + FLAGS.__delattr__('common_a_string') + FLAGS.__delattr__('common_b_boolean') + FLAGS.__delattr__('common_c_boolean') + FLAGS.__delattr__('common') + FLAGS.__delattr__('commonly') + FLAGS.__delattr__('zz') + FLAGS.__delattr__('nozz') + + def test_twodasharg_first(self): + flags.DEFINE_string("twodash_name", "Bob", "namehelp") + flags.DEFINE_string("twodash_blame", "Rob", "blamehelp") + argv = ('./program', + '--', + '--twodash_name=Harry') + argv = FLAGS(argv) + self.assertEqual('Bob', FLAGS.twodash_name) + self.assertEqual(argv[1], '--twodash_name=Harry') + + def test_twodasharg_middle(self): + flags.DEFINE_string("twodash2_name", "Bob", "namehelp") + flags.DEFINE_string("twodash2_blame", "Rob", "blamehelp") + argv = ('./program', + '--twodash2_blame=Larry', + '--', + '--twodash2_name=Harry') + argv = FLAGS(argv) + self.assertEqual('Bob', FLAGS.twodash2_name) + self.assertEqual('Larry', 
FLAGS.twodash2_blame) + self.assertEqual(argv[1], '--twodash2_name=Harry') + + def test_onedasharg_first(self): + flags.DEFINE_string("onedash_name", "Bob", "namehelp") + flags.DEFINE_string("onedash_blame", "Rob", "blamehelp") + argv = ('./program', + '-', + '--onedash_name=Harry') + argv = FLAGS(argv) + self.assertEqual(argv[1], '-') + # TODO(csilvers): we should still parse --onedash_name=Harry as a + # flag, but currently we don't (we stop flag processing as soon as + # we see the first non-flag). + # - This requires gnu_getopt from Python 2.3+ see FLAGS.UseGnuGetOpt() + + def test_unrecognized_flags(self): + # Unknown flag --nosuchflag + try: + argv = ('./program', '--nosuchflag', '--name=Bob', 'extra') + FLAGS(argv) + raise AssertionError("Unknown flag exception not raised") + except flags.UnrecognizedFlag, e: + assert e.flagname == 'nosuchflag' + + # Unknown flag -w (short option) + try: + argv = ('./program', '-w', '--name=Bob', 'extra') + FLAGS(argv) + raise AssertionError("Unknown flag exception not raised") + except flags.UnrecognizedFlag, e: + assert e.flagname == 'w' + + # Unknown flag --nosuchflagwithparam=foo + try: + argv = ('./program', '--nosuchflagwithparam=foo', '--name=Bob', 'extra') + FLAGS(argv) + raise AssertionError("Unknown flag exception not raised") + except flags.UnrecognizedFlag, e: + assert e.flagname == 'nosuchflagwithparam' + + # Allow unknown flag --nosuchflag if specified with undefok + argv = ('./program', '--nosuchflag', '--name=Bob', + '--undefok=nosuchflag', 'extra') + argv = FLAGS(argv) + assert len(argv) == 2, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert argv[1]=='extra', "extra argument not preserved" + + # Allow unknown flag --noboolflag if undefok=boolflag is specified + argv = ('./program', '--noboolflag', '--name=Bob', + '--undefok=boolflag', 'extra') + argv = FLAGS(argv) + assert len(argv) == 2, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert argv[1]=='extra', "extra argument not preserved" + + # But not if the flagname is misspelled: + try: + argv = ('./program', '--nosuchflag', '--name=Bob', + '--undefok=nosuchfla', 'extra') + FLAGS(argv) + raise AssertionError("Unknown flag exception not raised") + except flags.UnrecognizedFlag, e: + assert e.flagname == 'nosuchflag' + + try: + argv = ('./program', '--nosuchflag', '--name=Bob', + '--undefok=nosuchflagg', 'extra') + FLAGS(argv) + raise AssertionError("Unknown flag exception not raised") + except flags.UnrecognizedFlag: + assert e.flagname == 'nosuchflag' + + # Allow unknown short flag -w if specified with undefok + argv = ('./program', '-w', '--name=Bob', '--undefok=w', 'extra') + argv = FLAGS(argv) + assert len(argv) == 2, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert argv[1]=='extra', "extra argument not preserved" + + # Allow unknown flag --nosuchflagwithparam=foo if specified + # with undefok + argv = ('./program', '--nosuchflagwithparam=foo', '--name=Bob', + '--undefok=nosuchflagwithparam', 'extra') + argv = FLAGS(argv) + assert len(argv) == 2, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert argv[1]=='extra', "extra argument not preserved" + + # Even if undefok specifies multiple flags + argv = ('./program', '--nosuchflag', '-w', '--nosuchflagwithparam=foo', + '--name=Bob', + '--undefok=nosuchflag,w,nosuchflagwithparam', + 'extra') + argv = FLAGS(argv) + assert 
len(argv) == 2, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert argv[1]=='extra', "extra argument not preserved" + + # However, not if undefok doesn't specify the flag + try: + argv = ('./program', '--nosuchflag', '--name=Bob', + '--undefok=another_such', 'extra') + FLAGS(argv) + raise AssertionError("Unknown flag exception not raised") + except flags.UnrecognizedFlag, e: + assert e.flagname == 'nosuchflag' + + # Make sure --undefok doesn't mask other option errors. + try: + # Provide an option requiring a parameter but not giving it one. + argv = ('./program', '--undefok=name', '--name') + FLAGS(argv) + raise AssertionError("Missing option parameter exception not raised") + except flags.UnrecognizedFlag: + raise AssertionError("Wrong kind of error exception raised") + except flags.FlagsError: + pass + + # Test --undefok + argv = ('./program', '--nosuchflag', '-w', '--nosuchflagwithparam=foo', + '--name=Bob', + '--undefok', + 'nosuchflag,w,nosuchflagwithparam', + 'extra') + argv = FLAGS(argv) + assert len(argv) == 2, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert argv[1]=='extra', "extra argument not preserved" + + def test_nonglobal_flags(self): + """Test use of non-global FlagValues""" + nonglobal_flags = flags.FlagValues() + flags.DEFINE_string("nonglobal_flag", "Bob", "flaghelp", nonglobal_flags) + argv = ('./program', + '--nonglobal_flag=Mary', + 'extra') + argv = nonglobal_flags(argv) + assert len(argv) == 2, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + assert argv[1]=='extra', "extra argument not preserved" + assert nonglobal_flags['nonglobal_flag'].value == 'Mary' + + def test_unrecognized_nonglobal_flags(self): + """Test unrecognized non-global flags""" + nonglobal_flags = flags.FlagValues() + argv = ('./program', + '--nosuchflag') + try: + argv = nonglobal_flags(argv) + raise AssertionError("Unknown flag exception not raised") + except flags.UnrecognizedFlag, e: + assert e.flagname == 'nosuchflag' + pass + + argv = ('./program', + '--nosuchflag', + '--undefok=nosuchflag') + + argv = nonglobal_flags(argv) + assert len(argv) == 1, "wrong number of arguments pulled" + assert argv[0]=='./program', "program name not preserved" + + def test_module_help(self): + """Test ModuleHelp().""" + helpstr = FLAGS.ModuleHelp(module_baz) + + expected_help = "\n" + module_baz.__name__ + ":" + """ + --[no]tmod_baz_x: Boolean flag. + (default: 'true')""" + + self.assertMultiLineEqual(expected_help, helpstr) + + def test_main_module_help(self): + """Test MainModuleHelp().""" + helpstr = FLAGS.MainModuleHelp() + + # When this test is invoked on behalf of flags_unittest_2_2, + # the main module has not defined any flags. Since there's + # no easy way to run this script in our test environment + # directly from python2.2, don't bother to test the output + # of MainModuleHelp() in that scenario. + if sys.version.startswith('2.2.'): + return + + expected_help = "\n" + sys.argv[0] + ':' + """ + --[no]debug: debughelp + (default: 'false') + -u,--[no]dup1: runhelp d12 + (default: 'true') + -u,--[no]dup2: runhelp d22 + (default: 'true') + -u,--[no]dup3: runhelp d32 + (default: 'true') + --[no]dup4: runhelp d41 + (default: 'false') + -?,--[no]help: show this help + --[no]helpshort: show usage only for this module + --[no]helpxml: like --help, but generates XML output + --kwery: : ? 
+ --l: how long to be + (default: '9223372032559808512') + (an integer) + --letters: a list of letters + (default: 'a,b,c') + (a comma separated list) + -m,--m_str: string option that can occur multiple times; + repeat this option to specify a list of values + (default: "['def1', 'def2']") + --name: namehelp + (default: 'Bob') + --[no]noexec: boolean flag with no as prefix + (default: 'true') + --numbers: a list of numbers + (default: '1,2,3') + (a comma separated list) + --[no]q: quiet mode + (default: 'true') + --[no]quack: superstring of 'q' + (default: 'false') + -r,--repeat: how many times to repeat (0-5) + (default: '4') + (a non-negative integer) + -s,--s_str: string option that can occur multiple times; + repeat this option to specify a list of values + (default: "['sing1']") + --[no]test0: test boolean parsing + --[no]test1: test boolean parsing + --[no]testget1: test parsing with defaults + --[no]testget2: test parsing with defaults + --[no]testget3: test parsing with defaults + --testget4: test parsing with defaults + (an integer) + --testlist: test lists parsing + (default: '') + (a comma separated list) + --[no]testnone: test boolean parsing + --testspacelist: tests space lists parsing + (default: '') + (a whitespace separated list) + --x: how eXtreme to be + (default: '3') + (an integer) + -z,--[no]zoom1: runhelp z1 + (default: 'false')""" + + if not MultiLineEqual(expected_help, helpstr): + self.fail() + + def test_create_flag_errors(self): + # Since the exception classes are exposed, nothing stops users + # from creating their own instances. This test makes sure that + # people modifying the flags module understand that the external + # mechanisms for creating the exceptions should continue to work. + e = flags.FlagsError() + e = flags.FlagsError("message") + e = flags.DuplicateFlag() + e = flags.DuplicateFlag("message") + e = flags.IllegalFlagValue() + e = flags.IllegalFlagValue("message") + e = flags.UnrecognizedFlag() + e = flags.UnrecognizedFlag("message") + + def testFlagValuesDelAttr(self): + """Checks that del FLAGS.flag_id works.""" + default_value = 'default value for testFlagValuesDelAttr' + # 1. Declare and delete a flag with no short name. + flags.DEFINE_string('delattr_foo', default_value, 'A simple flag.') + self.assertEquals(FLAGS.delattr_foo, default_value) + flag_obj = FLAGS['delattr_foo'] + # We also check that _FlagIsRegistered works as expected :) + self.assertTrue(FLAGS._FlagIsRegistered(flag_obj)) + del FLAGS.delattr_foo + self.assertFalse('delattr_foo' in FLAGS.FlagDict()) + self.assertFalse(FLAGS._FlagIsRegistered(flag_obj)) + # If the previous del FLAGS.delattr_foo did not work properly, the + # next definition will trigger a redefinition error. + flags.DEFINE_integer('delattr_foo', 3, 'A simple flag.') + del FLAGS.delattr_foo + + self.assertFalse('delattr_foo' in FLAGS.RegisteredFlags()) + + # 2. Declare and delete a flag with a short name. + flags.DEFINE_string('delattr_bar', default_value, 'flag with short name', + short_name='x5') + flag_obj = FLAGS['delattr_bar'] + self.assertTrue(FLAGS._FlagIsRegistered(flag_obj)) + del FLAGS.x5 + self.assertTrue(FLAGS._FlagIsRegistered(flag_obj)) + del FLAGS.delattr_bar + self.assertFalse(FLAGS._FlagIsRegistered(flag_obj)) + + # 3. 
Just like 2, but del FLAGS.name last + flags.DEFINE_string('delattr_bar', default_value, 'flag with short name', + short_name='x5') + flag_obj = FLAGS['delattr_bar'] + self.assertTrue(FLAGS._FlagIsRegistered(flag_obj)) + del FLAGS.delattr_bar + self.assertTrue(FLAGS._FlagIsRegistered(flag_obj)) + del FLAGS.x5 + self.assertFalse(FLAGS._FlagIsRegistered(flag_obj)) + + self.assertFalse('delattr_bar' in FLAGS.RegisteredFlags()) + self.assertFalse('x5' in FLAGS.RegisteredFlags()) + + def _GetNamesOfDefinedFlags(self, module, flag_values=FLAGS): + """Returns the list of names of flags defined by a module. + + Auxiliary for the testKeyFlags* methods. + + Args: + module: A module object or a string module name. + flag_values: A FlagValues object. + + Returns: + A list of strings. + """ + return [f.name for f in flag_values._GetFlagsDefinedByModule(module)] + + def _GetNamesOfKeyFlags(self, module, flag_values=FLAGS): + """Returns the list of names of key flags for a module. + + Auxiliary for the testKeyFlags* methods. + + Args: + module: A module object or a string module name. + flag_values: A FlagValues object. + + Returns: + A list of strings. + """ + return [f.name for f in flag_values._GetKeyFlagsForModule(module)] + + def testKeyFlags(self): + # Before starting any testing, make sure no flags are already + # defined for module_foo and module_bar. + self.assertListEqual(self._GetNamesOfKeyFlags(module_foo), []) + self.assertListEqual(self._GetNamesOfKeyFlags(module_bar), []) + self.assertListEqual(self._GetNamesOfDefinedFlags(module_foo), []) + self.assertListEqual(self._GetNamesOfDefinedFlags(module_bar), []) + + try: + # Defines a few flags in module_foo and module_bar. + module_foo.DefineFlags() + + # Part 1. Check that all flags defined by module_foo are key for + # that module, and similarly for module_bar. + for module in [module_foo, module_bar]: + self.assertListEqual(FLAGS._GetFlagsDefinedByModule(module), + FLAGS._GetKeyFlagsForModule(module)) + # Also check that each module defined the expected flags. + self.assertListEqual(self._GetNamesOfDefinedFlags(module), + module.NamesOfDefinedFlags()) + + # Part 2. Check that flags.DECLARE_key_flag works fine. + # Declare that some flags from module_bar are key for + # module_foo. + module_foo.DeclareKeyFlags() + + # Check that module_foo has the expected list of defined flags. + self.assertListEqual(self._GetNamesOfDefinedFlags(module_foo), + module_foo.NamesOfDefinedFlags()) + + # Check that module_foo has the expected list of key flags. + self.assertListEqual(self._GetNamesOfKeyFlags(module_foo), + module_foo.NamesOfDeclaredKeyFlags()) + + # Part 3. Check that flags.ADOPT_module_key_flags works fine. + # Trigger a call to flags.ADOPT_module_key_flags(module_bar) + # inside module_foo. This should declare a few more key + # flags in module_foo. + module_foo.DeclareExtraKeyFlags() + + # Check that module_foo has the expected list of key flags. + self.assertListEqual(self._GetNamesOfKeyFlags(module_foo), + module_foo.NamesOfDeclaredKeyFlags() + + module_foo.NamesOfDeclaredExtraKeyFlags()) + finally: + module_foo.RemoveFlags() + + def testKeyFlagsWithNonDefaultFlagValuesObject(self): + # Check that key flags work even when we use a FlagValues object + # that is not the default flags.FLAGS object. Otherwise, this + # test is similar to testKeyFlags, but it uses only module_bar. + # The other test module (module_foo) uses only the default values + # for the flag_values keyword arguments. 
This way, testKeyFlags + # and this method test both the default FlagValues, the explicitly + # specified one, and a mixed usage of the two. + + # A brand-new FlagValues object, to use instead of flags.FLAGS. + fv = flags.FlagValues() + + # Before starting any testing, make sure no flags are already + # defined for module_foo and module_bar. + self.assertListEqual( + self._GetNamesOfKeyFlags(module_bar, flag_values=fv), + []) + self.assertListEqual( + self._GetNamesOfDefinedFlags(module_bar, flag_values=fv), + []) + + module_bar.DefineFlags(flag_values=fv) + + # Check that all flags defined by module_bar are key for that + # module, and that module_bar defined the expected flags. + self.assertListEqual(fv._GetFlagsDefinedByModule(module_bar), + fv._GetKeyFlagsForModule(module_bar)) + self.assertListEqual( + self._GetNamesOfDefinedFlags(module_bar, flag_values=fv), + module_bar.NamesOfDefinedFlags()) + + # Pick two flags from module_bar, declare them as key for the + # current (i.e., main) module (via flags.DECLARE_key_flag), and + # check that we get the expected effect. The important thing is + # that we always use flags_values=fv (instead of the default + # FLAGS). + main_module = flags._GetMainModule() + names_of_flags_defined_by_bar = module_bar.NamesOfDefinedFlags() + flag_name_0 = names_of_flags_defined_by_bar[0] + flag_name_2 = names_of_flags_defined_by_bar[2] + + flags.DECLARE_key_flag(flag_name_0, flag_values=fv) + self.assertListEqual( + self._GetNamesOfKeyFlags(main_module, flag_values=fv), + [flag_name_0]) + + flags.DECLARE_key_flag(flag_name_2, flag_values=fv) + self.assertListEqual( + self._GetNamesOfKeyFlags(main_module, flag_values=fv), + [flag_name_0, flag_name_2]) + + flags.ADOPT_module_key_flags(module_bar, flag_values=fv) + key_flags = self._GetNamesOfKeyFlags(main_module, flag_values=fv) + # Order is irrelevant; hence, we sort both lists before comparison. + key_flags.sort() + names_of_flags_defined_by_bar.sort() + self.assertListEqual(key_flags, names_of_flags_defined_by_bar) + + def testMainModuleHelpWithKeyFlags(self): + # Similar to test_main_module_help, but this time we make sure to + # declare some key flags. + try: + help_flag_help = ( + " -?,--[no]help: show this help\n" + " --[no]helpshort: show usage only for this module\n" + " --[no]helpxml: like --help, but generates XML output" + ) + + expected_help = "\n%s:\n%s" % (sys.argv[0], help_flag_help) + + # Safety check that the main module does not declare any flags + # at the beginning of this test. + self.assertMultiLineEqual(expected_help, FLAGS.MainModuleHelp()) + + # Define one flag in this main module and some flags in modules + # a and b. Also declare one flag from module a and one flag + # from module b as key flags for the main module. + flags.DEFINE_integer('main_module_int_fg', 1, + 'Integer flag in the main module.') + + main_module_int_fg_help = ( + " --main_module_int_fg: Integer flag in the main module.\n" + " (default: '1')\n" + " (an integer)") + + expected_help += "\n" + main_module_int_fg_help + self.assertMultiLineEqual(expected_help, FLAGS.MainModuleHelp()) + + # The following call should be a no-op: any flag declared by a + # module is automatically key for that module. + flags.DECLARE_key_flag('main_module_int_fg') + self.assertMultiLineEqual(expected_help, FLAGS.MainModuleHelp()) + + # The definition of a few flags in an imported module should not + # change the main module help. 
+ module_foo.DefineFlags() + self.assertMultiLineEqual(expected_help, FLAGS.MainModuleHelp()) + + flags.DECLARE_key_flag('tmod_foo_bool') + tmod_foo_bool_help = ( + " --[no]tmod_foo_bool: Boolean flag from module foo.\n" + " (default: 'true')") + expected_help += "\n" + tmod_foo_bool_help + self.assertMultiLineEqual(expected_help, FLAGS.MainModuleHelp()) + + flags.DECLARE_key_flag('tmod_bar_z') + tmod_bar_z_help = ( + " --[no]tmod_bar_z: Another boolean flag from module bar.\n" + " (default: 'false')") + # Unfortunately, there is some flag sorting inside + # MainModuleHelp, so we can't keep incrementally extending + # the expected_help string ... + expected_help = ("\n%s:\n%s\n%s\n%s\n%s" % + (sys.argv[0], + help_flag_help, + main_module_int_fg_help, + tmod_bar_z_help, + tmod_foo_bool_help)) + self.assertMultiLineEqual(FLAGS.MainModuleHelp(), expected_help) + + finally: + # At the end, delete all the flag information we created. + FLAGS.__delattr__('main_module_int_fg') + module_foo.RemoveFlags() + + def test_ADOPT_module_key_flags(self): + # Check that ADOPT_module_key_flags raises an exception when + # called with a module name (as opposed to a module object). + self.assertRaises(flags.FlagsError, + flags.ADOPT_module_key_flags, + 'google3.pyglib.app') + + def test_GetCallingModule(self): + self.assertEqual(flags._GetCallingModule(), sys.argv[0]) + self.assertEqual( + module_foo.GetModuleName(), + 'test_module_foo') + self.assertEqual( + module_bar.GetModuleName(), + 'test_module_bar') + + # We execute the following exec statements for their side-effect + # (i.e., not raising an error). They emphasize the case that not + # all code resides in one of the imported modules: Python is a + # really dynamic language, where we can dynamically construct some + # code and execute it. + code = ("import gflags\n" + "module_name = gflags._GetCallingModule()") + exec code + + # Next two exec statements executes code with a global environment + # that is different from the global environment of any imported + # module. + exec code in {} + # vars(self) returns a dictionary corresponding to the symbol + # table of the self object. dict(...) makes a distinct copy of + # this dictionary, such that any new symbol definition by the + # exec-ed code (e.g., import flags, module_name = ...) does not + # affect the symbol table of self. + exec code in dict(vars(self)) + + # Next test is actually more involved: it checks not only that + # _GetCallingModule does not crash inside exec code, it also checks + # that it returns the expected value: the code executed via exec + # code is treated as being executed by the current module. We + # check it twice: first time by executing exec from the main + # module, second time by executing it from module_bar. + global_dict = {} + exec code in global_dict + self.assertEqual(global_dict['module_name'], + sys.argv[0]) + + global_dict = {} + module_bar.ExecuteCode(code, global_dict) + self.assertEqual( + global_dict['module_name'], + 'test_module_bar') + + +def main(): + unittest.main() + + +if __name__ == '__main__': + main() diff --git a/vendor/python-gflags/setup.py b/vendor/python-gflags/setup.py new file mode 100755 index 000000000000..26820a627749 --- /dev/null +++ b/vendor/python-gflags/setup.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# Copyright (c) 2007, Google Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from setuptools import setup + +setup(name='python-gflags', + version='1.3', + description='Google Commandline Flags Module', + license='BSD', + author='Google Inc.', + author_email='opensource@google.com', + url='http://code.google.com/p/python-gflags', + py_modules=["gflags"], + data_files=[("bin", ["gflags2man.py"])], + include_package_data=True, + ) diff --git a/vendor/python-gflags/test_module_bar.py b/vendor/python-gflags/test_module_bar.py new file mode 100755 index 000000000000..55541ff76215 --- /dev/null +++ b/vendor/python-gflags/test_module_bar.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Auxiliary module for testing flags.py. + +The purpose of this module is to define a few flags. We want to make +sure the unit tests for flags.py involve more than one module. +""" + +__author__ = 'Alex Salcianu' + +__pychecker__ = "no-local" # for unittest + +# We use the name 'flags' internally in this test, for historical reasons. +# Don't do this yourself! :-) Just do 'import gflags; FLAGS=gflags.FLAGS; etc' +import gflags as flags +FLAGS = flags.FLAGS + + +def DefineFlags(flag_values=FLAGS): + """Defines some flags. + + Args: + flag_values: The FlagValues object we want to register the flags + with. + """ + # The 'tmod_bar_' prefix (short for 'test_module_bar') ensures there + # is no name clash with the existing flags. + flags.DEFINE_boolean('tmod_bar_x', True, 'Boolean flag.', + flag_values=flag_values) + flags.DEFINE_string('tmod_bar_y', 'default', 'String flag.', + flag_values=flag_values) + flags.DEFINE_boolean('tmod_bar_z', False, + 'Another boolean flag from module bar.', + flag_values=flag_values) + flags.DEFINE_integer('tmod_bar_t', 4, 'Sample int flag.', + flag_values=flag_values) + flags.DEFINE_integer('tmod_bar_u', 5, 'Sample int flag.', + flag_values=flag_values) + flags.DEFINE_integer('tmod_bar_v', 6, 'Sample int flag.', + flag_values=flag_values) + + +def RemoveOneFlag(flag_name, flag_values=FLAGS): + """Removes the definition of one flag from flags.FLAGS. + + Note: if the flag is not defined in flags.FLAGS, this function does + not do anything (in particular, it does not raise any exception). + + Motivation: We use this function for cleanup *after* a test: if + there was a failure during a test and not all flags were declared, + we do not want the cleanup code to crash. + + Args: + flag_name: A string, the name of the flag to delete. + flag_values: The FlagValues object we remove the flag from. + """ + if flag_name in flag_values.FlagDict(): + flag_values.__delattr__(flag_name) + + +def NamesOfDefinedFlags(): + """Returns: List of names of the flags declared in this module.""" + return ['tmod_bar_x', + 'tmod_bar_y', + 'tmod_bar_z', + 'tmod_bar_t', + 'tmod_bar_u', + 'tmod_bar_v'] + + +def RemoveFlags(flag_values=FLAGS): + """Deletes the flag definitions done by the above DefineFlags(). + + Args: + flag_values: The FlagValues object we remove the flags from. + """ + for flag_name in NamesOfDefinedFlags(): + RemoveOneFlag(flag_name, flag_values=flag_values) + + +def GetModuleName(): + """Uses flags._GetCallingModule() to return the name of this module. + + For checking that _GetCallingModule works as expected. + + Returns: + A string, the name of this module. + """ + # Calling the protected _GetCallingModule generates a lint warning, + # but we do not have any other alternative to test that function. + return flags._GetCallingModule() + + +def ExecuteCode(code, global_dict): + """Executes some code in a given global environment. + + For testing of _GetCallingModule. + + Args: + code: A string, the code to be executed. 
+ global_dict: A dictionary, the global environment that code should + be executed in. + """ + # Indeed, using exec generates a lint warning. But some user code + # actually uses exec, and we have to test for it ... + exec code in global_dict diff --git a/vendor/python-gflags/test_module_foo.py b/vendor/python-gflags/test_module_foo.py new file mode 100755 index 000000000000..9f2ab49e1588 --- /dev/null +++ b/vendor/python-gflags/test_module_foo.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Auxiliary module for testing flags.py. + +The purpose of this module is to define a few flags, and declare some +other flags as being important. We want to make sure the unit tests +for flags.py involve more than one module. +""" + +__author__ = 'Alex Salcianu' + +__pychecker__ = "no-local" # for unittest + +# We use the name 'flags' internally in this test, for historical reasons. +# Don't do this yourself! :-) Just do 'import gflags; FLAGS=gflags.FLAGS; etc' +import gflags as flags +FLAGS = flags.FLAGS + +# For historical reasons we use the name module_bar instead of test_module_bar. +import test_module_bar as module_bar + +DECLARED_KEY_FLAGS = ['tmod_bar_x', 'tmod_bar_z', 'tmod_bar_t'] + + +def DefineFlags(): + """Defines a few flags.""" + module_bar.DefineFlags() + # The 'tmod_foo_' prefix (short for 'test_module_foo') ensures that we + # have no name clash with existing flags. 
+ flags.DEFINE_boolean('tmod_foo_bool', True, 'Boolean flag from module foo.') + flags.DEFINE_string('tmod_foo_str', 'default', 'String flag.') + flags.DEFINE_integer('tmod_foo_int', 3, 'Sample int flag.') + + +def DeclareKeyFlags(): + """Declares a few key flags.""" + for flag_name in DECLARED_KEY_FLAGS: + flags.DECLARE_key_flag(flag_name) + + +def DeclareExtraKeyFlags(): + """Declares some extra key flags.""" + flags.ADOPT_module_key_flags(module_bar) + + +def NamesOfDefinedFlags(): + """Returns: list of names of flags defined by this module.""" + return ['tmod_foo_bool', 'tmod_foo_str', 'tmod_foo_int'] + + +def NamesOfDeclaredKeyFlags(): + """Returns: list of names of key flags for this module.""" + return NamesOfDefinedFlags() + DECLARED_KEY_FLAGS + + +def NamesOfDeclaredExtraKeyFlags(): + """Returns the list of names of additional key flags for this module. + + These are the flags that became key for this module only as a result + of a call to DeclareExtraKeyFlags() above. I.e., the flags declared + by module_bar, that were not already declared as key for this + module. + + Returns: + The list of names of additional key flags for this module. + """ + names_of_extra_key_flags = list(module_bar.NamesOfDefinedFlags()) + for flag_name in NamesOfDeclaredKeyFlags(): + while flag_name in names_of_extra_key_flags: + names_of_extra_key_flags.remove(flag_name) + return names_of_extra_key_flags + + +def RemoveFlags(): + """Deletes the flag definitions done by the above DefineFlags().""" + for flag_name in NamesOfDefinedFlags(): + module_bar.RemoveOneFlag(flag_name) + module_bar.RemoveFlags() + + +def GetModuleName(): + """Uses flags._GetCallingModule() to return the name of this module. + + For checking that _GetCallingModule works as expected. + + Returns: + A string, the name of this module. + """ + # Calling the protected _GetCallingModule generates a lint warning, + # but we do not have any other alternative to test that function. + return flags._GetCallingModule() diff --git a/vendor/redis-py/.gitignore b/vendor/redis-py/.gitignore new file mode 100755 index 000000000000..0ed6db73c1c8 --- /dev/null +++ b/vendor/redis-py/.gitignore @@ -0,0 +1,5 @@ +*.pyc +redis.egg-info +build/ +dist/ +dump.rdb diff --git a/vendor/redis-py/CHANGES b/vendor/redis-py/CHANGES new file mode 100755 index 000000000000..0d1955b5ee52 --- /dev/null +++ b/vendor/redis-py/CHANGES @@ -0,0 +1,58 @@ +* 1.3.6 + * Implementation of all Hash commands + * Pipelines now wrap their execution with MULTI and EXEC commands to + process all commands atomically. + * Connections can now set timeout. If command execution exceeds the + timeout, an exception is raised. + * Numerous bug fixes and more tests. +* 1.3.4 + * Skipped version numbers ahead so that the client version matches the + Redis version it is feature-compatible with. Going forward, the client + will stay in sync with Redis version numbers when client updates are + made. + * Completely refactored the client library. It's now trivial to maintain + and add new commands. The library is also much more consistent. + * With the exception of "Response value type inference" (see below), the + client should be backwards compatible with 0.6.1. Some older, less + consistent methods will emit DeprecationWarnings, indicating that you + should use another command or option, but these should continue to + work as expected for the next few releases. 
+ * WARNING: BACKWARDS INCOMPATIBLE CHANGE: "Response value type inference" + Previously, all values returned from Redis went through a decoding + process. In this process, if the response was numeric, it would be + automatically converted to an int or float type prior to being returned. + Otherwise the response would be decoded as a unicode string. This meant + that storing the string "123" would actually return an integer 123, and + that the string "foo" would be returned as the unicode object u"foo". + This fundamentally breaks the retrieval of binary data (byte strings) and + values that might accidentally look like a number (a hash value). After + discussing this in detail with a number of users and on the Redis mailing + list (http://groups.google.com/group/redis-db/browse_thread/thread/9888eb9ff383c90c/ec44fe80b6400f7b#ec44fe80b6400f7b) + *ALL* values returned from methods such as get() now return raw + Python strings. It is now your responsibility to convert that data to + whatever datatype you need. Other methods that *always* return integer + or float values, such as INCR, DECR, LLEN, ZSCORE, etc., will continue + returning values of the appropriate type. This resolves issue #2, #8 + and #11: + http://github.com/andymccurdy/redis-py/issues#issue/2 + http://github.com/andymccurdy/redis-py/issues#issue/8 + http://github.com/andymccurdy/redis-py/issues#issue/11 + * The "select" method now takes a "host" and "port" argument in addition + to the database. Behind the scenes, select() swaps out the underlying + socket connection. This resolves issue #4: + http://github.com/andymccurdy/redis-py/issues#issue/4 + * The client now supports pipelining of Redis commands. Use the pipeline() + method to create a new Pipeline object. Each command called on the + pipeline object will be buffered until the pipeline if executed. + A list of each command's results will be returned by execution. Use + this for batch processing in order to eliminate multiple request/response + cycles. + +* 0.6.1 + * Added support for ZINCRBY via the `zincr` command + * Swapped score and member parameters to zadd to make it more similar to other commands. + * Added support for Python 2.4 (thanks David Moss) +* 0.6.0 Changed to Andy McCurdy's codebase on github +* 0.5.5 Patch from David Moss, SHUTDOWN and doctest bugfix +* 0.5.1-4 Bugfixes, no code changes, just packaging, 10/2/09 +* 0.5 Initial release, redis.py version 1.0.1, 10/2/09 diff --git a/vendor/redis-py/INSTALL b/vendor/redis-py/INSTALL new file mode 100755 index 000000000000..951f7dea8ac0 --- /dev/null +++ b/vendor/redis-py/INSTALL @@ -0,0 +1,6 @@ + +Please use + python setup.py install + +and report errors to Andy McCurdy (sedrik@gmail.com) + diff --git a/vendor/redis-py/LICENSE b/vendor/redis-py/LICENSE new file mode 100755 index 000000000000..073b05cec330 --- /dev/null +++ b/vendor/redis-py/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2010 Andy McCurdy + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/redis-py/MANIFEST.in b/vendor/redis-py/MANIFEST.in new file mode 100755 index 000000000000..1b2fcd9432b5 --- /dev/null +++ b/vendor/redis-py/MANIFEST.in @@ -0,0 +1,4 @@ +include CHANGES +include INSTALL +include LICENSE +include README.md diff --git a/vendor/redis-py/README.md b/vendor/redis-py/README.md new file mode 100755 index 000000000000..5a086e612629 --- /dev/null +++ b/vendor/redis-py/README.md @@ -0,0 +1,33 @@ +redis-py +======== + +This is the Python interface to the Redis key-value store. + + +Usage +----- + + >>> import redis + >>> r = redis.Redis(host='localhost', port=6379, db=0) + >>> r.set('foo', 'bar') # or r['foo'] = 'bar' + True + >>> r.get('foo') # or r['foo'] + 'bar' + +For a complete list of commands, check out the list of Redis commands here: +http://code.google.com/p/redis/wiki/CommandReference + + +Author +------ + +redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). +It can be found here: http://github.com/andymccurdy/redis-py + +Special thanks to: + +* Ludovico Magnocavallo, author of the original Python Redis client, from + which some of the socket code is still used. +* Alexander Solovyov for ideas on the generic response callback system. +* Paul Hubbard for initial packaging support. + diff --git a/vendor/redis-py/redis/__init__.py b/vendor/redis-py/redis/__init__.py new file mode 100755 index 000000000000..93155fb4dd00 --- /dev/null +++ b/vendor/redis-py/redis/__init__.py @@ -0,0 +1,10 @@ +# legacy imports +from redis.client import Redis, ConnectionPool +from redis.exceptions import RedisError, ConnectionError, AuthenticationError +from redis.exceptions import ResponseError, InvalidResponse, InvalidData + +__all__ = [ + 'Redis', 'ConnectionPool', + 'RedisError', 'ConnectionError', 'ResponseError', 'AuthenticationError' + 'InvalidResponse', 'InvalidData', + ] diff --git a/vendor/redis-py/redis/client.py b/vendor/redis-py/redis/client.py new file mode 100755 index 000000000000..d6932d1d3fc7 --- /dev/null +++ b/vendor/redis-py/redis/client.py @@ -0,0 +1,1259 @@ +import datetime +import errno +import socket +import threading +import time +import warnings +from itertools import chain +from redis.exceptions import ConnectionError, ResponseError, InvalidResponse +from redis.exceptions import RedisError, AuthenticationError + + +class ConnectionPool(threading.local): + "Manages a list of connections on the local thread" + def __init__(self): + self.connections = {} + + def make_connection_key(self, host, port, db): + "Create a unique key for the specified host, port and db" + return '%s:%s:%s' % (host, port, db) + + def get_connection(self, host, port, db, password, socket_timeout): + "Return a specific connection for the specified host, port and db" + key = self.make_connection_key(host, port, db) + if key not in self.connections: + self.connections[key] = Connection( + host, port, db, password, socket_timeout) + return self.connections[key] + + def get_all_connections(self): + "Return a list of all connection objects the manager knows about" + return 
self.connections.values() + + +class Connection(object): + "Manages TCP communication to and from a Redis server" + def __init__(self, host='localhost', port=6379, db=0, password=None, + socket_timeout=None): + self.host = host + self.port = port + self.db = db + self.password = password + self.socket_timeout = socket_timeout + self._sock = None + self._fp = None + + def connect(self, redis_instance): + "Connects to the Redis server if not already connected" + if self._sock: + return + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.connect((self.host, self.port)) + except socket.error, e: + # args for socket.error can either be (errno, "message") + # or just "message" + if len(e.args) == 1: + error_message = "Error connecting to %s:%s. %s." % \ + (self.host, self.port, e.args[0]) + else: + error_message = "Error %s connecting %s:%s. %s." % \ + (e.args[0], self.host, self.port, e.args[1]) + raise ConnectionError(error_message) + sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) + sock.settimeout(self.socket_timeout) + self._sock = sock + self._fp = sock.makefile('r') + redis_instance._setup_connection() + + def disconnect(self): + "Disconnects from the Redis server" + if self._sock is None: + return + try: + self._sock.close() + except socket.error: + pass + self._sock = None + self._fp = None + + def send(self, command, redis_instance): + "Send ``command`` to the Redis server. Return the result." + self.connect(redis_instance) + try: + self._sock.sendall(command) + except socket.error, e: + if e.args[0] == errno.EPIPE: + self.disconnect() + raise ConnectionError("Error %s while writing to socket. %s." % \ + e.args) + + def read(self, length=None): + """ + Read a line from the socket is length is None, + otherwise read ``length`` bytes + """ + try: + if length is not None: + return self._fp.read(length) + return self._fp.readline() + except socket.error, e: + self.disconnect() + if e.args and e.args[0] == errno.EAGAIN: + raise ConnectionError("Error while reading from socket: %s" % \ + e.args[1]) + return '' + +def list_or_args(command, keys, args): + # returns a single list combining keys and args + # if keys is not a list or args has items, issue a + # deprecation warning + oldapi = bool(args) + try: + i = iter(keys) + # a string can be iterated, but indicates + # keys wasn't passed as a list + if isinstance(keys, basestring): + oldapi = True + except TypeError: + oldapi = True + keys = [keys] + if oldapi: + warnings.warn(DeprecationWarning( + "Passing *args to Redis.%s has been deprecated. 
" + "Pass an iterable to ``keys`` instead" % command + )) + keys.extend(args) + return keys + +def timestamp_to_datetime(response): + "Converts a unix timestamp to a Python datetime object" + if not response: + return None + try: + response = int(response) + except ValueError: + return None + return datetime.datetime.fromtimestamp(response) + +def string_keys_to_dict(key_string, callback): + return dict([(key, callback) for key in key_string.split()]) + +def dict_merge(*dicts): + merged = {} + [merged.update(d) for d in dicts] + return merged + +def parse_info(response): + "Parse the result of Redis's INFO command into a Python dict" + info = {} + def get_value(value): + if ',' not in value: + return value + sub_dict = {} + for item in value.split(','): + k, v = item.split('=') + try: + sub_dict[k] = int(v) + except ValueError: + sub_dict[k] = v + return sub_dict + for line in response.splitlines(): + key, value = line.split(':') + try: + info[key] = int(value) + except ValueError: + info[key] = get_value(value) + return info + +def pairs_to_dict(response): + "Create a dict given a list of key/value pairs" + return dict(zip(response[::2], response[1::2])) + +def zset_score_pairs(response, **options): + """ + If ``withscores`` is specified in the options, return the response as + a list of (value, score) pairs + """ + if not response or not options['withscores']: + return response + return zip(response[::2], map(float, response[1::2])) + +def int_or_none(response): + if response is None: + return None + return int(response) + +def float_or_none(response): + if response is None: + return None + return float(response) + + +class Redis(threading.local): + """ + Implementation of the Redis protocol. + + This abstract class provides a Python interface to all Redis commands + and an implementation of the Redis protocol. 
+ + Connection and Pipeline derive from this, implementing how + the commands are sent and received to the Redis server + """ + RESPONSE_CALLBACKS = dict_merge( + string_keys_to_dict( + 'AUTH DEL EXISTS EXPIRE EXPIREAT HDEL HEXISTS HMSET MOVE MSETNX ' + 'RENAMENX SADD SISMEMBER SMOVE SETEX SETNX SREM ZADD ZREM', + bool + ), + string_keys_to_dict( + 'DECRBY HLEN INCRBY LLEN SCARD SDIFFSTORE SINTERSTORE ' + 'SUNIONSTORE ZCARD ZREMRANGEBYSCORE ZREVRANK', + int + ), + string_keys_to_dict( + # these return OK, or int if redis-server is >=1.3.4 + 'LPUSH RPUSH', + lambda r: isinstance(r, int) and r or r == 'OK' + ), + string_keys_to_dict('ZSCORE ZINCRBY', float_or_none), + string_keys_to_dict( + 'FLUSHALL FLUSHDB LSET LTRIM MSET RENAME ' + 'SAVE SELECT SET SHUTDOWN', + lambda r: r == 'OK' + ), + string_keys_to_dict('SDIFF SINTER SMEMBERS SUNION', + lambda r: set(r) + ), + string_keys_to_dict('ZRANGE ZRANGEBYSCORE ZREVRANGE', zset_score_pairs), + { + 'BGREWRITEAOF': lambda r: \ + r == 'Background rewriting of AOF file started', + 'BGSAVE': lambda r: r == 'Background saving started', + 'HGETALL': lambda r: r and pairs_to_dict(r) or {}, + 'INFO': parse_info, + 'LASTSAVE': timestamp_to_datetime, + 'PING': lambda r: r == 'PONG', + 'RANDOMKEY': lambda r: r and r or None, + 'TTL': lambda r: r != -1 and r or None, + 'ZRANK': int_or_none, + } + ) + + # commands that should NOT pull data off the network buffer when executed + SUBSCRIPTION_COMMANDS = set(['SUBSCRIBE', 'UNSUBSCRIBE']) + + def __init__(self, host='localhost', port=6379, + db=0, password=None, socket_timeout=None, + connection_pool=None, + charset='utf-8', errors='strict'): + self.encoding = charset + self.errors = errors + self.connection = None + self.subscribed = False + self.connection_pool = connection_pool and connection_pool or ConnectionPool() + self.select(db, host, port, password, socket_timeout) + + #### Legacty accessors of connection information #### + def _get_host(self): + return self.connection.host + host = property(_get_host) + + def _get_port(self): + return self.connection.port + port = property(_get_port) + + def _get_db(self): + return self.connection.db + db = property(_get_db) + + def pipeline(self, transaction=True): + """ + Return a new pipeline object that can queue multiple commands for + later execution. ``transaction`` indicates whether all commands + should be executed atomically. Apart from multiple atomic operations, + pipelines are useful for batch loading of data as they reduce the + number of back and forth network operations between client and server. 
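+
+        A rough sketch of the intended flow (key names are illustrative;
+        per the CHANGES notes, execute() returns one result per buffered
+        command):
+
+            pipe = r.pipeline()
+            pipe.set('foo', 'bar')      # buffered, not sent yet
+            pipe.incr('visits')         # buffered, not sent yet
+            results = pipe.execute()    # all commands sent in one round trip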
+ """ + return Pipeline( + self.connection, + transaction, + self.encoding, + self.errors + ) + + + #### COMMAND EXECUTION AND PROTOCOL PARSING #### + def _execute_command(self, command_name, command, **options): + subscription_command = command_name in self.SUBSCRIPTION_COMMANDS + if self.subscribed and not subscription_command: + raise RedisError("Cannot issue commands other than SUBSCRIBE and " + "UNSUBSCRIBE while channels are open") + try: + self.connection.send(command, self) + if subscription_command: + return None + return self.parse_response(command_name, **options) + except ConnectionError: + self.connection.disconnect() + self.connection.send(command, self) + if subscription_command: + return None + return self.parse_response(command_name, **options) + + def execute_command(self, *args, **options): + "Sends the command to the redis server and returns it's response" + cmd_count = len(args) + cmds = [] + for i in args: + enc_value = self.encode(i) + cmds.append('$%s\r\n%s\r\n' % (len(enc_value), enc_value)) + return self._execute_command( + args[0], + '*%s\r\n%s' % (cmd_count, ''.join(cmds)), + **options + ) + + def _parse_response(self, command_name, catch_errors): + conn = self.connection + response = conn.read()[:-2] # strip last two characters (\r\n) + if not response: + self.connection.disconnect() + raise ConnectionError("Socket closed on remote end") + + # server returned a null value + if response in ('$-1', '*-1'): + return None + byte, response = response[0], response[1:] + + # server returned an error + if byte == '-': + if response.startswith('ERR '): + response = response[4:] + raise ResponseError(response) + # single value + elif byte == '+': + return response + # int value + elif byte == ':': + return int(response) + # bulk response + elif byte == '$': + length = int(response) + if length == -1: + return None + response = length and conn.read(length) or '' + conn.read(2) # read the \r\n delimiter + return response + # multi-bulk response + elif byte == '*': + length = int(response) + if length == -1: + return None + if not catch_errors: + return [self._parse_response(command_name, catch_errors) + for i in range(length)] + else: + # for pipelines, we need to read everything, + # including response errors. 
otherwise we'd + # completely mess up the receive buffer + data = [] + for i in range(length): + try: + data.append( + self._parse_response(command_name, catch_errors) + ) + except Exception, e: + data.append(e) + return data + + raise InvalidResponse("Unknown response type for: %s" % command_name) + + def parse_response(self, command_name, catch_errors=False, **options): + "Parses a response from the Redis server" + response = self._parse_response(command_name, catch_errors) + if command_name in self.RESPONSE_CALLBACKS: + return self.RESPONSE_CALLBACKS[command_name](response, **options) + return response + + def encode(self, value): + "Encode ``value`` using the instance's charset" + if isinstance(value, str): + return value + if isinstance(value, unicode): + return value.encode(self.encoding, self.errors) + # not a string or unicode, attempt to convert to a string + return str(value) + + #### CONNECTION HANDLING #### + def get_connection(self, host, port, db, password, socket_timeout): + "Returns a connection object" + conn = self.connection_pool.get_connection( + host, port, db, password, socket_timeout) + # if for whatever reason the connection gets a bad password, make + # sure a subsequent attempt with the right password makes its way + # to the connection + conn.password = password + return conn + + def _setup_connection(self): + """ + After successfully opening a socket to the Redis server, the + connection object calls this method to authenticate and select + the appropriate database. + """ + if self.connection.password: + if not self.execute_command('AUTH', self.connection.password): + raise AuthenticationError("Invalid Password") + self.execute_command('SELECT', self.connection.db) + + def select(self, db, host=None, port=None, password=None, + socket_timeout=None): + """ + Switch to a different Redis connection. + + If the host and port aren't provided and there's an existing + connection, use the existing connection's host and port instead. + + Note this method actually replaces the underlying connection object + prior to issuing the SELECT command. This makes sure we protect + the thread-safe connections + """ + if host is None: + if self.connection is None: + raise RedisError("A valid hostname or IP address " + "must be specified") + host = self.connection.host + if port is None: + if self.connection is None: + raise RedisError("A valid port must be specified") + port = self.connection.port + + self.connection = self.get_connection( + host, port, db, password, socket_timeout) + + + #### SERVER INFORMATION #### + def bgrewriteaof(self): + "Tell the Redis server to rewrite the AOF file from data in memory." + return self.execute_command('BGREWRITEAOF') + + def bgsave(self): + """ + Tell the Redis server to save its data to disk. Unlike save(), + this method is asynchronous and returns immediately. + """ + return self.execute_command('BGSAVE') + + def dbsize(self): + "Returns the number of keys in the current database" + return self.execute_command('DBSIZE') + + def delete(self, *names): + "Delete one or more keys specified by ``names``" + return self.execute_command('DEL', *names) + __delitem__ = delete + + def flush(self, all_dbs=False): + warnings.warn(DeprecationWarning( + "'flush' has been deprecated. 
" + "Use Redis.flushdb() or Redis.flushall() instead")) + if all_dbs: + return self.flushall() + return self.flushdb() + + def flushall(self): + "Delete all keys in all databases on the current host" + return self.execute_command('FLUSHALL') + + def flushdb(self): + "Delete all keys in the current database" + return self.execute_command('FLUSHDB') + + def info(self): + "Returns a dictionary containing information about the Redis server" + return self.execute_command('INFO') + + def lastsave(self): + """ + Return a Python datetime object representing the last time the + Redis database was saved to disk + """ + return self.execute_command('LASTSAVE') + + def ping(self): + "Ping the Redis server" + return self.execute_command('PING') + + def save(self): + """ + Tell the Redis server to save its data to disk, + blocking until the save is complete + """ + return self.execute_command('SAVE') + + #### BASIC KEY COMMANDS #### + def append(self, key, value): + """ + Appends the string ``value`` to the value at ``key``. If ``key`` + doesn't already exist, create it with a value of ``value``. + Returns the new length of the value at ``key``. + """ + return self.execute_command('APPEND', key, value) + + def decr(self, name, amount=1): + """ + Decrements the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as 0 - ``amount`` + """ + return self.execute_command('DECRBY', name, amount) + + def exists(self, name): + "Returns a boolean indicating whether key ``name`` exists" + return self.execute_command('EXISTS', name) + __contains__ = exists + + def expire(self, name, time): + "Set an expire flag on key ``name`` for ``time`` seconds" + return self.execute_command('EXPIRE', name, time) + + def expireat(self, name, when): + """ + Set an expire flag on key ``name``. ``when`` can be represented + as an integer indicating unix time or a Python datetime object. + """ + if isinstance(when, datetime.datetime): + when = int(time.mktime(when.timetuple())) + return self.execute_command('EXPIREAT', name, when) + + def get(self, name): + """ + Return the value at key ``name``, or None of the key doesn't exist + """ + return self.execute_command('GET', name) + __getitem__ = get + + def getset(self, name, value): + """ + Set the value at key ``name`` to ``value`` if key doesn't exist + Return the value at key ``name`` atomically + """ + return self.execute_command('GETSET', name, value) + + def incr(self, name, amount=1): + """ + Increments the value of ``key`` by ``amount``. 
If no key exists, + the value will be initialized as ``amount`` + """ + return self.execute_command('INCRBY', name, amount) + + def keys(self, pattern='*'): + "Returns a list of keys matching ``pattern``" + return self.execute_command('KEYS', pattern) + + def mget(self, keys, *args): + """ + Returns a list of values ordered identically to ``keys`` + + * Passing *args to this method has been deprecated * + """ + keys = list_or_args('mget', keys, args) + return self.execute_command('MGET', *keys) + + def mset(self, mapping): + "Sets each key in the ``mapping`` dict to its corresponding value" + items = [] + [items.extend(pair) for pair in mapping.iteritems()] + return self.execute_command('MSET', *items) + + def msetnx(self, mapping): + """ + Sets each key in the ``mapping`` dict to its corresponding value if + none of the keys are already set + """ + items = [] + [items.extend(pair) for pair in mapping.iteritems()] + return self.execute_command('MSETNX', *items) + + def move(self, name, db): + "Moves the key ``name`` to a different Redis database ``db``" + return self.execute_command('MOVE', name, db) + + def randomkey(self): + "Returns the name of a random key" + return self.execute_command('RANDOMKEY') + + def rename(self, src, dst, **kwargs): + """ + Rename key ``src`` to ``dst`` + + * The following flags have been deprecated * + If ``preserve`` is True, rename the key only if the destination name + doesn't already exist + """ + if kwargs: + if 'preserve' in kwargs: + warnings.warn(DeprecationWarning( + "preserve option to 'rename' is deprecated, " + "use Redis.renamenx instead")) + if kwargs['preserve']: + return self.renamenx(src, dst) + return self.execute_command('RENAME', src, dst) + + def renamenx(self, src, dst): + "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist" + return self.execute_command('RENAMENX', src, dst) + + + def set(self, name, value, **kwargs): + """ + Set the value at key ``name`` to ``value`` + + * The following flags have been deprecated * + If ``preserve`` is True, set the value only if key doesn't already + exist + If ``getset`` is True, set the value only if key doesn't already exist + and return the resulting value of key + """ + if kwargs: + if 'getset' in kwargs: + warnings.warn(DeprecationWarning( + "getset option to 'set' is deprecated, " + "use Redis.getset() instead")) + if kwargs['getset']: + return self.getset(name, value) + if 'preserve' in kwargs: + warnings.warn(DeprecationWarning( + "preserve option to 'set' is deprecated, " + "use Redis.setnx() instead")) + if kwargs['preserve']: + return self.setnx(name, value) + return self.execute_command('SET', name, value) + __setitem__ = set + + def setex(self, name, value, time): + """ + Set the value of key ``name`` to ``value`` + that expires in ``time`` seconds + """ + return self.execute_command('SETEX', name, time, value) + + def setnx(self, name, value): + "Set the value of key ``name`` to ``value`` if key doesn't exist" + return self.execute_command('SETNX', name, value) + + def substr(self, name, start, end=-1): + """ + Return a substring of the string at key ``name``. ``start`` and ``end`` + are 0-based integers specifying the portion of the string to return. 
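+
+        For example, ``substr('mykey', 0, 4)`` would return the first five
+        characters of the value stored at ``mykey`` (a hypothetical key),
+        since both endpoints are inclusive.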
+ """ + return self.execute_command('SUBSTR', name, start, end) + + def ttl(self, name): + "Returns the number of seconds until the key ``name`` will expire" + return self.execute_command('TTL', name) + + def type(self, name): + "Returns the type of key ``name``" + return self.execute_command('TYPE', name) + + + #### LIST COMMANDS #### + def blpop(self, keys, timeout=0): + """ + LPOP a value off of the first non-empty list + named in the ``keys`` list. + + If none of the lists in ``keys`` has a value to LPOP, then block + for ``timeout`` seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + """ + keys = list(keys) + keys.append(timeout) + return self.execute_command('BLPOP', *keys) + + def brpop(self, keys, timeout=0): + """ + RPOP a value off of the first non-empty list + named in the ``keys`` list. + + If none of the lists in ``keys`` has a value to LPOP, then block + for ``timeout`` seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + """ + keys = list(keys) + keys.append(timeout) + return self.execute_command('BRPOP', *keys) + + def lindex(self, name, index): + """ + Return the item from list ``name`` at position ``index`` + + Negative indexes are supported and will return an item at the + end of the list + """ + return self.execute_command('LINDEX', name, index) + + def llen(self, name): + "Return the length of the list ``name``" + return self.execute_command('LLEN', name) + + def lpop(self, name): + "Remove and return the first item of the list ``name``" + return self.execute_command('LPOP', name) + + def lpush(self, name, value): + "Push ``value`` onto the head of the list ``name``" + return self.execute_command('LPUSH', name, value) + + def lrange(self, name, start, end): + """ + Return a slice of the list ``name`` between + position ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LRANGE', name, start, end) + + def lrem(self, name, value, num=0): + """ + Remove the first ``num`` occurrences of ``value`` from list ``name`` + + If ``num`` is 0, then all occurrences will be removed + """ + return self.execute_command('LREM', name, num, value) + + def lset(self, name, index, value): + "Set ``position`` of list ``name`` to ``value``" + return self.execute_command('LSET', name, index, value) + + def ltrim(self, name, start, end): + """ + Trim the list ``name``, removing all values not within the slice + between ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LTRIM', name, start, end) + + def pop(self, name, tail=False): + """ + Pop and return the first or last element of list ``name`` + + * This method has been deprecated, + use Redis.lpop or Redis.rpop instead * + """ + warnings.warn(DeprecationWarning( + "Redis.pop has been deprecated, " + "use Redis.lpop or Redis.rpop instead")) + if tail: + return self.rpop(name) + return self.lpop(name) + + def push(self, name, value, head=False): + """ + Push ``value`` onto list ``name``. 
+ + * This method has been deprecated, + use Redis.lpush or Redis.rpush instead * + """ + warnings.warn(DeprecationWarning( + "Redis.push has been deprecated, " + "use Redis.lpush or Redis.rpush instead")) + if head: + return self.lpush(name, value) + return self.rpush(name, value) + + def rpop(self, name): + "Remove and return the last item of the list ``name``" + return self.execute_command('RPOP', name) + + def rpoplpush(self, src, dst): + """ + RPOP a value off of the ``src`` list and atomically LPUSH it + on to the ``dst`` list. Returns the value. + """ + return self.execute_command('RPOPLPUSH', src, dst) + + def rpush(self, name, value): + "Push ``value`` onto the tail of the list ``name``" + return self.execute_command('RPUSH', name, value) + + def sort(self, name, start=None, num=None, by=None, get=None, + desc=False, alpha=False, store=None): + """ + Sort and return the list, set or sorted set at ``name``. + + ``start`` and ``num`` allow for paging through the sorted data + + ``by`` allows using an external key to weight and sort the items. + Use an "*" to indicate where in the key the item value is located + + ``get`` allows for returning items from external keys rather than the + sorted data itself. Use an "*" to indicate where int he key + the item value is located + + ``desc`` allows for reversing the sort + + ``alpha`` allows for sorting lexicographically rather than numerically + + ``store`` allows for storing the result of the sort into + the key ``store`` + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise RedisError("``start`` and ``num`` must both be specified") + + pieces = [name] + if by is not None: + pieces.append('BY') + pieces.append(by) + if start is not None and num is not None: + pieces.append('LIMIT') + pieces.append(start) + pieces.append(num) + if get is not None: + pieces.append('GET') + pieces.append(get) + if desc: + pieces.append('DESC') + if alpha: + pieces.append('ALPHA') + if store is not None: + pieces.append('STORE') + pieces.append(store) + return self.execute_command('SORT', *pieces) + + + #### SET COMMANDS #### + def sadd(self, name, value): + "Add ``value`` to set ``name``" + return self.execute_command('SADD', name, value) + + def scard(self, name): + "Return the number of elements in set ``name``" + return self.execute_command('SCARD', name) + + def sdiff(self, keys, *args): + "Return the difference of sets specified by ``keys``" + keys = list_or_args('sdiff', keys, args) + return self.execute_command('SDIFF', *keys) + + def sdiffstore(self, dest, keys, *args): + """ + Store the difference of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + keys = list_or_args('sdiffstore', keys, args) + return self.execute_command('SDIFFSTORE', dest, *keys) + + def sinter(self, keys, *args): + "Return the intersection of sets specified by ``keys``" + keys = list_or_args('sinter', keys, args) + return self.execute_command('SINTER', *keys) + + def sinterstore(self, dest, keys, *args): + """ + Store the intersection of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. 
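+
+        For example, ``sinterstore('common', ['s1', 's2'])`` would populate
+        ``common`` with the members shared by ``s1`` and ``s2`` and return
+        how many there were (all key names here are hypothetical).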
+ """ + keys = list_or_args('sinterstore', keys, args) + return self.execute_command('SINTERSTORE', dest, *keys) + + def sismember(self, name, value): + "Return a boolean indicating if ``value`` is a member of set ``name``" + return self.execute_command('SISMEMBER', name, value) + + def smembers(self, name): + "Return all members of the set ``name``" + return self.execute_command('SMEMBERS', name) + + def smove(self, src, dst, value): + "Move ``value`` from set ``src`` to set ``dst`` atomically" + return self.execute_command('SMOVE', src, dst, value) + + def spop(self, name): + "Remove and return a random member of set ``name``" + return self.execute_command('SPOP', name) + + def srandmember(self, name): + "Return a random member of set ``name``" + return self.execute_command('SRANDMEMBER', name) + + def srem(self, name, value): + "Remove ``value`` from set ``name``" + return self.execute_command('SREM', name, value) + + def sunion(self, keys, *args): + "Return the union of sets specifiued by ``keys``" + keys = list_or_args('sunion', keys, args) + return self.execute_command('SUNION', *keys) + + def sunionstore(self, dest, keys, *args): + """ + Store the union of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + keys = list_or_args('sunionstore', keys, args) + return self.execute_command('SUNIONSTORE', dest, *keys) + + + #### SORTED SET COMMANDS #### + def zadd(self, name, value, score): + "Add member ``value`` with score ``score`` to sorted set ``name``" + return self.execute_command('ZADD', name, score, value) + + def zcard(self, name): + "Return the number of elements in the sorted set ``name``" + return self.execute_command('ZCARD', name) + + def zincr(self, key, member, value=1): + "This has been deprecated, use zincrby instead" + warnings.warn(DeprecationWarning( + "Redis.zincr has been deprecated, use Redis.zincrby instead" + )) + return self.zincrby(key, member, value) + + def zincrby(self, name, value, amount=1): + "Increment the score of ``value`` in sorted set ``name`` by ``amount``" + return self.execute_command('ZINCRBY', name, amount, value) + + def zinter(self, dest, keys, aggregate=None): + """ + Intersect multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZINTER', dest, keys, aggregate) + + def zrange(self, name, start, end, desc=False, withscores=False): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``end`` sorted in ascending order. + + ``start`` and ``end`` can be negative, indicating the end of the range. + + ``desc`` indicates to sort in descending order. + + ``withscores`` indicates to return the scores along with the values. + The return type is a list of (value, score) pairs + """ + if desc: + return self.zrevrange(name, start, end, withscores) + pieces = ['ZRANGE', name, start, end] + if withscores: + pieces.append('withscores') + return self.execute_command(*pieces, **{'withscores': withscores}) + + def zrangebyscore(self, name, min, max, + start=None, num=None, withscores=False): + """ + Return a range of values from the sorted set ``name`` with scores + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice of the range. + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise RedisError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYSCORE', name, min, max] + if start is not None and num is not None: + pieces.extend(['LIMIT', start, num]) + if withscores: + pieces.append('withscores') + return self.execute_command(*pieces, **{'withscores': withscores}) + + def zrank(self, name, value): + """ + Returns a 0-based value indicating the rank of ``value`` in sorted set + ``name`` + """ + return self.execute_command('ZRANK', name, value) + + def zrem(self, name, value): + "Remove member ``value`` from sorted set ``name``" + return self.execute_command('ZREM', name, value) + + def zremrangebyscore(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with scores + between ``min`` and ``max`` + """ + return self.execute_command('ZREMRANGEBYSCORE', name, min, max) + + def zrevrange(self, name, start, num, withscores=False): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``num`` sorted in descending order. + + ``start`` and ``num`` can be negative, indicating the end of the range. + + ``withscores`` indicates to return the scores along with the values + as a dictionary of value => score + """ + pieces = ['ZREVRANGE', name, start, num] + if withscores: + pieces.append('withscores') + return self.execute_command(*pieces, **{'withscores': withscores}) + + def zrevrank(self, name, value): + """ + Returns a 0-based value indicating the descending rank of + ``value`` in sorted set ``name`` + """ + return self.execute_command('ZREVRANK', name, value) + + def zscore(self, name, value): + "Return the score of element ``value`` in sorted set ``name``" + return self.execute_command('ZSCORE', name, value) + + def zunion(self, dest, keys, aggregate=None): + """ + Union multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. 
+ """ + return self._zaggregate('ZUNION', dest, keys, aggregate) + + def _zaggregate(self, command, dest, keys, aggregate=None): + pieces = [command, dest, len(keys)] + if isinstance(keys, dict): + items = keys.items() + keys = [i[0] for i in items] + weights = [i[1] for i in items] + else: + weights = None + pieces.extend(keys) + if weights: + pieces.append('WEIGHTS') + pieces.extend(weights) + if aggregate: + pieces.append('AGGREGATE') + pieces.append(aggregate) + return self.execute_command(*pieces) + + #### HASH COMMANDS #### + def hdel(self, name, key): + "Delete ``key`` from hash ``name``" + return self.execute_command('HDEL', name, key) + + def hexists(self, name, key): + "Returns a boolean indicating if ``key`` exists within hash ``name``" + return self.execute_command('HEXISTS', name, key) + + def hget(self, name, key): + "Return the value of ``key`` within the hash ``name``" + return self.execute_command('HGET', name, key) + + def hgetall(self, name): + "Return a Python dict of the hash's name/value pairs" + return self.execute_command('HGETALL', name) + + def hincrby(self, name, key, amount=1): + "Increment the value of ``key`` in hash ``name`` by ``amount``" + return self.execute_command('HINCRBY', name, key, amount) + + def hkeys(self, name): + "Return the list of keys within hash ``name``" + return self.execute_command('HKEYS', name) + + def hlen(self, name): + "Return the number of elements in hash ``name``" + return self.execute_command('HLEN', name) + + def hset(self, name, key, value): + """ + Set ``key`` to ``value`` within hash ``name`` + Returns 1 if HSET created a new field, otherwise 0 + """ + return self.execute_command('HSET', name, key, value) + + def hmset(self, name, mapping): + """ + Sets each key in the ``mapping`` dict to its corresponding value + in the hash ``name`` + """ + items = [] + [items.extend(pair) for pair in mapping.iteritems()] + return self.execute_command('HMSET', name, *items) + + def hmget(self, name, keys): + "Returns a list of values ordered identically to ``keys``" + return self.execute_command('HMGET', name, *keys) + + def hvals(self, name): + "Return the list of values within hash ``name``" + return self.execute_command('HVALS', name) + + + # channels + def psubscribe(self, patterns): + "Subscribe to all channels matching any pattern in ``patterns``" + if isinstance(patterns, basestring): + patterns = [patterns] + response = self.execute_command('PSUBSCRIBE', *patterns) + # this is *after* the SUBSCRIBE in order to allow for lazy and broken + # connections that need to issue AUTH and SELECT commands + self.subscribed = True + return response + + def punsubscribe(self, patterns=[]): + """ + Unsubscribe from any channel matching any pattern in ``patterns``. + If empty, unsubscribe from all channels. + """ + if isinstance(patterns, basestring): + patterns = [patterns] + return self.execute_command('PUNSUBSCRIBE', *patterns) + + def subscribe(self, channels): + "Subscribe to ``channels``, waiting for messages to be published" + if isinstance(channels, basestring): + channels = [channels] + response = self.execute_command('SUBSCRIBE', *channels) + # this is *after* the SUBSCRIBE in order to allow for lazy and broken + # connections that need to issue AUTH and SELECT commands + self.subscribed = True + return response + + def unsubscribe(self, channels=[]): + """ + Unsubscribe from ``channels``. 
If empty, unsubscribe + from all channels + """ + if isinstance(channels, basestring): + channels = [channels] + return self.execute_command('UNSUBSCRIBE', *channels) + + def publish(self, channel, message): + """ + Publish ``message`` on ``channel``. + Returns the number of subscribers the message was delivered to. + """ + return self.execute_command('PUBLISH', channel, message) + + def listen(self): + "Listen for messages on channels this client has been subscribed to" + while self.subscribed: + r = self.parse_response('LISTEN') + message_type, channel, message = r[0], r[1], r[2] + yield (message_type, channel, message) + if message_type == 'unsubscribe' and message == 0: + self.subscribed = False + + +class Pipeline(Redis): + """ + Pipelines provide a way to transmit multiple commands to the Redis server + in one transmission. This is convenient for batch processing, such as + saving all the values in a list to Redis. + + All commands executed within a pipeline are wrapped with MULTI and EXEC + calls. This guarantees all commands executed in the pipeline will be + executed atomically. + + Any command raising an exception does *not* halt the execution of + subsequent commands in the pipeline. Instead, the exception is caught + and its instance is placed into the response list returned by execute(). + Code iterating over the response list should be able to deal with an + instance of an exception as a potential value. In general, these will be + ResponseError exceptions, such as those raised when issuing a command + on a key of a different datatype. + """ + def __init__(self, connection, transaction, charset, errors): + self.connection = connection + self.transaction = transaction + self.encoding = charset + self.errors = errors + self.subscribed = False # NOTE not in use, but necessary + self.reset() + + def reset(self): + self.command_stack = [] + + def _execute_command(self, command_name, command, **options): + """ + Stage a command to be executed when execute() is next called + + Returns the current Pipeline object back so commands can be + chained together, such as: + + pipe = pipe.set('foo', 'bar').incr('baz').decr('bang') + + At some other point, you can then run: pipe.execute(), + which will execute all commands queued in the pipe. + """ + # if the command_name is 'AUTH' or 'SELECT', then this command + # must have originated after a socket connection and a call to + # _setup_connection(). run these commands immediately without + # buffering them. + if command_name in ('AUTH', 'SELECT'): + return super(Pipeline, self)._execute_command( + command_name, command, **options) + else: + self.command_stack.append((command_name, command, options)) + return self + + def _execute_transaction(self, commands): + # wrap the commands in MULTI ... EXEC statements to indicate an + # atomic operation + all_cmds = ''.join([c for _1, c, _2 in chain( + (('', 'MULTI\r\n', ''),), + commands, + (('', 'EXEC\r\n', ''),) + )]) + self.connection.send(all_cmds, self) + # parse off the response for MULTI and all commands prior to EXEC + for i in range(len(commands)+1): + _ = self.parse_response('_') + # parse the EXEC. 
we want errors returned as items in the response + response = self.parse_response('_', catch_errors=True) + if len(response) != len(commands): + raise ResponseError("Wrong number of response items from " + "pipeline execution") + # Run any callbacks for the commands run in the pipeline + data = [] + for r, cmd in zip(response, commands): + if not isinstance(r, Exception): + if cmd[0] in self.RESPONSE_CALLBACKS: + r = self.RESPONSE_CALLBACKS[cmd[0]](r, **cmd[2]) + data.append(r) + return data + + def _execute_pipeline(self, commands): + # build up all commands into a single request to increase network perf + all_cmds = ''.join([c for _1, c, _2 in commands]) + self.connection.send(all_cmds, self) + data = [] + for command_name, _, options in commands: + data.append( + self.parse_response(command_name, catch_errors=True, **options) + ) + return data + + def execute(self): + "Execute all the commands in the current pipeline" + stack = self.command_stack + self.reset() + if self.transaction: + execute = self._execute_transaction + else: + execute = self._execute_pipeline + try: + return execute(stack) + except ConnectionError: + self.connection.disconnect() + return execute(stack) + + def select(self, *args, **kwargs): + raise RedisError("Cannot select a different database from a pipeline") + diff --git a/vendor/redis-py/redis/exceptions.py b/vendor/redis-py/redis/exceptions.py new file mode 100755 index 000000000000..d3449b664173 --- /dev/null +++ b/vendor/redis-py/redis/exceptions.py @@ -0,0 +1,20 @@ +"Core exceptions raised by the Redis client" + +class RedisError(Exception): + pass + +class AuthenticationError(RedisError): + pass + +class ConnectionError(RedisError): + pass + +class ResponseError(RedisError): + pass + +class InvalidResponse(RedisError): + pass + +class InvalidData(RedisError): + pass + \ No newline at end of file diff --git a/vendor/redis-py/setup.py b/vendor/redis-py/setup.py new file mode 100755 index 000000000000..0660371dfc30 --- /dev/null +++ b/vendor/redis-py/setup.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python + +""" +@file setup.py +@author Andy McCurdy +@date 2/12/2010 +@brief Setuptools configuration for redis client +""" + +version = '1.36' + +sdict = { + 'name' : 'redis', + 'version' : version, + 'description' : 'Python client for Redis key-value store', + 'long_description' : 'Python client for Redis key-value store', + 'url': 'http://github.com/andymccurdy/redis-py', + 'download_url' : 'http://cloud.github.com/downloads/andymccurdy/redis-py/redis-%s.tar.gz' % version, + 'author' : 'Andy McCurdy', + 'author_email' : 'sedrik@gmail.com', + 'maintainer' : 'Andy McCurdy', + 'maintainer_email' : 'sedrik@gmail.com', + 'keywords' : ['Redis', 'key-value store'], + 'license' : 'MIT', + 'packages' : ['redis'], + 'test_suite' : 'tests.all_tests', + 'classifiers' : [ + 'Development Status :: 4 - Beta', + 'Environment :: Console', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python'], +} + +try: + from setuptools import setup +except ImportError: + from distutils.core import setup + +setup(**sdict) + diff --git a/vendor/redis-py/tests/__init__.py b/vendor/redis-py/tests/__init__.py new file mode 100755 index 000000000000..45e55b079e52 --- /dev/null +++ b/vendor/redis-py/tests/__init__.py @@ -0,0 +1,11 @@ +import unittest +from server_commands import ServerCommandsTestCase +from connection_pool import ConnectionPoolTestCase +from pipeline import PipelineTestCase + +def 
all_tests():
+    suite = unittest.TestSuite()
+    suite.addTest(unittest.makeSuite(ServerCommandsTestCase))
+    suite.addTest(unittest.makeSuite(ConnectionPoolTestCase))
+    suite.addTest(unittest.makeSuite(PipelineTestCase))
+    return suite
diff --git a/vendor/redis-py/tests/connection_pool.py b/vendor/redis-py/tests/connection_pool.py
new file mode 100755
index 000000000000..56f5f43c68f8
--- /dev/null
+++ b/vendor/redis-py/tests/connection_pool.py
@@ -0,0 +1,53 @@
+import redis
+import threading
+import time
+import unittest
+
+class ConnectionPoolTestCase(unittest.TestCase):
+    def test_multiple_connections(self):
+        # 2 clients to the same host/port/db/pool should use the same connection
+        pool = redis.ConnectionPool()
+        r1 = redis.Redis(host='localhost', port=6379, db=9, connection_pool=pool)
+        r2 = redis.Redis(host='localhost', port=6379, db=9, connection_pool=pool)
+        self.assertEquals(r1.connection, r2.connection)
+
+        # if one of them switches, they should have
+        # separate connection objects
+        r2.select(db=10, host='localhost', port=6379)
+        self.assertNotEqual(r1.connection, r2.connection)
+
+        conns = [r1.connection, r2.connection]
+        conns.sort()
+
+        # but returning to the original state shares the object again
+        r2.select(db=9, host='localhost', port=6379)
+        self.assertEquals(r1.connection, r2.connection)
+
+        # the connection manager should still have just 2 connections
+        mgr_conns = pool.get_all_connections()
+        mgr_conns.sort()
+        self.assertEquals(conns, mgr_conns)
+
+    def test_threaded_workers(self):
+        r = redis.Redis(host='localhost', port=6379, db=9)
+        r.set('a', 'foo')
+        r.set('b', 'bar')
+
+        def _info_worker():
+            for i in range(50):
+                _ = r.info()
+                time.sleep(0.01)
+
+        def _keys_worker():
+            for i in range(50):
+                _ = r.keys()
+                time.sleep(0.01)
+
+        t1 = threading.Thread(target=_info_worker)
+        t2 = threading.Thread(target=_keys_worker)
+        t1.start()
+        t2.start()
+
+        for i in [t1, t2]:
+            i.join()
+
diff --git a/vendor/redis-py/tests/pipeline.py b/vendor/redis-py/tests/pipeline.py
new file mode 100755
index 000000000000..dcbfb0a301e6
--- /dev/null
+++ b/vendor/redis-py/tests/pipeline.py
@@ -0,0 +1,61 @@
+import redis
+import unittest
+
+class PipelineTestCase(unittest.TestCase):
+    def setUp(self):
+        self.client = redis.Redis(host='localhost', port=6379, db=9)
+        self.client.flushdb()
+
+    def tearDown(self):
+        self.client.flushdb()
+
+    def test_pipeline(self):
+        pipe = self.client.pipeline()
+        pipe.set('a', 'a1').get('a').zadd('z', 'z1', 1).zadd('z', 'z2', 4)
+        pipe.zincrby('z', 'z1').zrange('z', 0, 5, withscores=True)
+        self.assertEquals(pipe.execute(),
+            [
+                True,
+                'a1',
+                True,
+                True,
+                2.0,
+                [('z1', 2.0), ('z2', 4)],
+            ]
+            )
+
+    def test_invalid_command_in_pipeline(self):
+        # all commands but the invalid one should be executed correctly
+        self.client['c'] = 'a'
+        pipe = self.client.pipeline()
+        pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4)
+        result = pipe.execute()
+
+        self.assertEquals(result[0], True)
+        self.assertEquals(self.client['a'], '1')
+        self.assertEquals(result[1], True)
+        self.assertEquals(self.client['b'], '2')
+        # we can't lpush to a key that's a string value, so this should
+        # be a ResponseError exception
+        self.assert_(isinstance(result[2], redis.ResponseError))
+        self.assertEquals(self.client['c'], 'a')
+        self.assertEquals(result[3], True)
+        self.assertEquals(self.client['d'], '4')
+
+        # make sure the pipe was restored to a working state
+        self.assertEquals(pipe.set('z', 'zzz').execute(), [True])
+        self.assertEquals(self.client['z'], 'zzz')
+
+    def 
test_pipeline_cannot_select(self): + pipe = self.client.pipeline() + self.assertRaises(redis.RedisError, + pipe.select, 'localhost', 6379, db=9) + + def test_pipeline_no_transaction(self): + pipe = self.client.pipeline(transaction=False) + pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') + self.assertEquals(pipe.execute(), [True, True, True]) + self.assertEquals(self.client['a'], 'a1') + self.assertEquals(self.client['b'], 'b1') + self.assertEquals(self.client['c'], 'c1') + diff --git a/vendor/redis-py/tests/server_commands.py b/vendor/redis-py/tests/server_commands.py new file mode 100755 index 000000000000..056188aff521 --- /dev/null +++ b/vendor/redis-py/tests/server_commands.py @@ -0,0 +1,1092 @@ +import redis +import unittest +import datetime +import threading +import time +from distutils.version import StrictVersion + +class ServerCommandsTestCase(unittest.TestCase): + + def get_client(self): + return redis.Redis(host='localhost', port=6379, db=9) + + def setUp(self): + self.client = self.get_client() + self.client.flushdb() + + def tearDown(self): + self.client.flushdb() + + # GENERAL SERVER COMMANDS + def test_dbsize(self): + self.client['a'] = 'foo' + self.client['b'] = 'bar' + self.assertEquals(self.client.dbsize(), 2) + + def test_get_and_set(self): + # get and set can't be tested independently of each other + self.assertEquals(self.client.get('a'), None) + byte_string = 'value' + integer = 5 + unicode_string = unichr(3456) + u'abcd' + unichr(3421) + self.assert_(self.client.set('byte_string', byte_string)) + self.assert_(self.client.set('integer', 5)) + self.assert_(self.client.set('unicode_string', unicode_string)) + self.assertEquals(self.client.get('byte_string'), byte_string) + self.assertEquals(self.client.get('integer'), str(integer)) + self.assertEquals(self.client.get('unicode_string').decode('utf-8'), unicode_string) + + def test_getitem_and_setitem(self): + self.client['a'] = 'bar' + self.assertEquals(self.client['a'], 'bar') + + def test_delete(self): + self.assertEquals(self.client.delete('a'), False) + self.client['a'] = 'foo' + self.assertEquals(self.client.delete('a'), True) + + def test_delitem(self): + self.client['a'] = 'foo' + del self.client['a'] + self.assertEquals(self.client['a'], None) + + def test_info(self): + self.client['a'] = 'foo' + self.client['b'] = 'bar' + info = self.client.info() + self.assert_(isinstance(info, dict)) + self.assertEquals(info['db9']['keys'], 2) + + def test_lastsave(self): + self.assert_(isinstance(self.client.lastsave(), datetime.datetime)) + + def test_ping(self): + self.assertEquals(self.client.ping(), True) + + + # KEYS + def test_append(self): + # invalid key type + self.client.rpush('a', 'a1') + self.assertRaises(redis.ResponseError, self.client.append, 'a', 'a1') + del self.client['a'] + # real logic + self.assertEquals(self.client.append('a', 'a1'), 2) + self.assertEquals(self.client['a'], 'a1') + self.assert_(self.client.append('a', 'a2'), 4) + self.assertEquals(self.client['a'], 'a1a2') + + def test_decr(self): + self.assertEquals(self.client.decr('a'), -1) + self.assertEquals(self.client['a'], '-1') + self.assertEquals(self.client.decr('a'), -2) + self.assertEquals(self.client['a'], '-2') + self.assertEquals(self.client.decr('a', amount=5), -7) + self.assertEquals(self.client['a'], '-7') + + def test_exists(self): + self.assertEquals(self.client.exists('a'), False) + self.client['a'] = 'foo' + self.assertEquals(self.client.exists('a'), True) + + def test_expire_and_ttl(self): + 
self.assertEquals(self.client.expire('a', 10), False) + self.client['a'] = 'foo' + self.assertEquals(self.client.expire('a', 10), True) + self.assertEquals(self.client.ttl('a'), 10) + + def test_expireat(self): + expire_at = datetime.datetime.now() + datetime.timedelta(minutes=1) + self.assertEquals(self.client.expireat('a', expire_at), False) + self.client['a'] = 'foo' + # expire at in unix time + expire_at_seconds = int(time.mktime(expire_at.timetuple())) + self.assertEquals(self.client.expireat('a', expire_at_seconds), True) + self.assertEquals(self.client.ttl('a'), 60) + # expire at given a datetime object + self.client['b'] = 'bar' + self.assertEquals(self.client.expireat('b', expire_at), True) + self.assertEquals(self.client.ttl('b'), 60) + + def test_getset(self): + self.assertEquals(self.client.getset('a', 'foo'), None) + self.assertEquals(self.client.getset('a', 'bar'), 'foo') + + def test_incr(self): + self.assertEquals(self.client.incr('a'), 1) + self.assertEquals(self.client['a'], '1') + self.assertEquals(self.client.incr('a'), 2) + self.assertEquals(self.client['a'], '2') + self.assertEquals(self.client.incr('a', amount=5), 7) + self.assertEquals(self.client['a'], '7') + + def test_keys(self): + self.assertEquals(self.client.keys(), []) + keys = set(['test_a', 'test_b', 'testc']) + for key in keys: + self.client[key] = 1 + self.assertEquals(set(self.client.keys(pattern='test_*')), + keys - set(['testc'])) + self.assertEquals(set(self.client.keys(pattern='test*')), keys) + + def test_mget(self): + self.assertEquals(self.client.mget(['a', 'b']), [None, None]) + self.client['a'] = '1' + self.client['b'] = '2' + self.client['c'] = '3' + self.assertEquals(self.client.mget(['a', 'other', 'b', 'c']), + ['1', None, '2', '3']) + + def test_mset(self): + d = {'a': '1', 'b': '2', 'c': '3'} + self.assert_(self.client.mset(d)) + for k,v in d.iteritems(): + self.assertEquals(self.client[k], v) + + def test_msetnx(self): + d = {'a': '1', 'b': '2', 'c': '3'} + self.assert_(self.client.msetnx(d)) + d2 = {'a': 'x', 'd': '4'} + self.assert_(not self.client.msetnx(d2)) + for k,v in d.iteritems(): + self.assertEquals(self.client[k], v) + self.assertEquals(self.client['d'], None) + + def test_randomkey(self): + self.assertEquals(self.client.randomkey(), None) + self.client['a'] = '1' + self.client['b'] = '2' + self.client['c'] = '3' + self.assert_(self.client.randomkey() in ('a', 'b', 'c')) + + def test_rename(self): + self.client['a'] = '1' + self.assert_(self.client.rename('a', 'b')) + self.assertEquals(self.client['a'], None) + self.assertEquals(self.client['b'], '1') + + def test_renamenx(self): + self.client['a'] = '1' + self.client['b'] = '2' + self.assert_(not self.client.renamenx('a', 'b')) + self.assertEquals(self.client['a'], '1') + self.assertEquals(self.client['b'], '2') + + def test_setex(self): + self.assertEquals(self.client.setex('a', '1', 60), True) + self.assertEquals(self.client['a'], '1') + self.assertEquals(self.client.ttl('a'), 60 ) + + def test_setnx(self): + self.assert_(self.client.setnx('a', '1')) + self.assertEquals(self.client['a'], '1') + self.assert_(not self.client.setnx('a', '2')) + self.assertEquals(self.client['a'], '1') + + def test_substr(self): + # invalid key type + self.client.rpush('a', 'a1') + self.assertRaises(redis.ResponseError, self.client.substr, 'a', 0) + del self.client['a'] + # real logic + self.client['a'] = 'abcdefghi' + self.assertEquals(self.client.substr('a', 0), 'abcdefghi') + self.assertEquals(self.client.substr('a', 2), 'cdefghi') + 
self.assertEquals(self.client.substr('a', 3, 5), 'def') + self.assertEquals(self.client.substr('a', 3, -2), 'defgh') + self.client['a'] = 123456 # does substr work with ints? + self.assertEquals(self.client.substr('a', 2, -2), '345') + + def test_type(self): + self.assertEquals(self.client.type('a'), 'none') + self.client['a'] = '1' + self.assertEquals(self.client.type('a'), 'string') + del self.client['a'] + self.client.lpush('a', '1') + self.assertEquals(self.client.type('a'), 'list') + del self.client['a'] + self.client.sadd('a', '1') + self.assertEquals(self.client.type('a'), 'set') + del self.client['a'] + self.client.zadd('a', '1', 1) + self.assertEquals(self.client.type('a'), 'zset') + + # LISTS + def make_list(self, name, l): + for i in l: + self.client.rpush(name, i) + + def test_blpop(self): + self.make_list('a', 'ab') + self.make_list('b', 'cd') + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), ['b', 'c']) + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), ['b', 'd']) + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), ['a', 'a']) + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), ['a', 'b']) + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), None) + + def test_brpop(self): + self.make_list('a', 'ab') + self.make_list('b', 'cd') + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), ['b', 'd']) + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), ['b', 'c']) + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), ['a', 'b']) + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), ['a', 'a']) + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), None) + + def test_lindex(self): + # no key + self.assertEquals(self.client.lindex('a', '0'), None) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lindex, 'a', '0') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.lindex('a', '0'), 'a') + self.assertEquals(self.client.lindex('a', '1'), 'b') + self.assertEquals(self.client.lindex('a', '2'), 'c') + + def test_llen(self): + # no key + self.assertEquals(self.client.llen('a'), 0) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.llen, 'a') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.llen('a'), 3) + + def test_lpop(self): + # no key + self.assertEquals(self.client.lpop('a'), None) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lpop, 'a') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.lpop('a'), 'a') + self.assertEquals(self.client.lpop('a'), 'b') + self.assertEquals(self.client.lpop('a'), 'c') + self.assertEquals(self.client.lpop('a'), None) + + def test_lpush(self): + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lpush, 'a', 'a') + del self.client['a'] + # real logic + version = self.client.info()['redis_version'] + if StrictVersion(version) >= StrictVersion('1.3.4'): + self.assertEqual(1, self.client.lpush('a', 'b')) + self.assertEqual(2, self.client.lpush('a', 'a')) + else: + self.assert_(self.client.lpush('a', 'b')) + self.assert_(self.client.lpush('a', 'a')) + self.assertEquals(self.client.lindex('a', 0), 'a') + self.assertEquals(self.client.lindex('a', 1), 'b') + + def test_lrange(self): + # no key + self.assertEquals(self.client.lrange('a', 
0, 1), []) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lrange, 'a', 0, 1) + del self.client['a'] + # real logic + self.make_list('a', 'abcde') + self.assertEquals(self.client.lrange('a', 0, 2), ['a', 'b', 'c']) + self.assertEquals(self.client.lrange('a', 2, 10), ['c', 'd', 'e']) + + def test_lrem(self): + # no key + self.assertEquals(self.client.lrem('a', 'foo'), 0) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lrem, 'a', 'b') + del self.client['a'] + # real logic + self.make_list('a', 'aaaa') + self.assertEquals(self.client.lrem('a', 'a', 1), 1) + self.assertEquals(self.client.lrange('a', 0, 3), ['a', 'a', 'a']) + self.assertEquals(self.client.lrem('a', 'a'), 3) + # remove all the elements in the list means the key is deleted + self.assertEquals(self.client.lrange('a', 0, 1), []) + + def test_lset(self): + # no key + self.assertRaises(redis.ResponseError, self.client.lset, 'a', 1, 'b') + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lset, 'a', 1, 'b') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.lrange('a', 0, 2), ['a', 'b', 'c']) + self.assert_(self.client.lset('a', 1, 'd')) + self.assertEquals(self.client.lrange('a', 0, 2), ['a', 'd', 'c']) + + def test_ltrim(self): + # no key -- TODO: Not sure why this is actually true. + self.assert_(self.client.ltrim('a', 0, 2)) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.ltrim, 'a', 0, 2) + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assert_(self.client.ltrim('a', 0, 1)) + self.assertEquals(self.client.lrange('a', 0, 5), ['a', 'b']) + + def test_lpop(self): + # no key + self.assertEquals(self.client.lpop('a'), None) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lpop, 'a') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.lpop('a'), 'a') + self.assertEquals(self.client.lpop('a'), 'b') + self.assertEquals(self.client.lpop('a'), 'c') + self.assertEquals(self.client.lpop('a'), None) + + def test_rpop(self): + # no key + self.assertEquals(self.client.rpop('a'), None) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.rpop, 'a') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.rpop('a'), 'c') + self.assertEquals(self.client.rpop('a'), 'b') + self.assertEquals(self.client.rpop('a'), 'a') + self.assertEquals(self.client.rpop('a'), None) + + def test_rpoplpush(self): + # no src key + self.make_list('b', ['b1']) + self.assertEquals(self.client.rpoplpush('a', 'b'), None) + # no dest key + self.assertEquals(self.client.rpoplpush('b', 'a'), 'b1') + self.assertEquals(self.client.lindex('a', 0), 'b1') + del self.client['a'] + del self.client['b'] + # src key is not a list + self.client['a'] = 'a1' + self.assertRaises(redis.ResponseError, self.client.rpoplpush, 'a', 'b') + del self.client['a'] + # dest key is not a list + self.make_list('a', ['a1']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.rpoplpush, 'a', 'b') + del self.client['a'] + del self.client['b'] + # real logic + self.make_list('a', ['a1', 'a2', 'a3']) + self.make_list('b', ['b1', 'b2', 'b3']) + self.assertEquals(self.client.rpoplpush('a', 'b'), 'a3') + 
self.assertEquals(self.client.lrange('a', 0, 2), ['a1', 'a2']) + self.assertEquals(self.client.lrange('b', 0, 4), + ['a3', 'b1', 'b2', 'b3']) + + def test_rpush(self): + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.rpush, 'a', 'a') + del self.client['a'] + # real logic + version = self.client.info()['redis_version'] + if StrictVersion(version) >= StrictVersion('1.3.4'): + self.assertEqual(1, self.client.rpush('a', 'a')) + self.assertEqual(2, self.client.rpush('a', 'b')) + else: + self.assert_(self.client.rpush('a', 'a')) + self.assert_(self.client.rpush('a', 'b')) + self.assertEquals(self.client.lindex('a', 0), 'a') + self.assertEquals(self.client.lindex('a', 1), 'b') + + # Set commands + def make_set(self, name, l): + for i in l: + self.client.sadd(name, i) + + def test_sadd(self): + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.sadd, 'a', 'a1') + del self.client['a'] + # real logic + members = set(['a1', 'a2', 'a3']) + self.make_set('a', members) + self.assertEquals(self.client.smembers('a'), members) + + def test_scard(self): + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.scard, 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + self.assertEquals(self.client.scard('a'), 3) + + def test_sdiff(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sdiff, ['a', 'b']) + del self.client['b'] + # real logic + self.make_set('b', ['b1', 'a2', 'b3']) + self.assertEquals(self.client.sdiff(['a', 'b']), set(['a1', 'a3'])) + + def test_sdiffstore(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sdiffstore, + 'c', ['a', 'b']) + del self.client['b'] + self.make_set('b', ['b1', 'a2', 'b3']) + # dest key always gets overwritten, even if it's not a set, so don't + # test for that + # real logic + self.assertEquals(self.client.sdiffstore('c', ['a', 'b']), 2) + self.assertEquals(self.client.smembers('c'), set(['a1', 'a3'])) + + def test_sinter(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sinter, ['a', 'b']) + del self.client['b'] + # real logic + self.make_set('b', ['a1', 'b2', 'a3']) + self.assertEquals(self.client.sinter(['a', 'b']), set(['a1', 'a3'])) + + def test_sinterstore(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sinterstore, + 'c', ['a', 'b']) + del self.client['b'] + self.make_set('b', ['a1', 'b2', 'a3']) + # dest key always gets overwritten, even if it's not a set, so don't + # test for that + # real logic + self.assertEquals(self.client.sinterstore('c', ['a', 'b']), 2) + self.assertEquals(self.client.smembers('c'), set(['a1', 'a3'])) + + def test_sismember(self): + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.sismember, 'a', 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + self.assertEquals(self.client.sismember('a', 'a'), True) + self.assertEquals(self.client.sismember('a', 'b'), True) + self.assertEquals(self.client.sismember('a', 'c'), True) + self.assertEquals(self.client.sismember('a', 'd'), False) + + def test_smembers(self): + # 
key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.smembers, 'a') + del self.client['a'] + # set doesn't exist + self.assertEquals(self.client.smembers('a'), set()) + # real logic + self.make_set('a', 'abc') + self.assertEquals(self.client.smembers('a'), set(['a', 'b', 'c'])) + + def test_smove(self): + # src key is not set + self.make_set('b', ['b1', 'b2']) + self.assertEquals(self.client.smove('a', 'b', 'a1'), 0) + # src key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.smove, + 'a', 'b', 'a1') + del self.client['a'] + self.make_set('a', ['a1', 'a2']) + # dest key is not a set + del self.client['b'] + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.smove, + 'a', 'b', 'a1') + del self.client['b'] + self.make_set('b', ['b1', 'b2']) + # real logic + self.assert_(self.client.smove('a', 'b', 'a1')) + self.assertEquals(self.client.smembers('a'), set(['a2'])) + self.assertEquals(self.client.smembers('b'), set(['b1', 'b2', 'a1'])) + + def test_spop(self): + # key is not set + self.assertEquals(self.client.spop('a'), None) + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.spop, 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + value = self.client.spop('a') + self.assert_(value in 'abc') + self.assertEquals(self.client.smembers('a'), set('abc') - set(value)) + + def test_srandmember(self): + # key is not set + self.assertEquals(self.client.srandmember('a'), None) + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.srandmember, 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + self.assert_(self.client.srandmember('a') in 'abc') + + def test_srem(self): + # key is not set + self.assertEquals(self.client.srem('a', 'a'), False) + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.srem, 'a', 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + self.assertEquals(self.client.srem('a', 'd'), False) + self.assertEquals(self.client.srem('a', 'b'), True) + self.assertEquals(self.client.smembers('a'), set('ac')) + + def test_sunion(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sunion, ['a', 'b']) + del self.client['b'] + # real logic + self.make_set('b', ['a1', 'b2', 'a3']) + self.assertEquals(self.client.sunion(['a', 'b']), + set(['a1', 'a2', 'a3', 'b2'])) + + def test_sunionstore(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sunionstore, + 'c', ['a', 'b']) + del self.client['b'] + self.make_set('b', ['a1', 'b2', 'a3']) + # dest key always gets overwritten, even if it's not a set, so don't + # test for that + # real logic + self.assertEquals(self.client.sunionstore('c', ['a', 'b']), 4) + self.assertEquals(self.client.smembers('c'), + set(['a1', 'a2', 'a3', 'b2'])) + + # SORTED SETS + def make_zset(self, name, d): + for k,v in d.items(): + self.client.zadd(name, k, v) + + def test_zadd(self): + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.zrange('a', 0, 3), ['a1', 'a2', 'a3']) + + def test_zcard(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zcard, 'a') + del self.client['a'] + # real 
logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.zcard('a'), 3) + + def test_zincrby(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zincrby, 'a', 'a1') + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.zincrby('a', 'a2'), 3.0) + self.assertEquals(self.client.zincrby('a', 'a3', amount=5), 8.0) + self.assertEquals(self.client.zscore('a', 'a2'), 3.0) + self.assertEquals(self.client.zscore('a', 'a3'), 8.0) + + def test_zinter(self): + self.make_zset('a', {'a1': 1, 'a2': 1, 'a3': 1}) + self.make_zset('b', {'a1': 2, 'a3': 2, 'a4': 2}) + self.make_zset('c', {'a1': 6, 'a3': 5, 'a4': 4}) + + # sum, no weight + self.assert_(self.client.zinter('z', ['a', 'b', 'c'])) + self.assertEquals( + self.client.zrange('z', 0, -1, withscores=True), + [('a3', 8), ('a1', 9)] + ) + + # max, no weight + self.assert_(self.client.zinter('z', ['a', 'b', 'c'], aggregate='MAX')) + self.assertEquals( + self.client.zrange('z', 0, -1, withscores=True), + [('a3', 5), ('a1', 6)] + ) + + # with weight + self.assert_(self.client.zinter('z', {'a': 1, 'b': 2, 'c': 3})) + self.assertEquals( + self.client.zrange('z', 0, -1, withscores=True), + [('a3', 20), ('a1', 23)] + ) + + + def test_zrange(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zrange, 'a', 0, 1) + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.zrange('a', 0, 1), ['a1', 'a2']) + self.assertEquals(self.client.zrange('a', 1, 2), ['a2', 'a3']) + self.assertEquals(self.client.zrange('a', 0, 1, withscores=True), + [('a1', 1.0), ('a2', 2.0)]) + self.assertEquals(self.client.zrange('a', 1, 2, withscores=True), + [('a2', 2.0), ('a3', 3.0)]) + # a non existant key should return empty list + self.assertEquals(self.client.zrange('b', 0, 1, withscores=True), []) + + + def test_zrangebyscore(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zrangebyscore, + 'a', 0, 1) + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) + self.assertEquals(self.client.zrangebyscore('a', 2, 4), + ['a2', 'a3', 'a4']) + self.assertEquals(self.client.zrangebyscore('a', 2, 4, start=1, num=2), + ['a3', 'a4']) + self.assertEquals(self.client.zrangebyscore('a', 2, 4, withscores=True), + [('a2', 2.0), ('a3', 3.0), ('a4', 4.0)]) + # a non existant key should return empty list + self.assertEquals(self.client.zrangebyscore('b', 0, 1, withscores=True), []) + + def test_zrank(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zrank, 'a', 'a4') + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) + self.assertEquals(self.client.zrank('a', 'a1'), 0) + self.assertEquals(self.client.zrank('a', 'a2'), 1) + self.assertEquals(self.client.zrank('a', 'a3'), 2) + self.assertEquals(self.client.zrank('a', 'a4'), 3) + self.assertEquals(self.client.zrank('a', 'a5'), 4) + # non-existent value in zset + self.assertEquals(self.client.zrank('a', 'a6'), None) + + def test_zrem(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zrem, 'a', 'a1') + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + 
self.assertEquals(self.client.zrem('a', 'a2'), True) + self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a3']) + self.assertEquals(self.client.zrem('a', 'b'), False) + self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a3']) + + def test_zremrangebyscore(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zremrangebyscore, + 'a', 0, 1) + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5}) + self.assertEquals(self.client.zremrangebyscore('a', 2, 4), 3) + self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a5']) + self.assertEquals(self.client.zremrangebyscore('a', 2, 4), 0) + self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a5']) + + def test_zrevrange(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zrevrange, + 'a', 0, 1) + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.zrevrange('a', 0, 1), ['a3', 'a2']) + self.assertEquals(self.client.zrevrange('a', 1, 2), ['a2', 'a1']) + self.assertEquals(self.client.zrevrange('a', 0, 1, withscores=True), + [('a3', 3.0), ('a2', 2.0)]) + self.assertEquals(self.client.zrevrange('a', 1, 2, withscores=True), + [('a2', 2.0), ('a1', 1.0)]) + # a non existant key should return empty list + self.assertEquals(self.client.zrange('b', 0, 1, withscores=True), []) + + def test_zrevrank(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zrevrank, 'a', 'a4') + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 5, 'a2': 4, 'a3': 3, 'a4': 2, 'a5': 1}) + self.assertEquals(self.client.zrevrank('a', 'a1'), 0) + self.assertEquals(self.client.zrevrank('a', 'a2'), 1) + self.assertEquals(self.client.zrevrank('a', 'a3'), 2) + self.assertEquals(self.client.zrevrank('a', 'a4'), 3) + self.assertEquals(self.client.zrevrank('a', 'a5'), 4) + + def test_zscore(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zscore, 'a', 'a1') + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 0, 'a2': 1, 'a3': 2}) + self.assertEquals(self.client.zscore('a', 'a1'), 0.0) + self.assertEquals(self.client.zscore('a', 'a2'), 1.0) + # test a non-existant member + self.assertEquals(self.client.zscore('a', 'a4'), None) + + def test_zunion(self): + self.make_zset('a', {'a1': 1, 'a2': 1, 'a3': 1}) + self.make_zset('b', {'a1': 2, 'a3': 2, 'a4': 2}) + self.make_zset('c', {'a1': 6, 'a4': 5, 'a5': 4}) + + # sum, no weight + self.assert_(self.client.zunion('z', ['a', 'b', 'c'])) + self.assertEquals( + self.client.zrange('z', 0, -1, withscores=True), + [('a2', 1), ('a3', 3), ('a5', 4), ('a4', 7), ('a1', 9)] + ) + + # max, no weight + self.assert_(self.client.zunion('z', ['a', 'b', 'c'], aggregate='MAX')) + self.assertEquals( + self.client.zrange('z', 0, -1, withscores=True), + [('a2', 1), ('a3', 2), ('a5', 4), ('a4', 5), ('a1', 6)] + ) + + # with weight + self.assert_(self.client.zunion('z', {'a': 1, 'b': 2, 'c': 3})) + self.assertEquals( + self.client.zrange('z', 0, -1, withscores=True), + [('a2', 1), ('a3', 5), ('a5', 12), ('a4', 19), ('a1', 23)] + ) + + + # HASHES + def make_hash(self, key, d): + for k,v in d.iteritems(): + self.client.hset(key, k, v) + + def test_hget_and_hset(self): + # key is not a hash + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.hget, 'a', 'a1') + del 
self.client['a'] + # no key + self.assertEquals(self.client.hget('a', 'a1'), None) + # real logic + self.make_hash('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.hget('a', 'a1'), '1') + self.assertEquals(self.client.hget('a', 'a2'), '2') + self.assertEquals(self.client.hget('a', 'a3'), '3') + self.assertEquals(self.client.hset('a', 'a2', 5), 0) + self.assertEquals(self.client.hget('a', 'a2'), '5') + self.assertEquals(self.client.hset('a', 'a4', 4), 1) + self.assertEquals(self.client.hget('a', 'a4'), '4') + # key inside of hash that doesn't exist returns null value + self.assertEquals(self.client.hget('a', 'b'), None) + + def test_hmset(self): + d = {'a': '1', 'b': '2', 'c': '3'} + self.assert_(self.client.hmset('foo', d)) + self.assertEqual(self.client.hgetall('foo'), d) + self.assertRaises(redis.ResponseError, self.client.hmset, 'foo', {}) + + def test_hmget(self): + d = {'a': 1, 'b': 2, 'c': 3} + self.assert_(self.client.hmset('foo', d)) + self.assertEqual(self.client.hmget('foo', ['a', 'b', 'c']), ['1', '2', '3']) + self.assertEqual(self.client.hmget('foo', ['a', 'c']), ['1', '3']) + + def test_hmget_empty(self): + self.assertEqual(self.client.hmget('foo', ['a', 'b']), [None, None]) + + def test_hmget_no_keys(self): + self.assertRaises(redis.ResponseError, self.client.hmget, 'foo', []) + + def test_hdel(self): + # key is not a hash + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.hdel, 'a', 'a1') + del self.client['a'] + # no key + self.assertEquals(self.client.hdel('a', 'a1'), False) + # real logic + self.make_hash('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.hget('a', 'a2'), '2') + self.assert_(self.client.hdel('a', 'a2')) + self.assertEquals(self.client.hget('a', 'a2'), None) + + def test_hexists(self): + # key is not a hash + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.hexists, 'a', 'a1') + del self.client['a'] + # no key + self.assertEquals(self.client.hexists('a', 'a1'), False) + # real logic + self.make_hash('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.hexists('a', 'a1'), True) + self.assertEquals(self.client.hexists('a', 'a4'), False) + self.client.hdel('a', 'a1') + self.assertEquals(self.client.hexists('a', 'a1'), False) + + def test_hgetall(self): + # key is not a hash + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.hgetall, 'a') + del self.client['a'] + # no key + self.assertEquals(self.client.hgetall('a'), {}) + # real logic + h = {'a1': '1', 'a2': '2', 'a3': '3'} + self.make_hash('a', h) + remote_hash = self.client.hgetall('a') + self.assertEquals(h, remote_hash) + + def test_hincrby(self): + # key is not a hash + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.hincrby, 'a', 'a1') + del self.client['a'] + # no key should create the hash and incr the key's value to 1 + self.assertEquals(self.client.hincrby('a', 'a1'), 1) + # real logic + self.assertEquals(self.client.hincrby('a', 'a1'), 2) + self.assertEquals(self.client.hincrby('a', 'a1', amount=2), 4) + # negative values decrement + self.assertEquals(self.client.hincrby('a', 'a1', amount=-3), 1) + # hash that exists, but key that doesn't + self.assertEquals(self.client.hincrby('a', 'a2', amount=3), 3) + # finally a key that's not an int + self.client.hset('a', 'a3', 'foo') + self.assertRaises(redis.ResponseError, self.client.hincrby, 'a', 'a3') + + + def test_hkeys(self): + # key is not a hash + self.client['a'] = 'a' + 
self.assertRaises(redis.ResponseError, self.client.hkeys, 'a') + del self.client['a'] + # no key + self.assertEquals(self.client.hkeys('a'), []) + # real logic + h = {'a1': '1', 'a2': '2', 'a3': '3'} + self.make_hash('a', h) + keys = h.keys() + keys.sort() + remote_keys = self.client.hkeys('a') + remote_keys.sort() + self.assertEquals(keys, remote_keys) + + def test_hlen(self): + # key is not a hash + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.hlen, 'a') + del self.client['a'] + # no key + self.assertEquals(self.client.hlen('a'), 0) + # real logic + self.make_hash('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.hlen('a'), 3) + self.client.hdel('a', 'a3') + self.assertEquals(self.client.hlen('a'), 2) + + def test_hvals(self): + # key is not a hash + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.hvals, 'a') + del self.client['a'] + # no key + self.assertEquals(self.client.hvals('a'), []) + # real logic + h = {'a1': '1', 'a2': '2', 'a3': '3'} + self.make_hash('a', h) + vals = h.values() + vals.sort() + remote_vals = self.client.hvals('a') + remote_vals.sort() + self.assertEquals(vals, remote_vals) + + # SORT + def test_sort_bad_key(self): + # key is not set + self.assertEquals(self.client.sort('a'), []) + # key is a string value + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.sort, 'a') + del self.client['a'] + + def test_sort_basic(self): + self.make_list('a', '3214') + self.assertEquals(self.client.sort('a'), ['1', '2', '3', '4']) + + def test_sort_limited(self): + self.make_list('a', '3214') + self.assertEquals(self.client.sort('a', start=1, num=2), ['2', '3']) + + def test_sort_by(self): + self.client['score:1'] = 8 + self.client['score:2'] = 3 + self.client['score:3'] = 5 + self.make_list('a_values', '123') + self.assertEquals(self.client.sort('a_values', by='score:*'), + ['2', '3', '1']) + + def test_sort_get(self): + self.client['user:1'] = 'u1' + self.client['user:2'] = 'u2' + self.client['user:3'] = 'u3' + self.make_list('a', '231') + self.assertEquals(self.client.sort('a', get='user:*'), + ['u1', 'u2', 'u3']) + + def test_sort_desc(self): + self.make_list('a', '231') + self.assertEquals(self.client.sort('a', desc=True), ['3', '2', '1']) + + def test_sort_alpha(self): + self.make_list('a', 'ecbda') + self.assertEquals(self.client.sort('a', alpha=True), + ['a', 'b', 'c', 'd', 'e']) + + def test_sort_store(self): + self.make_list('a', '231') + self.assertEquals(self.client.sort('a', store='sorted_values'), 3) + self.assertEquals(self.client.lrange('sorted_values', 0, 5), + ['1', '2', '3']) + + def test_sort_all_options(self): + self.client['user:1:username'] = 'zeus' + self.client['user:2:username'] = 'titan' + self.client['user:3:username'] = 'hermes' + self.client['user:4:username'] = 'hercules' + self.client['user:5:username'] = 'apollo' + self.client['user:6:username'] = 'athena' + self.client['user:7:username'] = 'hades' + self.client['user:8:username'] = 'dionysus' + + self.client['user:1:favorite_drink'] = 'yuengling' + self.client['user:2:favorite_drink'] = 'rum' + self.client['user:3:favorite_drink'] = 'vodka' + self.client['user:4:favorite_drink'] = 'milk' + self.client['user:5:favorite_drink'] = 'pinot noir' + self.client['user:6:favorite_drink'] = 'water' + self.client['user:7:favorite_drink'] = 'gin' + self.client['user:8:favorite_drink'] = 'apple juice' + + self.make_list('gods', '12345678') + num = self.client.sort('gods', start=2, num=4, by='user:*:username', + 
get='user:*:favorite_drink', desc=True, alpha=True, store='sorted') + self.assertEquals(num, 4) + self.assertEquals(self.client.lrange('sorted', 0, 10), + ['vodka', 'milk', 'gin', 'apple juice']) + + # PUBSUB + def test_pubsub(self): + # create a new client to not polute the existing one + r = self.get_client() + channels = ('a1', 'a2', 'a3') + for c in channels: + r.subscribe(c) + channels_to_publish_to = channels + ('a4',) + messages_per_channel = 4 + def publish(): + for i in range(messages_per_channel): + for c in channels_to_publish_to: + self.client.publish(c, 'a message') + time.sleep(0.01) + t = threading.Thread(target=publish) + messages = [] + # should receive a message for each subscribe command + # plus a message for each iteration of the loop * num channels + num_messages_to_expect = len(channels) + \ + (messages_per_channel*len(channels)) + thread_started = False + for msg in r.listen(): + if not thread_started: + # start the thread delayed so that we are intermingling + # publish commands with pulling messsages off the socket + # with subscribe + thread_started = True + t.start() + messages.append(msg) + if len(messages) == num_messages_to_expect: + break + sent_types, sent_channels = {}, {} + for msg_type, channel, _ in messages: + sent_types.setdefault(msg_type, 0) + sent_types[msg_type] += 1 + if msg_type == 'message': + sent_channels.setdefault(channel, 0) + sent_channels[channel] += 1 + for channel in channels: + self.assertEquals(sent_channels[channel], messages_per_channel) + self.assert_(channel in channels) + self.assertEquals(sent_types['subscribe'], len(channels)) + self.assertEquals(sent_types['message'], + len(channels) * messages_per_channel) + + ## BINARY SAFE + # TODO add more tests + def test_binary_get_set(self): + self.assertTrue(self.client.set(' foo bar ', '123')) + self.assertEqual(self.client.get(' foo bar '), '123') + + self.assertTrue(self.client.set(' foo\r\nbar\r\n ', '456')) + self.assertEqual(self.client.get(' foo\r\nbar\r\n '), '456') + + self.assertTrue(self.client.set(' \r\n\t\x07\x13 ', '789')) + self.assertEqual(self.client.get(' \r\n\t\x07\x13 '), '789') + + self.assertEqual(sorted(self.client.keys('*')), [' \r\n\t\x07\x13 ', ' foo\r\nbar\r\n ', ' foo bar ']) + + self.assertTrue(self.client.delete(' foo bar ')) + self.assertTrue(self.client.delete(' foo\r\nbar\r\n ')) + self.assertTrue(self.client.delete(' \r\n\t\x07\x13 ')) + + def test_binary_lists(self): + mapping = {'foo bar': '123', + 'foo\r\nbar\r\n': '456', + 'foo\tbar\x07': '789', + } + # fill in lists + for key, value in mapping.iteritems(): + for c in value: + self.assertTrue(self.client.rpush(key, c)) + + # check that KEYS returns all the keys as they are + self.assertEqual(sorted(self.client.keys('*')), sorted(mapping.keys())) + + # check that it is possible to get list content by key name + for key in mapping.keys(): + self.assertEqual(self.client.lrange(key, 0, -1), list(mapping[key])) diff --git a/vendor/tornado/MANIFEST.in b/vendor/tornado/MANIFEST.in new file mode 100644 index 000000000000..c7a51e40946f --- /dev/null +++ b/vendor/tornado/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include demos *.py *.yaml *.html *.css *.png *.js *.xml *.sql README +include tornado/epoll.c diff --git a/vendor/tornado/README b/vendor/tornado/README new file mode 100644 index 000000000000..d504022243d1 --- /dev/null +++ b/vendor/tornado/README @@ -0,0 +1,27 @@ +Tornado +======= +Tornado is an open source version of the scalable, non-blocking web server +and and tools that power FriendFeed. 
Documentation and downloads are +available at http://www.tornadoweb.org/ + +Tornado is licensed under the Apache License, Version 2.0 +(http://www.apache.org/licenses/LICENSE-2.0.html). + +Installation +============ +To install: + + python setup.py build + sudo python setup.py install + +Tornado has been tested on Python 2.5 and 2.6. To use all of the features +of Tornado, you need to have PycURL and a JSON library like simplejson +installed. + +On Mac OS X, you can install the packages with: + + sudo easy_install setuptools pycurl==7.16.2.1 simplejson + +On Ubuntu Linux, you can install the packages with: + + sudo apt-get install python-pycurl python-simplejson diff --git a/vendor/tornado/demos/appengine/README b/vendor/tornado/demos/appengine/README new file mode 100644 index 000000000000..e4aead670194 --- /dev/null +++ b/vendor/tornado/demos/appengine/README @@ -0,0 +1,48 @@ +Running the Tornado AppEngine example +===================================== +This example is designed to run in Google AppEngine, so there are a couple +of steps to get it running. You can download the Google AppEngine Python +development environment at http://code.google.com/appengine/downloads.html. + +1. Link or copy the tornado code directory into this directory: + + ln -s ../../tornado tornado + + AppEngine doesn't use the Python modules installed on this machine. + You need to have the 'tornado' module copied or linked for AppEngine + to find it. + +2. Install and run dev_appserver + + If you don't already have the App Engine SDK, download it from + http://code.google.com/appengine/downloads.html + + To start the tornado demo, run the dev server on this directory: + + dev_appserver.py . + +3. Visit http://localhost:8080/ in your browser + + If you sign in as an administrator, you will be able to create and + edit blog posts. If you sign in as anybody else, you will only see + the existing blog posts. + + +If you want to deploy the blog in production: + +1. Register a new appengine application and put its id in app.yaml + + First register a new application at http://appengine.google.com/. + Then edit app.yaml in this directory and change the "application" + setting from "tornado-appengine" to your new application id. + +2. Deploy to App Engine + + If you registered an application id, you can now upload your new + Tornado blog by running this command: + + appcfg update . + + After that, visit application_id.appspot.com, where application_id + is the application you registered. + diff --git a/vendor/tornado/demos/appengine/app.yaml b/vendor/tornado/demos/appengine/app.yaml new file mode 100644 index 000000000000..2d00c586ddf0 --- /dev/null +++ b/vendor/tornado/demos/appengine/app.yaml @@ -0,0 +1,11 @@ +application: tornado-appengine +version: 1 +runtime: python +api_version: 1 + +handlers: +- url: /static/ + static_dir: static + +- url: /.* + script: blog.py diff --git a/vendor/tornado/demos/appengine/blog.py b/vendor/tornado/demos/appengine/blog.py new file mode 100644 index 000000000000..ccaabd539267 --- /dev/null +++ b/vendor/tornado/demos/appengine/blog.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import functools +import markdown +import os.path +import re +import tornado.web +import tornado.wsgi +import unicodedata +import wsgiref.handlers + +from google.appengine.api import users +from google.appengine.ext import db + + +class Entry(db.Model): + """A single blog entry.""" + author = db.UserProperty() + title = db.StringProperty(required=True) + slug = db.StringProperty(required=True) + markdown = db.TextProperty(required=True) + html = db.TextProperty(required=True) + published = db.DateTimeProperty(auto_now_add=True) + updated = db.DateTimeProperty(auto_now=True) + + +def administrator(method): + """Decorate with this method to restrict to site admins.""" + @functools.wraps(method) + def wrapper(self, *args, **kwargs): + if not self.current_user: + if self.request.method == "GET": + self.redirect(self.get_login_url()) + return + raise tornado.web.HTTPError(403) + elif not self.current_user.administrator: + if self.request.method == "GET": + self.redirect("/") + return + raise tornado.web.HTTPError(403) + else: + return method(self, *args, **kwargs) + return wrapper + + +class BaseHandler(tornado.web.RequestHandler): + """Implements Google Accounts authentication methods.""" + def get_current_user(self): + user = users.get_current_user() + if user: user.administrator = users.is_current_user_admin() + return user + + def get_login_url(self): + return users.create_login_url(self.request.uri) + + def render_string(self, template_name, **kwargs): + # Let the templates access the users module to generate login URLs + return tornado.web.RequestHandler.render_string( + self, template_name, users=users, **kwargs) + + +class HomeHandler(BaseHandler): + def get(self): + entries = db.Query(Entry).order('-published').fetch(limit=5) + if not entries: + if not self.current_user or self.current_user.administrator: + self.redirect("/compose") + return + self.render("home.html", entries=entries) + + +class EntryHandler(BaseHandler): + def get(self, slug): + entry = db.Query(Entry).filter("slug =", slug).get() + if not entry: raise tornado.web.HTTPError(404) + self.render("entry.html", entry=entry) + + +class ArchiveHandler(BaseHandler): + def get(self): + entries = db.Query(Entry).order('-published') + self.render("archive.html", entries=entries) + + +class FeedHandler(BaseHandler): + def get(self): + entries = db.Query(Entry).order('-published').fetch(limit=10) + self.set_header("Content-Type", "application/atom+xml") + self.render("feed.xml", entries=entries) + + +class ComposeHandler(BaseHandler): + @administrator + def get(self): + key = self.get_argument("key", None) + entry = Entry.get(key) if key else None + self.render("compose.html", entry=entry) + + @administrator + def post(self): + key = self.get_argument("key", None) + if key: + entry = Entry.get(key) + entry.title = self.get_argument("title") + entry.markdown = self.get_argument("markdown") + entry.html = markdown.markdown(self.get_argument("markdown")) + else: + title = self.get_argument("title") + slug = unicodedata.normalize("NFKD", title).encode( + "ascii", "ignore") + slug = re.sub(r"[^\w]+", " ", slug) + slug = 
"-".join(slug.lower().strip().split()) + if not slug: slug = "entry" + while True: + existing = db.Query(Entry).filter("slug =", slug).get() + if not existing or str(existing.key()) == key: + break + slug += "-2" + entry = Entry( + author=self.current_user, + title=title, + slug=slug, + markdown=self.get_argument("markdown"), + html=markdown.markdown(self.get_argument("markdown")), + ) + entry.put() + self.redirect("/entry/" + entry.slug) + + +class EntryModule(tornado.web.UIModule): + def render(self, entry): + return self.render_string("modules/entry.html", entry=entry) + + +settings = { + "blog_title": u"Tornado Blog", + "template_path": os.path.join(os.path.dirname(__file__), "templates"), + "ui_modules": {"Entry": EntryModule}, + "xsrf_cookies": True, +} +application = tornado.wsgi.WSGIApplication([ + (r"/", HomeHandler), + (r"/archive", ArchiveHandler), + (r"/feed", FeedHandler), + (r"/entry/([^/]+)", EntryHandler), + (r"/compose", ComposeHandler), +], **settings) + + +def main(): + wsgiref.handlers.CGIHandler().run(application) + + +if __name__ == "__main__": + main() diff --git a/vendor/tornado/demos/appengine/markdown.py b/vendor/tornado/demos/appengine/markdown.py new file mode 100644 index 000000000000..59ba731bf0c5 --- /dev/null +++ b/vendor/tornado/demos/appengine/markdown.py @@ -0,0 +1,1877 @@ +#!/usr/bin/env python +# Copyright (c) 2007-2008 ActiveState Corp. +# License: MIT (http://www.opensource.org/licenses/mit-license.php) + +r"""A fast and complete Python implementation of Markdown. + +[from http://daringfireball.net/projects/markdown/] +> Markdown is a text-to-HTML filter; it translates an easy-to-read / +> easy-to-write structured text format into HTML. Markdown's text +> format is most similar to that of plain text email, and supports +> features such as headers, *emphasis*, code blocks, blockquotes, and +> links. +> +> Markdown's syntax is designed not as a generic markup language, but +> specifically to serve as a front-end to (X)HTML. You can use span-level +> HTML tags anywhere in a Markdown document, and you can use block level +> HTML tags (like
<div> and <table> as well). + +Module usage: + + >>> import markdown2 + >>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)` + u'<p><em>boo!</em></p>\n' + + >>> markdowner = Markdown() + >>> markdowner.convert("*boo!*") + u'<p><em>boo!</em></p>\n' + >>> markdowner.convert("**boom!**") + u'<p><strong>boom!</strong></p>
                                \n' + +This implementation of Markdown implements the full "core" syntax plus a +number of extras (e.g., code syntax coloring, footnotes) as described on +. +""" + +cmdln_desc = """A fast and complete Python implementation of Markdown, a +text-to-HTML conversion tool for web writers. +""" + +# Dev Notes: +# - There is already a Python markdown processor +# (http://www.freewisdom.org/projects/python-markdown/). +# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm +# not yet sure if there implications with this. Compare 'pydoc sre' +# and 'perldoc perlre'. + +__version_info__ = (1, 0, 1, 14) # first three nums match Markdown.pl +__version__ = '1.0.1.14' +__author__ = "Trent Mick" + +import os +import sys +from pprint import pprint +import re +import logging +try: + from hashlib import md5 +except ImportError: + from md5 import md5 +import optparse +from random import random +import codecs + + + +#---- Python version compat + +if sys.version_info[:2] < (2,4): + from sets import Set as set + def reversed(sequence): + for i in sequence[::-1]: + yield i + def _unicode_decode(s, encoding, errors='xmlcharrefreplace'): + return unicode(s, encoding, errors) +else: + def _unicode_decode(s, encoding, errors='strict'): + return s.decode(encoding, errors) + + +#---- globals + +DEBUG = False +log = logging.getLogger("markdown") + +DEFAULT_TAB_WIDTH = 4 + +# Table of hash values for escaped characters: +def _escape_hash(s): + # Lame attempt to avoid possible collision with someone actually + # using the MD5 hexdigest of one of these chars in there text. + # Other ideas: random.random(), uuid.uuid() + #return md5(s).hexdigest() # Markdown.pl effectively does this. + return 'md5-'+md5(s).hexdigest() +g_escape_table = dict([(ch, _escape_hash(ch)) + for ch in '\\`*_{}[]()>#+-.!']) + + + +#---- exceptions + +class MarkdownError(Exception): + pass + + + +#---- public api + +def markdown_path(path, encoding="utf-8", + html4tags=False, tab_width=DEFAULT_TAB_WIDTH, + safe_mode=None, extras=None, link_patterns=None, + use_file_vars=False): + text = codecs.open(path, 'r', encoding).read() + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + use_file_vars=use_file_vars).convert(text) + +def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH, + safe_mode=None, extras=None, link_patterns=None, + use_file_vars=False): + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + use_file_vars=use_file_vars).convert(text) + +class Markdown(object): + # The dict of "extras" to enable in processing -- a mapping of + # extra name to argument for the extra. Most extras do not have an + # argument, in which case the value is None. + # + # This can be set via (a) subclassing and (b) the constructor + # "extras" argument. 
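+    # A minimal illustrative sketch of both routes, assuming the module is
+    # importable as `markdown2` (the subclass name below is made up):
+    #
+    #     import markdown2
+    #     html = markdown2.markdown("*boo!*", extras=["footnotes"])   # (b) constructor arg
+    #
+    #     class MyMarkdown(markdown2.Markdown):                       # (a) subclassing
+    #         extras = ["footnotes", "code-color"]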
+ extras = None + + urls = None + titles = None + html_blocks = None + html_spans = None + html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py + + # Used to track when we're inside an ordered or unordered list + # (see _ProcessListItems() for details): + list_level = 0 + + _ws_only_line_re = re.compile(r"^[ \t]+$", re.M) + + def __init__(self, html4tags=False, tab_width=4, safe_mode=None, + extras=None, link_patterns=None, use_file_vars=False): + if html4tags: + self.empty_element_suffix = ">" + else: + self.empty_element_suffix = " />" + self.tab_width = tab_width + + # For compatibility with earlier markdown2.py and with + # markdown.py's safe_mode being a boolean, + # safe_mode == True -> "replace" + if safe_mode is True: + self.safe_mode = "replace" + else: + self.safe_mode = safe_mode + + if self.extras is None: + self.extras = {} + elif not isinstance(self.extras, dict): + self.extras = dict([(e, None) for e in self.extras]) + if extras: + if not isinstance(extras, dict): + extras = dict([(e, None) for e in extras]) + self.extras.update(extras) + assert isinstance(self.extras, dict) + self._instance_extras = self.extras.copy() + self.link_patterns = link_patterns + self.use_file_vars = use_file_vars + self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M) + + def reset(self): + self.urls = {} + self.titles = {} + self.html_blocks = {} + self.html_spans = {} + self.list_level = 0 + self.extras = self._instance_extras.copy() + if "footnotes" in self.extras: + self.footnotes = {} + self.footnote_ids = [] + + def convert(self, text): + """Convert the given text.""" + # Main function. The order in which other subs are called here is + # essential. Link and image substitutions need to happen before + # _EscapeSpecialChars(), so that any *'s or _'s in the + # and tags get encoded. + + # Clear the global hashes. If we don't clear these, you get conflicts + # from other articles when generating a page which contains more than + # one article (e.g. an index page that shows the N most recent + # articles): + self.reset() + + if not isinstance(text, unicode): + #TODO: perhaps shouldn't presume UTF-8 for string input? + text = unicode(text, 'utf-8') + + if self.use_file_vars: + # Look for emacs-style file variable hints. + emacs_vars = self._get_emacs_vars(text) + if "markdown-extras" in emacs_vars: + splitter = re.compile("[ ,]+") + for e in splitter.split(emacs_vars["markdown-extras"]): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + self.extras[ename] = earg + + # Standardize line endings: + text = re.sub("\r\n|\r", "\n", text) + + # Make sure $text ends with a couple of newlines: + text += "\n\n" + + # Convert all tabs to spaces. + text = self._detab(text) + + # Strip any lines consisting only of spaces and tabs. + # This makes subsequent regexen easier to write, because we can + # match consecutive blank lines with /\n+/ instead of something + # contorted like /[ \t]*\n+/ . + text = self._ws_only_line_re.sub("", text) + + if self.safe_mode: + text = self._hash_html_spans(text) + + # Turn block-level HTML blocks into hash entries + text = self._hash_html_blocks(text, raw=True) + + # Strip link definitions, store in hashes. 
+ if "footnotes" in self.extras: + # Must do footnotes first because an unlucky footnote defn + # looks like a link defn: + # [^4]: this "looks like a link defn" + text = self._strip_footnote_definitions(text) + text = self._strip_link_definitions(text) + + text = self._run_block_gamut(text) + + if "footnotes" in self.extras: + text = self._add_footnotes(text) + + text = self._unescape_special_chars(text) + + if self.safe_mode: + text = self._unhash_html_spans(text) + + text += "\n" + return text + + _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE) + # This regular expression is intended to match blocks like this: + # PREFIX Local Variables: SUFFIX + # PREFIX mode: Tcl SUFFIX + # PREFIX End: SUFFIX + # Some notes: + # - "[ \t]" is used instead of "\s" to specifically exclude newlines + # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does + # not like anything other than Unix-style line terminators. + _emacs_local_vars_pat = re.compile(r"""^ + (?P(?:[^\r\n|\n|\r])*?) + [\ \t]*Local\ Variables:[\ \t]* + (?P.*?)(?:\r\n|\n|\r) + (?P.*?\1End:) + """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE) + + def _get_emacs_vars(self, text): + """Return a dictionary of emacs-style local variables. + + Parsing is done loosely according to this spec (and according to + some in-practice deviations from this): + http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables + """ + emacs_vars = {} + SIZE = pow(2, 13) # 8kB + + # Search near the start for a '-*-'-style one-liner of variables. + head = text[:SIZE] + if "-*-" in head: + match = self._emacs_oneliner_vars_pat.search(head) + if match: + emacs_vars_str = match.group(1) + assert '\n' not in emacs_vars_str + emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';') + if s.strip()] + if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]: + # While not in the spec, this form is allowed by emacs: + # -*- Tcl -*- + # where the implied "variable" is "mode". This form + # is only allowed if there are no other variables. + emacs_vars["mode"] = emacs_var_strs[0].strip() + else: + for emacs_var_str in emacs_var_strs: + try: + variable, value = emacs_var_str.strip().split(':', 1) + except ValueError: + log.debug("emacs variables error: malformed -*- " + "line: %r", emacs_var_str) + continue + # Lowercase the variable name because Emacs allows "Mode" + # or "mode" or "MoDe", etc. + emacs_vars[variable.lower()] = value.strip() + + tail = text[-SIZE:] + if "Local Variables" in tail: + match = self._emacs_local_vars_pat.search(tail) + if match: + prefix = match.group("prefix") + suffix = match.group("suffix") + lines = match.group("content").splitlines(0) + #print "prefix=%r, suffix=%r, content=%r, lines: %s"\ + # % (prefix, suffix, match.group("content"), lines) + + # Validate the Local Variables block: proper prefix and suffix + # usage. + for i, line in enumerate(lines): + if not line.startswith(prefix): + log.debug("emacs variables error: line '%s' " + "does not use proper prefix '%s'" + % (line, prefix)) + return {} + # Don't validate suffix on last line. Emacs doesn't care, + # neither should we. + if i != len(lines)-1 and not line.endswith(suffix): + log.debug("emacs variables error: line '%s' " + "does not use proper suffix '%s'" + % (line, suffix)) + return {} + + # Parse out one emacs var per line. 
+ continued_for = None + for line in lines[:-1]: # no var on the last line ("PREFIX End:") + if prefix: line = line[len(prefix):] # strip prefix + if suffix: line = line[:-len(suffix)] # strip suffix + line = line.strip() + if continued_for: + variable = continued_for + if line.endswith('\\'): + line = line[:-1].rstrip() + else: + continued_for = None + emacs_vars[variable] += ' ' + line + else: + try: + variable, value = line.split(':', 1) + except ValueError: + log.debug("local variables error: missing colon " + "in local variables entry: '%s'" % line) + continue + # Do NOT lowercase the variable name, because Emacs only + # allows "mode" (and not "Mode", "MoDe", etc.) in this block. + value = value.strip() + if value.endswith('\\'): + value = value[:-1].rstrip() + continued_for = variable + else: + continued_for = None + emacs_vars[variable] = value + + # Unquote values. + for var, val in emacs_vars.items(): + if len(val) > 1 and (val.startswith('"') and val.endswith('"') + or val.startswith('"') and val.endswith('"')): + emacs_vars[var] = val[1:-1] + + return emacs_vars + + # Cribbed from a post by Bart Lateur: + # + _detab_re = re.compile(r'(.*?)\t', re.M) + def _detab_sub(self, match): + g1 = match.group(1) + return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width)) + def _detab(self, text): + r"""Remove (leading?) tabs from a file. + + >>> m = Markdown() + >>> m._detab("\tfoo") + ' foo' + >>> m._detab(" \tfoo") + ' foo' + >>> m._detab("\t foo") + ' foo' + >>> m._detab(" foo") + ' foo' + >>> m._detab(" foo\n\tbar\tblam") + ' foo\n bar blam' + """ + if '\t' not in text: + return text + return self._detab_re.subn(self._detab_sub, text)[0] + + _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del' + _strict_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? # any number of lines, minimally matching + # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_a, + re.X | re.M) + + _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math' + _liberal_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? # any number of lines, minimally matching + .* # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_b, + re.X | re.M) + + def _hash_html_block_sub(self, match, raw=False): + html = match.group(1) + if raw and self.safe_mode: + html = self._sanitize_html(html) + key = _hash_text(html) + self.html_blocks[key] = html + return "\n\n" + key + "\n\n" + + def _hash_html_blocks(self, text, raw=False): + """Hashify HTML blocks + + We only want to do this for block-level HTML tags, such as headers, + lists, and tables. That's because we still want to wrap

                                s around + "paragraphs" that are wrapped in non-block-level tags, such as anchors, + phrase emphasis, and spans. The list of tags we're looking for is + hard-coded. + + @param raw {boolean} indicates if these are raw HTML blocks in + the original source. It makes a difference in "safe" mode. + """ + if '<' not in text: + return text + + # Pass `raw` value into our calls to self._hash_html_block_sub. + hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw) + + # First, look for nested blocks, e.g.: + #

                                + #
                                + # tags for inner block must be indented. + #
                                + #
                                + # + # The outermost tags must start at the left margin for this to match, and + # the inner nested divs must be indented. + # We need to do this before the next, more liberal match, because the next + # match will start at the first `
                                ` and stop at the first `
                                `. + text = self._strict_tag_block_re.sub(hash_html_block_sub, text) + + # Now match more liberally, simply from `\n` to `\n` + text = self._liberal_tag_block_re.sub(hash_html_block_sub, text) + + # Special case just for
                                . It was easier to make a special + # case than to make the other regex more complicated. + if "", start_idx) + 3 + except ValueError, ex: + break + + # Start position for next comment block search. + start = end_idx + + # Validate whitespace before comment. + if start_idx: + # - Up to `tab_width - 1` spaces before start_idx. + for i in range(self.tab_width - 1): + if text[start_idx - 1] != ' ': + break + start_idx -= 1 + if start_idx == 0: + break + # - Must be preceded by 2 newlines or hit the start of + # the document. + if start_idx == 0: + pass + elif start_idx == 1 and text[0] == '\n': + start_idx = 0 # to match minute detail of Markdown.pl regex + elif text[start_idx-2:start_idx] == '\n\n': + pass + else: + break + + # Validate whitespace after comment. + # - Any number of spaces and tabs. + while end_idx < len(text): + if text[end_idx] not in ' \t': + break + end_idx += 1 + # - Must be following by 2 newlines or hit end of text. + if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'): + continue + + # Escape and hash (must match `_hash_html_block_sub`). + html = text[start_idx:end_idx] + if raw and self.safe_mode: + html = self._sanitize_html(html) + key = _hash_text(html) + self.html_blocks[key] = html + text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:] + + if "xml" in self.extras: + # Treat XML processing instructions and namespaced one-liner + # tags as if they were block HTML tags. E.g., if standalone + # (i.e. are their own paragraph), the following do not get + # wrapped in a

                                tag: + # + # + # + _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width) + text = _xml_oneliner_re.sub(hash_html_block_sub, text) + + return text + + def _strip_link_definitions(self, text): + # Strips link definitions from text, stores the URLs and titles in + # hash references. + less_than_tab = self.tab_width - 1 + + # Link defs are in the form: + # [id]: url "optional title" + _link_def_re = re.compile(r""" + ^[ ]{0,%d}\[(.+)\]: # id = \1 + [ \t]* + \n? # maybe *one* newline + [ \t]* + ? # url = \2 + [ \t]* + (?: + \n? # maybe one newline + [ \t]* + (?<=\s) # lookbehind for whitespace + ['"(] + ([^\n]*) # title = \3 + ['")] + [ \t]* + )? # title is optional + (?:\n+|\Z) + """ % less_than_tab, re.X | re.M | re.U) + return _link_def_re.sub(self._extract_link_def_sub, text) + + def _extract_link_def_sub(self, match): + id, url, title = match.groups() + key = id.lower() # Link IDs are case-insensitive + self.urls[key] = self._encode_amps_and_angles(url) + if title: + self.titles[key] = title.replace('"', '"') + return "" + + def _extract_footnote_def_sub(self, match): + id, text = match.groups() + text = _dedent(text, skip_first_line=not text.startswith('\n')).strip() + normed_id = re.sub(r'\W', '-', id) + # Ensure footnote text ends with a couple newlines (for some + # block gamut matches). + self.footnotes[normed_id] = text + "\n\n" + return "" + + def _strip_footnote_definitions(self, text): + """A footnote definition looks like this: + + [^note-id]: Text of the note. + + May include one or more indented paragraphs. + + Where, + - The 'note-id' can be pretty much anything, though typically it + is the number of the footnote. + - The first paragraph may start on the next line, like so: + + [^note-id]: + Text of the note. + """ + less_than_tab = self.tab_width - 1 + footnote_def_re = re.compile(r''' + ^[ ]{0,%d}\[\^(.+)\]: # id = \1 + [ \t]* + ( # footnote text = \2 + # First line need not start with the spaces. + (?:\s*.*\n+) + (?: + (?:[ ]{%d} | \t) # Subsequent lines must be indented. + .*\n+ + )* + ) + # Lookahead for non-space at line-start, or end of doc. + (?:(?=^[ ]{0,%d}\S)|\Z) + ''' % (less_than_tab, self.tab_width, self.tab_width), + re.X | re.M) + return footnote_def_re.sub(self._extract_footnote_def_sub, text) + + + _hr_res = [ + re.compile(r"^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$", re.M), + re.compile(r"^[ ]{0,2}([ ]?\-[ ]?){3,}[ \t]*$", re.M), + re.compile(r"^[ ]{0,2}([ ]?\_[ ]?){3,}[ \t]*$", re.M), + ] + + def _run_block_gamut(self, text): + # These are all the transformations that form block-level + # tags like paragraphs, headers, and list items. + + text = self._do_headers(text) + + # Do Horizontal Rules: + hr = "\n tags around block-level tags. + text = self._hash_html_blocks(text) + + text = self._form_paragraphs(text) + + return text + + def _pyshell_block_sub(self, match): + lines = match.group(0).splitlines(0) + _dedentlines(lines) + indent = ' ' * self.tab_width + s = ('\n' # separate from possible cuddled paragraph + + indent + ('\n'+indent).join(lines) + + '\n\n') + return s + + def _prepare_pyshell_blocks(self, text): + """Ensure that Python interactive shell sessions are put in + code blocks -- even if not properly indented. 
+ """ + if ">>>" not in text: + return text + + less_than_tab = self.tab_width - 1 + _pyshell_block_re = re.compile(r""" + ^([ ]{0,%d})>>>[ ].*\n # first line + ^(\1.*\S+.*\n)* # any number of subsequent lines + ^\n # ends with a blank line + """ % less_than_tab, re.M | re.X) + + return _pyshell_block_re.sub(self._pyshell_block_sub, text) + + def _run_span_gamut(self, text): + # These are all the transformations that occur *within* block-level + # tags like paragraphs, headers, and list items. + + text = self._do_code_spans(text) + + text = self._escape_special_chars(text) + + # Process anchor and image tags. + text = self._do_links(text) + + # Make links out of things like `` + # Must come after _do_links(), because you can use < and > + # delimiters in inline links like [this](). + text = self._do_auto_links(text) + + if "link-patterns" in self.extras: + text = self._do_link_patterns(text) + + text = self._encode_amps_and_angles(text) + + text = self._do_italics_and_bold(text) + + # Do hard breaks: + text = re.sub(r" {2,}\n", " + | + # auto-link (e.g., ) + <\w+[^>]*> + | + # comment + | + <\?.*?\?> # processing instruction + ) + """, re.X) + + def _escape_special_chars(self, text): + # Python markdown note: the HTML tokenization here differs from + # that in Markdown.pl, hence the behaviour for subtle cases can + # differ (I believe the tokenizer here does a better job because + # it isn't susceptible to unmatched '<' and '>' in HTML tags). + # Note, however, that '>' is not allowed in an auto-link URL + # here. + escaped = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + if is_html_markup: + # Within tags/HTML-comments/auto-links, encode * and _ + # so they don't conflict with their use in Markdown for + # italics and strong. We're replacing each such + # character with its corresponding MD5 checksum value; + # this is likely overkill, but it should prevent us from + # colliding with the escape values by accident. + escaped.append(token.replace('*', g_escape_table['*']) + .replace('_', g_escape_table['_'])) + else: + escaped.append(self._encode_backslash_escapes(token)) + is_html_markup = not is_html_markup + return ''.join(escaped) + + def _hash_html_spans(self, text): + # Used for safe_mode. + + def _is_auto_link(s): + if ':' in s and self._auto_link_re.match(s): + return True + elif '@' in s and self._auto_email_link_re.match(s): + return True + return False + + tokens = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + if is_html_markup and not _is_auto_link(token): + sanitized = self._sanitize_html(token) + key = _hash_text(sanitized) + self.html_spans[key] = sanitized + tokens.append(key) + else: + tokens.append(token) + is_html_markup = not is_html_markup + return ''.join(tokens) + + def _unhash_html_spans(self, text): + for key, sanitized in self.html_spans.items(): + text = text.replace(key, sanitized) + return text + + def _sanitize_html(self, s): + if self.safe_mode == "replace": + return self.html_removed_text + elif self.safe_mode == "escape": + replacements = [ + ('&', '&'), + ('<', '<'), + ('>', '>'), + ] + for before, after in replacements: + s = s.replace(before, after) + return s + else: + raise MarkdownError("invalid value for 'safe_mode': %r (must be " + "'escape' or 'replace')" % self.safe_mode) + + _tail_of_inline_link_re = re.compile(r''' + # Match tail of: [text](/url/) or [text](/url/ "title") + \( # literal paren + [ \t]* + (?P # \1 + <.*?> + | + .*? 
+ ) + [ \t]* + ( # \2 + (['"]) # quote char = \3 + (?P.*?) + \3 # matching quote + )? # title is optional + \) + ''', re.X | re.S) + _tail_of_reference_link_re = re.compile(r''' + # Match tail of: [text][id] + [ ]? # one optional space + (?:\n[ ]*)? # one optional newline followed by spaces + \[ + (?P<id>.*?) + \] + ''', re.X | re.S) + + def _do_links(self, text): + """Turn Markdown link shortcuts into XHTML <a> and <img> tags. + + This is a combination of Markdown.pl's _DoAnchors() and + _DoImages(). They are done together because that simplified the + approach. It was necessary to use a different approach than + Markdown.pl because of the lack of atomic matching support in + Python's regex engine used in $g_nested_brackets. + """ + MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24 + + # `anchor_allowed_pos` is used to support img links inside + # anchors, but not anchors inside anchors. An anchor's start + # pos must be `>= anchor_allowed_pos`. + anchor_allowed_pos = 0 + + curr_pos = 0 + while True: # Handle the next link. + # The next '[' is the start of: + # - an inline anchor: [text](url "title") + # - a reference anchor: [text][id] + # - an inline img: ![text](url "title") + # - a reference img: ![text][id] + # - a footnote ref: [^id] + # (Only if 'footnotes' extra enabled) + # - a footnote defn: [^id]: ... + # (Only if 'footnotes' extra enabled) These have already + # been stripped in _strip_footnote_definitions() so no + # need to watch for them. + # - a link definition: [id]: url "title" + # These have already been stripped in + # _strip_link_definitions() so no need to watch for them. + # - not markup: [...anything else... + try: + start_idx = text.index('[', curr_pos) + except ValueError: + break + text_length = len(text) + + # Find the matching closing ']'. + # Markdown.pl allows *matching* brackets in link text so we + # will here too. Markdown.pl *doesn't* currently allow + # matching brackets in img alt text -- we'll differ in that + # regard. + bracket_depth = 0 + for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, + text_length)): + ch = text[p] + if ch == ']': + bracket_depth -= 1 + if bracket_depth < 0: + break + elif ch == '[': + bracket_depth += 1 + else: + # Closing bracket not found within sentinel length. + # This isn't markup. + curr_pos = start_idx + 1 + continue + link_text = text[start_idx+1:p] + + # Possibly a footnote ref? + if "footnotes" in self.extras and link_text.startswith("^"): + normed_id = re.sub(r'\W', '-', link_text[1:]) + if normed_id in self.footnotes: + self.footnote_ids.append(normed_id) + result = '<sup class="footnote-ref" id="fnref-%s">' \ + '<a href="#fn-%s">%s</a></sup>' \ + % (normed_id, normed_id, len(self.footnote_ids)) + text = text[:start_idx] + result + text[p+1:] + else: + # This id isn't defined, leave the markup alone. + curr_pos = p+1 + continue + + # Now determine what this is by the remainder. + p += 1 + if p == text_length: + return text + + # Inline anchor or img? + if text[p] == '(': # attempt at perf improvement + match = self._tail_of_inline_link_re.match(text, p) + if match: + # Handle an inline anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + + url, title = match.group("url"), match.group("title") + if url and url[0] == '<': + url = url[1:-1] # '<url>' -> 'url' + # We've got to encode these to avoid conflicting + # with italics/bold. 
+ url = url.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + if title: + title_str = ' title="%s"' \ + % title.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) \ + .replace('"', '"') + else: + title_str = '' + if is_img: + result = '<img src="%s" alt="%s"%s%s' \ + % (url, link_text.replace('"', '"'), + title_str, self.empty_element_suffix) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + elif start_idx >= anchor_allowed_pos: + result_head = '<a href="%s"%s>' % (url, title_str) + result = '%s%s</a>' % (result_head, link_text) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + continue + + # Reference anchor or img? + else: + match = self._tail_of_reference_link_re.match(text, p) + if match: + # Handle a reference-style anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + link_id = match.group("id").lower() + if not link_id: + link_id = link_text.lower() # for links like [this][] + if link_id in self.urls: + url = self.urls[link_id] + # We've got to encode these to avoid conflicting + # with italics/bold. + url = url.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + title = self.titles.get(link_id) + if title: + title = title.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + title_str = ' title="%s"' % title + else: + title_str = '' + if is_img: + result = '<img src="%s" alt="%s"%s%s' \ + % (url, link_text.replace('"', '"'), + title_str, self.empty_element_suffix) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + elif start_idx >= anchor_allowed_pos: + result = '<a href="%s"%s>%s</a>' \ + % (url, title_str, link_text) + result_head = '<a href="%s"%s>' % (url, title_str) + result = '%s%s</a>' % (result_head, link_text) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + else: + # This id isn't defined, leave the markup alone. + curr_pos = match.end() + continue + + # Otherwise, it isn't markup. + curr_pos = start_idx + 1 + + return text + + + _setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M) + def _setext_h_sub(self, match): + n = {"=": 1, "-": 2}[match.group(2)[0]] + demote_headers = self.extras.get("demote-headers") + if demote_headers: + n = min(n + demote_headers, 6) + return "<h%d>%s</h%d>\n\n" \ + % (n, self._run_span_gamut(match.group(1)), n) + + _atx_h_re = re.compile(r''' + ^(\#{1,6}) # \1 = string of #'s + [ \t]* + (.+?) 
# \2 = Header text + [ \t]* + (?<!\\) # ensure not an escaped trailing '#' + \#* # optional closing #'s (not counted) + \n+ + ''', re.X | re.M) + def _atx_h_sub(self, match): + n = len(match.group(1)) + demote_headers = self.extras.get("demote-headers") + if demote_headers: + n = min(n + demote_headers, 6) + return "<h%d>%s</h%d>\n\n" \ + % (n, self._run_span_gamut(match.group(2)), n) + + def _do_headers(self, text): + # Setext-style headers: + # Header 1 + # ======== + # + # Header 2 + # -------- + text = self._setext_h_re.sub(self._setext_h_sub, text) + + # atx-style headers: + # # Header 1 + # ## Header 2 + # ## Header 2 with closing hashes ## + # ... + # ###### Header 6 + text = self._atx_h_re.sub(self._atx_h_sub, text) + + return text + + + _marker_ul_chars = '*+-' + _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars + _marker_ul = '(?:[%s])' % _marker_ul_chars + _marker_ol = r'(?:\d+\.)' + + def _list_sub(self, match): + lst = match.group(1) + lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol" + result = self._process_list_items(lst) + if self.list_level: + return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type) + else: + return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type) + + def _do_lists(self, text): + # Form HTML ordered (numbered) and unordered (bulleted) lists. + + for marker_pat in (self._marker_ul, self._marker_ol): + # Re-usable pattern to match any entire ul or ol list: + less_than_tab = self.tab_width - 1 + whole_list = r''' + ( # \1 = whole list + ( # \2 + [ ]{0,%d} + (%s) # \3 = first list item marker + [ \t]+ + ) + (?:.+?) + ( # \4 + \Z + | + \n{2,} + (?=\S) + (?! # Negative lookahead for another list item marker + [ \t]* + %s[ \t]+ + ) + ) + ) + ''' % (less_than_tab, marker_pat, marker_pat) + + # We use a different prefix before nested lists than top-level lists. + # See extended comment in _process_list_items(). + # + # Note: There's a bit of duplication here. My original implementation + # created a scalar regex pattern as the conditional result of the test on + # $g_list_level, and then only ran the $text =~ s{...}{...}egmx + # substitution once, using the scalar as the pattern. This worked, + # everywhere except when running under MT on my hosting account at Pair + # Networks. There, this caused all rebuilds to be killed by the reaper (or + # perhaps they crashed, but that seems incredibly unlikely given that the + # same script on the same server ran fine *except* under MT. I've spent + # more time trying to figure out why this is happening than I'd like to + # admit. My only guess, backed up by the fact that this workaround works, + # is that Perl optimizes the substition when it can figure out that the + # pattern will never change, and when this optimization isn't on, we run + # afoul of the reaper. Thus, the slightly redundant code to that uses two + # static s/// patterns rather than one conditional pattern. + + if self.list_level: + sub_list_re = re.compile("^"+whole_list, re.X | re.M | re.S) + text = sub_list_re.sub(self._list_sub, text) + else: + list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list, + re.X | re.M | re.S) + text = list_re.sub(self._list_sub, text) + + return text + + _list_item_re = re.compile(r''' + (\n)? # leading line = \1 + (^[ \t]*) # leading whitespace = \2 + (%s) [ \t]+ # list marker = \3 + ((?:.+?) 
# list item text = \4 + (\n{1,2})) # eols = \5 + (?= \n* (\Z | \2 (%s) [ \t]+)) + ''' % (_marker_any, _marker_any), + re.M | re.X | re.S) + + _last_li_endswith_two_eols = False + def _list_item_sub(self, match): + item = match.group(4) + leading_line = match.group(1) + leading_space = match.group(2) + if leading_line or "\n\n" in item or self._last_li_endswith_two_eols: + item = self._run_block_gamut(self._outdent(item)) + else: + # Recursion for sub-lists: + item = self._do_lists(self._outdent(item)) + if item.endswith('\n'): + item = item[:-1] + item = self._run_span_gamut(item) + self._last_li_endswith_two_eols = (len(match.group(5)) == 2) + return "<li>%s</li>\n" % item + + def _process_list_items(self, list_str): + # Process the contents of a single ordered or unordered list, + # splitting it into individual list items. + + # The $g_list_level global keeps track of when we're inside a list. + # Each time we enter a list, we increment it; when we leave a list, + # we decrement. If it's zero, we're not in a list anymore. + # + # We do this because when we're not inside a list, we want to treat + # something like this: + # + # I recommend upgrading to version + # 8. Oops, now this line is treated + # as a sub-list. + # + # As a single paragraph, despite the fact that the second line starts + # with a digit-period-space sequence. + # + # Whereas when we're inside a list (or sub-list), that line will be + # treated as the start of a sub-list. What a kludge, huh? This is + # an aspect of Markdown's syntax that's hard to parse perfectly + # without resorting to mind-reading. Perhaps the solution is to + # change the syntax rules such that sub-lists must start with a + # starting cardinal number; e.g. "1." or "a.". + self.list_level += 1 + self._last_li_endswith_two_eols = False + list_str = list_str.rstrip('\n') + '\n' + list_str = self._list_item_re.sub(self._list_item_sub, list_str) + self.list_level -= 1 + return list_str + + def _get_pygments_lexer(self, lexer_name): + try: + from pygments import lexers, util + except ImportError: + return None + try: + return lexers.get_lexer_by_name(lexer_name) + except util.ClassNotFound: + return None + + def _color_with_pygments(self, codeblock, lexer, **formatter_opts): + import pygments + import pygments.formatters + + class HtmlCodeFormatter(pygments.formatters.HtmlFormatter): + def _wrap_code(self, inner): + """A function for use in a Pygments Formatter which + wraps in <code> tags. + """ + yield 0, "<code>" + for tup in inner: + yield tup + yield 0, "</code>" + + def wrap(self, source, outfile): + """Return the source with a code, pre, and div.""" + return self._wrap_div(self._wrap_pre(self._wrap_code(source))) + + formatter = HtmlCodeFormatter(cssclass="codehilite", **formatter_opts) + return pygments.highlight(codeblock, lexer, formatter) + + def _code_block_sub(self, match): + codeblock = match.group(1) + codeblock = self._outdent(codeblock) + codeblock = self._detab(codeblock) + codeblock = codeblock.lstrip('\n') # trim leading newlines + codeblock = codeblock.rstrip() # trim trailing whitespace + + if "code-color" in self.extras and codeblock.startswith(":::"): + lexer_name, rest = codeblock.split('\n', 1) + lexer_name = lexer_name[3:].strip() + lexer = self._get_pygments_lexer(lexer_name) + codeblock = rest.lstrip("\n") # Remove lexer declaration line. 
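+            # Illustrative input (assumed, not prescribed): with the "code-color"
+            # extra enabled, an indented code block can name its Pygments lexer
+            # on its first line, e.g.
+            #
+            #     :::python
+            #     def hello():
+            #         print "hi"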
+ if lexer: + formatter_opts = self.extras['code-color'] or {} + colored = self._color_with_pygments(codeblock, lexer, + **formatter_opts) + return "\n\n%s\n\n" % colored + + codeblock = self._encode_code(codeblock) + return "\n\n<pre><code>%s\n</code></pre>\n\n" % codeblock + + def _do_code_blocks(self, text): + """Process Markdown `<pre><code>` blocks.""" + code_block_re = re.compile(r''' + (?:\n\n|\A) + ( # $1 = the code block -- one or more lines, starting with a space/tab + (?: + (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces + .*\n+ + )+ + ) + ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc + ''' % (self.tab_width, self.tab_width), + re.M | re.X) + + return code_block_re.sub(self._code_block_sub, text) + + + # Rules for a code span: + # - backslash escapes are not interpreted in a code span + # - to include one or or a run of more backticks the delimiters must + # be a longer run of backticks + # - cannot start or end a code span with a backtick; pad with a + # space and that space will be removed in the emitted HTML + # See `test/tm-cases/escapes.text` for a number of edge-case + # examples. + _code_span_re = re.compile(r''' + (?<!\\) + (`+) # \1 = Opening run of ` + (?!`) # See Note A test/tm-cases/escapes.text + (.+?) # \2 = The code block + (?<!`) + \1 # Matching closer + (?!`) + ''', re.X | re.S) + + def _code_span_sub(self, match): + c = match.group(2).strip(" \t") + c = self._encode_code(c) + return "<code>%s</code>" % c + + def _do_code_spans(self, text): + # * Backtick quotes are used for <code></code> spans. + # + # * You can use multiple backticks as the delimiters if you want to + # include literal backticks in the code span. So, this input: + # + # Just type ``foo `bar` baz`` at the prompt. + # + # Will translate to: + # + # <p>Just type <code>foo `bar` baz</code> at the prompt.</p> + # + # There's no arbitrary limit to the number of backticks you + # can use as delimters. If you need three consecutive backticks + # in your code, use four for delimiters, etc. + # + # * You can use spaces to get literal backticks at the edges: + # + # ... type `` `bar` `` ... + # + # Turns to: + # + # ... type <code>`bar`</code> ... + return self._code_span_re.sub(self._code_span_sub, text) + + def _encode_code(self, text): + """Encode/escape certain characters inside Markdown code runs. + The point is that in code, these characters are literals, + and lose their special Markdown meanings. + """ + replacements = [ + # Encode all ampersands; HTML entities are not + # entities within a Markdown code span. 
+ ('&', '&'), + # Do the angle bracket song and dance: + ('<', '<'), + ('>', '>'), + # Now, escape characters that are magic in Markdown: + ('*', g_escape_table['*']), + ('_', g_escape_table['_']), + ('{', g_escape_table['{']), + ('}', g_escape_table['}']), + ('[', g_escape_table['[']), + (']', g_escape_table[']']), + ('\\', g_escape_table['\\']), + ] + for before, after in replacements: + text = text.replace(before, after) + return text + + _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S) + _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S) + _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S) + _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S) + def _do_italics_and_bold(self, text): + # <strong> must go first: + if "code-friendly" in self.extras: + text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text) + text = self._code_friendly_em_re.sub(r"<em>\1</em>", text) + else: + text = self._strong_re.sub(r"<strong>\2</strong>", text) + text = self._em_re.sub(r"<em>\2</em>", text) + return text + + + _block_quote_re = re.compile(r''' + ( # Wrap whole match in \1 + ( + ^[ \t]*>[ \t]? # '>' at the start of a line + .+\n # rest of the first line + (.+\n)* # subsequent consecutive lines + \n* # blanks + )+ + ) + ''', re.M | re.X) + _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M); + + _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S) + def _dedent_two_spaces_sub(self, match): + return re.sub(r'(?m)^ ', '', match.group(1)) + + def _block_quote_sub(self, match): + bq = match.group(1) + bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting + bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines + bq = self._run_block_gamut(bq) # recurse + + bq = re.sub('(?m)^', ' ', bq) + # These leading spaces screw with <pre> content, so we need to fix that: + bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq) + + return "<blockquote>\n%s\n</blockquote>\n\n" % bq + + def _do_block_quotes(self, text): + if '>' not in text: + return text + return self._block_quote_re.sub(self._block_quote_sub, text) + + def _form_paragraphs(self, text): + # Strip leading and trailing lines: + text = text.strip('\n') + + # Wrap <p> tags. + grafs = re.split(r"\n{2,}", text) + for i, graf in enumerate(grafs): + if graf in self.html_blocks: + # Unhashify HTML blocks + grafs[i] = self.html_blocks[graf] + else: + # Wrap <p> tags. 
+ graf = self._run_span_gamut(graf) + grafs[i] = "<p>" + graf.lstrip(" \t") + "</p>" + + return "\n\n".join(grafs) + + def _add_footnotes(self, text): + if self.footnotes: + footer = [ + '<div class="footnotes">', + '<hr' + self.empty_element_suffix, + '<ol>', + ] + for i, id in enumerate(self.footnote_ids): + if i != 0: + footer.append('') + footer.append('<li id="fn-%s">' % id) + footer.append(self._run_block_gamut(self.footnotes[id])) + backlink = ('<a href="#fnref-%s" ' + 'class="footnoteBackLink" ' + 'title="Jump back to footnote %d in the text.">' + '↩</a>' % (id, i+1)) + if footer[-1].endswith("</p>"): + footer[-1] = footer[-1][:-len("</p>")] \ + + ' ' + backlink + "</p>" + else: + footer.append("\n<p>%s</p>" % backlink) + footer.append('</li>') + footer.append('</ol>') + footer.append('</div>') + return text + '\n\n' + '\n'.join(footer) + else: + return text + + # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin: + # http://bumppo.net/projects/amputator/ + _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)') + _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I) + _naked_gt_re = re.compile(r'''(?<![a-z?!/'"-])>''', re.I) + + def _encode_amps_and_angles(self, text): + # Smart processing for ampersands and angle brackets that need + # to be encoded. + text = self._ampersand_re.sub('&', text) + + # Encode naked <'s + text = self._naked_lt_re.sub('<', text) + + # Encode naked >'s + # Note: Other markdown implementations (e.g. Markdown.pl, PHP + # Markdown) don't do this. + text = self._naked_gt_re.sub('>', text) + return text + + def _encode_backslash_escapes(self, text): + for ch, escape in g_escape_table.items(): + text = text.replace("\\"+ch, escape) + return text + + _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I) + def _auto_link_sub(self, match): + g1 = match.group(1) + return '<a href="%s">%s</a>' % (g1, g1) + + _auto_email_link_re = re.compile(r""" + < + (?:mailto:)? + ( + [-.\w]+ + \@ + [-\w]+(\.[-\w]+)*\.[a-z]+ + ) + > + """, re.I | re.X | re.U) + def _auto_email_link_sub(self, match): + return self._encode_email_address( + self._unescape_special_chars(match.group(1))) + + def _do_auto_links(self, text): + text = self._auto_link_re.sub(self._auto_link_sub, text) + text = self._auto_email_link_re.sub(self._auto_email_link_sub, text) + return text + + def _encode_email_address(self, addr): + # Input: an email address, e.g. "foo@example.com" + # + # Output: the email address as a mailto link, with each character + # of the address encoded as either a decimal or hex entity, in + # the hopes of foiling most address harvesting spam bots. E.g.: + # + # <a href="mailto:foo@e + # xample.com">foo + # @example.com</a> + # + # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk + # mailing list: <http://tinyurl.com/yu7ue> + chars = [_xml_encode_email_char_at_random(ch) + for ch in "mailto:" + addr] + # Strip the mailto: from the visible part. + addr = '<a href="%s">%s</a>' \ + % (''.join(chars), ''.join(chars[7:])) + return addr + + def _do_link_patterns(self, text): + """Caveat emptor: there isn't much guarding against link + patterns being formed inside other standard Markdown links, e.g. + inside a [link def][like this]. + + Dev Notes: *Could* consider prefixing regexes with a negative + lookbehind assertion to attempt to guard against this. 
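+        An illustrative `link_patterns` value (the pattern and URL here are
+        assumptions, not prescribed) that would auto-link text like "issue 42":
+
+            [(re.compile(r"issue\s+#?(\d+)", re.I),
+              r"http://example.com/issues/\1")]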
+ """ + link_from_hash = {} + for regex, repl in self.link_patterns: + replacements = [] + for match in regex.finditer(text): + if hasattr(repl, "__call__"): + href = repl(match) + else: + href = match.expand(repl) + replacements.append((match.span(), href)) + for (start, end), href in reversed(replacements): + escaped_href = ( + href.replace('"', '"') # b/c of attr quote + # To avoid markdown <em> and <strong>: + .replace('*', g_escape_table['*']) + .replace('_', g_escape_table['_'])) + link = '<a href="%s">%s</a>' % (escaped_href, text[start:end]) + hash = md5(link).hexdigest() + link_from_hash[hash] = link + text = text[:start] + hash + text[end:] + for hash, link in link_from_hash.items(): + text = text.replace(hash, link) + return text + + def _unescape_special_chars(self, text): + # Swap back in all the special characters we've hidden. + for ch, hash in g_escape_table.items(): + text = text.replace(hash, ch) + return text + + def _outdent(self, text): + # Remove one level of line-leading tabs or spaces + return self._outdent_re.sub('', text) + + +class MarkdownWithExtras(Markdown): + """A markdowner class that enables most extras: + + - footnotes + - code-color (only has effect if 'pygments' Python module on path) + + These are not included: + - pyshell (specific to Python-related documenting) + - code-friendly (because it *disables* part of the syntax) + - link-patterns (because you need to specify some actual + link-patterns anyway) + """ + extras = ["footnotes", "code-color"] + + +#---- internal support functions + +# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549 +def _curry(*args, **kwargs): + function, args = args[0], args[1:] + def result(*rest, **kwrest): + combined = kwargs.copy() + combined.update(kwrest) + return function(*args + rest, **combined) + return result + +# Recipe: regex_from_encoded_pattern (1.0) +def _regex_from_encoded_pattern(s): + """'foo' -> re.compile(re.escape('foo')) + '/foo/' -> re.compile('foo') + '/foo/i' -> re.compile('foo', re.I) + """ + if s.startswith('/') and s.rfind('/') != 0: + # Parse it: /PATTERN/FLAGS + idx = s.rfind('/') + pattern, flags_str = s[1:idx], s[idx+1:] + flag_from_char = { + "i": re.IGNORECASE, + "l": re.LOCALE, + "s": re.DOTALL, + "m": re.MULTILINE, + "u": re.UNICODE, + } + flags = 0 + for char in flags_str: + try: + flags |= flag_from_char[char] + except KeyError: + raise ValueError("unsupported regex flag: '%s' in '%s' " + "(must be one of '%s')" + % (char, s, ''.join(flag_from_char.keys()))) + return re.compile(s[1:idx], flags) + else: # not an encoded regex + return re.compile(re.escape(s)) + +# Recipe: dedent (0.1.2) +def _dedentlines(lines, tabsize=8, skip_first_line=False): + """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines + + "lines" is a list of lines to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + Same as dedent() except operates on a sequence of lines. Note: the + lines list is modified **in-place**. 
+ """ + DEBUG = False + if DEBUG: + print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\ + % (tabsize, skip_first_line) + indents = [] + margin = None + for i, line in enumerate(lines): + if i == 0 and skip_first_line: continue + indent = 0 + for ch in line: + if ch == ' ': + indent += 1 + elif ch == '\t': + indent += tabsize - (indent % tabsize) + elif ch in '\r\n': + continue # skip all-whitespace lines + else: + break + else: + continue # skip all-whitespace lines + if DEBUG: print "dedent: indent=%d: %r" % (indent, line) + if margin is None: + margin = indent + else: + margin = min(margin, indent) + if DEBUG: print "dedent: margin=%r" % margin + + if margin is not None and margin > 0: + for i, line in enumerate(lines): + if i == 0 and skip_first_line: continue + removed = 0 + for j, ch in enumerate(line): + if ch == ' ': + removed += 1 + elif ch == '\t': + removed += tabsize - (removed % tabsize) + elif ch in '\r\n': + if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line + lines[i] = lines[i][j:] + break + else: + raise ValueError("unexpected non-whitespace char %r in " + "line %r while removing %d-space margin" + % (ch, line, margin)) + if DEBUG: + print "dedent: %r: %r -> removed %d/%d"\ + % (line, ch, removed, margin) + if removed == margin: + lines[i] = lines[i][j+1:] + break + elif removed > margin: + lines[i] = ' '*(removed-margin) + lines[i][j+1:] + break + else: + if removed: + lines[i] = lines[i][removed:] + return lines + +def _dedent(text, tabsize=8, skip_first_line=False): + """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text + + "text" is the text to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + textwrap.dedent(s), but don't expand tabs to spaces + """ + lines = text.splitlines(1) + _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line) + return ''.join(lines) + + +class _memoized(object): + """Decorator that caches a function's return value each time it is called. + If called later with the same arguments, the cached value is returned, and + not re-evaluated. + + http://wiki.python.org/moin/PythonDecoratorLibrary + """ + def __init__(self, func): + self.func = func + self.cache = {} + def __call__(self, *args): + try: + return self.cache[args] + except KeyError: + self.cache[args] = value = self.func(*args) + return value + except TypeError: + # uncachable -- for instance, passing a list as an argument. + # Better to not cache than to blow up entirely. + return self.func(*args) + def __repr__(self): + """Return the function's docstring.""" + return self.func.__doc__ + + +def _xml_oneliner_re_from_tab_width(tab_width): + """Standalone XML processing instruction regex.""" + return re.compile(r""" + (?: + (?<=\n\n) # Starting after a blank line + | # or + \A\n? # the beginning of the doc + ) + ( # save in $1 + [ ]{0,%d} + (?: + <\?\w+\b\s+.*?\?> # XML processing instruction + | + <\w+:\w+\b\s+.*?/> # namespaced single tag + ) + [ \t]* + (?=\n{2,}|\Z) # followed by a blank line or end of document + ) + """ % (tab_width - 1), re.X) +_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width) + +def _hr_tag_re_from_tab_width(tab_width): + return re.compile(r""" + (?: + (?<=\n\n) # Starting after a blank line + | # or + \A\n? 
# the beginning of the doc + ) + ( # save in \1 + [ ]{0,%d} + <(hr) # start tag = \2 + \b # word break + ([^<>])*? # + /?> # the matching end tag + [ \t]* + (?=\n{2,}|\Z) # followed by a blank line or end of document + ) + """ % (tab_width - 1), re.X) +_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width) + + +def _xml_encode_email_char_at_random(ch): + r = random() + # Roughly 10% raw, 45% hex, 45% dec. + # '@' *must* be encoded. I [John Gruber] insist. + # Issue 26: '_' must be encoded. + if r > 0.9 and ch not in "@_": + return ch + elif r < 0.45: + # The [1:] is to drop leading '0': 0x63 -> x63 + return '&#%s;' % hex(ord(ch))[1:] + else: + return '&#%s;' % ord(ch) + +def _hash_text(text): + return 'md5:'+md5(text.encode("utf-8")).hexdigest() + + +#---- mainline + +class _NoReflowFormatter(optparse.IndentedHelpFormatter): + """An optparse formatter that does NOT reflow the description.""" + def format_description(self, description): + return description or "" + +def _test(): + import doctest + doctest.testmod() + +def main(argv=None): + if argv is None: + argv = sys.argv + if not logging.root.handlers: + logging.basicConfig() + + usage = "usage: %prog [PATHS...]" + version = "%prog "+__version__ + parser = optparse.OptionParser(prog="markdown2", usage=usage, + version=version, description=cmdln_desc, + formatter=_NoReflowFormatter()) + parser.add_option("-v", "--verbose", dest="log_level", + action="store_const", const=logging.DEBUG, + help="more verbose output") + parser.add_option("--encoding", + help="specify encoding of text content") + parser.add_option("--html4tags", action="store_true", default=False, + help="use HTML 4 style for empty element tags") + parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode", + help="sanitize literal HTML: 'escape' escapes " + "HTML meta chars, 'replace' replaces with an " + "[HTML_REMOVED] note") + parser.add_option("-x", "--extras", action="append", + help="Turn on specific extra features (not part of " + "the core Markdown spec). Supported values: " + "'code-friendly' disables _/__ for emphasis; " + "'code-color' adds code-block syntax coloring; " + "'link-patterns' adds auto-linking based on patterns; " + "'footnotes' adds the footnotes syntax;" + "'xml' passes one-liner processing instructions and namespaced XML tags;" + "'pyshell' to put unindented Python interactive shell sessions in a <code> block.") + parser.add_option("--use-file-vars", + help="Look for and use Emacs-style 'markdown-extras' " + "file var to turn on extras. 
See " + "<http://code.google.com/p/python-markdown2/wiki/Extras>.") + parser.add_option("--link-patterns-file", + help="path to a link pattern file") + parser.add_option("--self-test", action="store_true", + help="run internal self-tests (some doctests)") + parser.add_option("--compare", action="store_true", + help="run against Markdown.pl as well (for testing)") + parser.set_defaults(log_level=logging.INFO, compare=False, + encoding="utf-8", safe_mode=None, use_file_vars=False) + opts, paths = parser.parse_args() + log.setLevel(opts.log_level) + + if opts.self_test: + return _test() + + if opts.extras: + extras = {} + for s in opts.extras: + splitter = re.compile("[,;: ]+") + for e in splitter.split(s): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + extras[ename] = earg + else: + extras = None + + if opts.link_patterns_file: + link_patterns = [] + f = open(opts.link_patterns_file) + try: + for i, line in enumerate(f.readlines()): + if not line.strip(): continue + if line.lstrip().startswith("#"): continue + try: + pat, href = line.rstrip().rsplit(None, 1) + except ValueError: + raise MarkdownError("%s:%d: invalid link pattern line: %r" + % (opts.link_patterns_file, i+1, line)) + link_patterns.append( + (_regex_from_encoded_pattern(pat), href)) + finally: + f.close() + else: + link_patterns = None + + from os.path import join, dirname, abspath, exists + markdown_pl = join(dirname(dirname(abspath(__file__))), "test", + "Markdown.pl") + for path in paths: + if opts.compare: + print "==== Markdown.pl ====" + perl_cmd = 'perl %s "%s"' % (markdown_pl, path) + o = os.popen(perl_cmd) + perl_html = o.read() + o.close() + sys.stdout.write(perl_html) + print "==== markdown2.py ====" + html = markdown_path(path, encoding=opts.encoding, + html4tags=opts.html4tags, + safe_mode=opts.safe_mode, + extras=extras, link_patterns=link_patterns, + use_file_vars=opts.use_file_vars) + sys.stdout.write( + html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) + if opts.compare: + test_dir = join(dirname(dirname(abspath(__file__))), "test") + if exists(join(test_dir, "test_markdown2.py")): + sys.path.insert(0, test_dir) + from test_markdown2 import norm_html_from_html + norm_html = norm_html_from_html(html) + norm_perl_html = norm_html_from_html(perl_html) + else: + norm_html = html + norm_perl_html = perl_html + print "==== match? %r ====" % (norm_perl_html == norm_html) + + +if __name__ == "__main__": + sys.exit( main(sys.argv) ) + diff --git a/vendor/tornado/demos/appengine/static/blog.css b/vendor/tornado/demos/appengine/static/blog.css new file mode 100644 index 000000000000..8902ec1f22e5 --- /dev/null +++ b/vendor/tornado/demos/appengine/static/blog.css @@ -0,0 +1,153 @@ +/* + * Copyright 2009 Facebook + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. You may obtain + * a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +body { + background: white; + color: black; + margin: 15px; + margin-top: 0; +} + +body, +input, +textarea { + font-family: Georgia, serif; + font-size: 12pt; +} + +table { + border-collapse: collapse; + border: 0; +} + +td { + border: 0; + padding: 0; +} + +h1, +h2, +h3, +h4 { + font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; + margin: 0; +} + +h1 { + font-size: 20pt; +} + +pre, +code { + font-family: monospace; + color: #060; +} + +pre { + margin-left: 1em; + padding-left: 1em; + border-left: 1px solid silver; + line-height: 14pt; +} + +a, +a code { + color: #00c; +} + +#body { + max-width: 800px; + margin: auto; +} + +#header { + background-color: #3b5998; + padding: 5px; + padding-left: 10px; + padding-right: 10px; + margin-bottom: 1em; +} + +#header, +#header a { + color: white; +} + +#header h1 a { + text-decoration: none; +} + +#footer, +#content { + margin-left: 10px; + margin-right: 10px; +} + +#footer { + margin-top: 3em; +} + +.entry h1 a { + color: black; + text-decoration: none; +} + +.entry { + margin-bottom: 2em; +} + +.entry .date { + margin-top: 3px; +} + +.entry p { + margin: 0; + margin-bottom: 1em; +} + +.entry .body { + margin-top: 1em; + line-height: 16pt; +} + +.compose td { + vertical-align: middle; + padding-bottom: 5px; +} + +.compose td.field { + padding-right: 10px; +} + +.compose .title, +.compose .submit { + font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; + font-weight: bold; +} + +.compose .title { + font-size: 20pt; +} + +.compose .title, +.compose .markdown { + width: 100%; +} + +.compose .markdown { + height: 500px; + line-height: 16pt; +} diff --git a/vendor/tornado/demos/appengine/templates/archive.html b/vendor/tornado/demos/appengine/templates/archive.html new file mode 100644 index 000000000000..dcca9511a489 --- /dev/null +++ b/vendor/tornado/demos/appengine/templates/archive.html @@ -0,0 +1,31 @@ +{% extends "base.html" %} + +{% block head %} + <style type="text/css"> + ul.archive { + list-style-type: none; + margin: 0; + padding: 0; + } + + ul.archive li { + margin-bottom: 1em; + } + + ul.archive .title { + font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; + font-size: 14pt; + } + </style> +{% end %} + +{% block body %} + <ul class="archive"> + {% for entry in entries %} + <li> + <div class="title"><a href="/entry/{{ entry.slug }}">{{ escape(entry.title) }}</a></div> + <div class="date">{{ locale.format_date(entry.published, full_format=True, shorter=True) }}</div> + </li> + {% end %} + </ul> +{% end %} diff --git a/vendor/tornado/demos/appengine/templates/base.html b/vendor/tornado/demos/appengine/templates/base.html new file mode 100644 index 000000000000..0154aea8ca5d --- /dev/null +++ b/vendor/tornado/demos/appengine/templates/base.html @@ -0,0 +1,29 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> + <head> + <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> + <title>{{ escape(handler.settings["blog_title"]) }} + + + {% block head %}{% end %} + + +

                                + +
                                {% block body %}{% end %}
                                +
+ {% block bottom %}{% end %} + </body> +</html> diff --git a/vendor/tornado/demos/appengine/templates/compose.html b/vendor/tornado/demos/appengine/templates/compose.html new file mode 100644 index 000000000000..5ad548307cbc --- /dev/null +++ b/vendor/tornado/demos/appengine/templates/compose.html @@ -0,0 +1,42 @@ +{% extends "base.html" %} + +{% block body %} +
                                +
                                +
                                + + {% if entry %} + + {% end %} + {{ xsrf_form_html() }} + +{% end %} + +{% block bottom %} + + +{% end %} + diff --git a/vendor/tornado/demos/appengine/templates/entry.html b/vendor/tornado/demos/appengine/templates/entry.html new file mode 100644 index 000000000000..43c835deadad --- /dev/null +++ b/vendor/tornado/demos/appengine/templates/entry.html @@ -0,0 +1,5 @@ +{% extends "base.html" %} + +{% block body %} + {{ modules.Entry(entry) }} +{% end %} diff --git a/vendor/tornado/demos/appengine/templates/feed.xml b/vendor/tornado/demos/appengine/templates/feed.xml new file mode 100644 index 000000000000..c6c368656c70 --- /dev/null +++ b/vendor/tornado/demos/appengine/templates/feed.xml @@ -0,0 +1,26 @@ + + + {% set date_format = "%Y-%m-%dT%H:%M:%SZ" %} + {{ escape(handler.settings["blog_title"]) }} + {% if len(entries) > 0 %} + {{ max(e.updated for e in entries).strftime(date_format) }} + {% else %} + {{ datetime.datetime.utcnow().strftime(date_format) }} + {% end %} + http://{{ request.host }}/ + + + {{ escape(handler.settings["blog_title"]) }} + {% for entry in entries %} + + http://{{ request.host }}/entry/{{ entry.slug }} + {{ escape(entry.title) }} + + {{ entry.updated.strftime(date_format) }} + {{ entry.published.strftime(date_format) }} + +
<content type="xhtml"> + <div xmlns="http://www.w3.org/1999/xhtml">{{ entry.html }}</div> + </content> + </entry> + {% end %} +</feed>
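The archive and feed templates above rely on Tornado's template syntax: {{ ... }} expressions, {% set %}/{% if %}/{% for %} blocks, and helpers such as escape(). Names like handler, request, and locale are injected when a RequestHandler calls render(); escape() also works standalone. A minimal sketch of evaluating such a fragment outside a handler (the sample entries are invented purely for illustration):

    # Standalone template evaluation; the demo handlers instead call
    # self.render("feed.xml", entries=...) with real entry objects.
    import datetime
    from tornado import template

    t = template.Template(
        '{% for entry in entries %}'
        '<title>{{ escape(entry["title"]) }}</title>\n'
        '<updated>{{ entry["updated"].strftime("%Y-%m-%dT%H:%M:%SZ") }}</updated>\n'
        '{% end %}')

    entries = [{"title": "Hello & welcome",
                "updated": datetime.datetime.utcnow()}]
    print t.generate(entries=entries)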
                                diff --git a/vendor/tornado/demos/appengine/templates/home.html b/vendor/tornado/demos/appengine/templates/home.html new file mode 100644 index 000000000000..dd069a97f3f3 --- /dev/null +++ b/vendor/tornado/demos/appengine/templates/home.html @@ -0,0 +1,8 @@ +{% extends "base.html" %} + +{% block body %} + {% for entry in entries %} + {{ modules.Entry(entry) }} + {% end %} + +{% end %} diff --git a/vendor/tornado/demos/appengine/templates/modules/entry.html b/vendor/tornado/demos/appengine/templates/modules/entry.html new file mode 100644 index 000000000000..06237657c867 --- /dev/null +++ b/vendor/tornado/demos/appengine/templates/modules/entry.html @@ -0,0 +1,8 @@ +
                                +

                                {{ escape(entry.title) }}

                                +
                                {{ locale.format_date(entry.published, full_format=True, shorter=True) }}
                                +
                                {{ entry.html }}
                                + {% if current_user and current_user.administrator %} + + {% end %} +
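The entry module template above is pulled into home.html and entry.html via {{ modules.Entry(entry) }}. That name resolves to a tornado.web.UIModule registered in the application settings; the MySQL blog demo later in this patch wires up its equivalent module essentially like this condensed sketch (the template path and the empty handler list are placeholders):

    import os.path
    import tornado.web

    class EntryModule(tornado.web.UIModule):
        def render(self, entry):
            # Evaluate templates/modules/entry.html for a single post; the
            # "entry" name inside that template is whatever is passed here.
            return self.render_string("modules/entry.html", entry=entry)

    settings = dict(
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        ui_modules={"Entry": EntryModule},
    )
    application = tornado.web.Application([], **settings)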
                                diff --git a/vendor/tornado/demos/auth/authdemo.py b/vendor/tornado/demos/auth/authdemo.py new file mode 100755 index 000000000000..e6136d1b5366 --- /dev/null +++ b/vendor/tornado/demos/auth/authdemo.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tornado.auth +import tornado.escape +import tornado.httpserver +import tornado.ioloop +import tornado.options +import tornado.web + +from tornado.options import define, options + +define("port", default=8888, help="run on the given port", type=int) + + +class Application(tornado.web.Application): + def __init__(self): + handlers = [ + (r"/", MainHandler), + (r"/auth/login", AuthHandler), + ] + settings = dict( + cookie_secret="32oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=", + login_url="/auth/login", + ) + tornado.web.Application.__init__(self, handlers, **settings) + + +class BaseHandler(tornado.web.RequestHandler): + def get_current_user(self): + user_json = self.get_secure_cookie("user") + if not user_json: return None + return tornado.escape.json_decode(user_json) + + +class MainHandler(BaseHandler): + @tornado.web.authenticated + def get(self): + name = tornado.escape.xhtml_escape(self.current_user["name"]) + self.write("Hello, " + name) + + +class AuthHandler(BaseHandler, tornado.auth.GoogleMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("openid.mode", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authenticate_redirect() + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "Google auth failed") + self.set_secure_cookie("user", tornado.escape.json_encode(user)) + self.redirect("/") + + +def main(): + tornado.options.parse_command_line() + http_server = tornado.httpserver.HTTPServer(Application()) + http_server.listen(options.port) + tornado.ioloop.IOLoop.instance().start() + + +if __name__ == "__main__": + main() diff --git a/vendor/tornado/demos/blog/README b/vendor/tornado/demos/blog/README new file mode 100644 index 000000000000..a033e7a11ca5 --- /dev/null +++ b/vendor/tornado/demos/blog/README @@ -0,0 +1,57 @@ +Running the Tornado Blog example app +==================================== +This demo is a simple blogging engine that uses MySQL to store posts and +Google Accounts for author authentication. Since it depends on MySQL, you +need to set up MySQL and the database schema for the demo to run. + +1. Install prerequisites and build tornado + + See http://www.tornadoweb.org/ for installation instructions. If you can + run the "helloworld" example application, your environment is set up + correctly. + +2. Install MySQL if needed + + Consult the documentation for your platform. Under Ubuntu Linux you + can run "apt-get install mysql". Under OS X you can download the + MySQL PKG file from http://dev.mysql.com/downloads/mysql/ + +3. Connect to MySQL and create a database and user for the blog. 
+ + Connect to MySQL as a user that can create databases and users: + mysql -u root + + Create a database named "blog": + mysql> CREATE DATABASE blog; + + Allow the "blog" user to connect with the password "blog": + mysql> GRANT ALL PRIVILEGES ON blog.* TO 'blog'@'localhost' IDENTIFIED BY 'blog'; + +4. Create the tables in your new database. + + You can use the provided schema.sql file by running this command: + mysql --user=blog --password=blog --database=blog < schema.sql + + You can run the above command again later if you want to delete the + contents of the blog and start over after testing. + +5. Run the blog example + + With the default user, password, and database you can just run: + ./blog.py + + If you've changed anything, you can alter the default MySQL settings + with arguments on the command line, e.g.: + ./blog.py --mysql_user=casey --mysql_password=happiness --mysql_database=foodblog + +6. Visit your new blog + + Open http://localhost:8888/ in your web browser. You will be redirected to + a Google account sign-in page because the blog uses Google accounts for + authentication. + + Currently the first user to connect will automatically be given the + ability to create and edit posts. + + Once you've created one blog post, subsequent users will not be + prompted to sign in. diff --git a/vendor/tornado/demos/blog/blog.py b/vendor/tornado/demos/blog/blog.py new file mode 100755 index 000000000000..808a9afc55ad --- /dev/null +++ b/vendor/tornado/demos/blog/blog.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
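blog.py below assumes the authors and entries tables already exist; step 4 of the README above creates them from the demo's schema.sql. As a rough orientation only, the columns its queries touch could be created along these lines (the column types here are guesses; the shipped schema.sql is the authoritative definition):

    # Hypothetical schema sketch inferred from blog.py's queries; the real
    # schema.sql may differ in types, lengths, and indexes.
    import tornado.database

    db = tornado.database.Connection(
        "127.0.0.1:3306", "blog", user="blog", password="blog")
    db.execute("""CREATE TABLE IF NOT EXISTS authors (
        id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
        email VARCHAR(100) NOT NULL,
        name VARCHAR(100) NOT NULL)""")
    db.execute("""CREATE TABLE IF NOT EXISTS entries (
        id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
        author_id INT NOT NULL,
        slug VARCHAR(100) NOT NULL UNIQUE,
        title VARCHAR(512) NOT NULL,
        markdown MEDIUMTEXT NOT NULL,
        html MEDIUMTEXT NOT NULL,
        published DATETIME NOT NULL)""")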
+ +import markdown +import os.path +import re +import tornado.auth +import tornado.database +import tornado.httpserver +import tornado.ioloop +import tornado.options +import tornado.web +import unicodedata + +from tornado.options import define, options + +define("port", default=8888, help="run on the given port", type=int) +define("mysql_host", default="127.0.0.1:3306", help="blog database host") +define("mysql_database", default="blog", help="blog database name") +define("mysql_user", default="blog", help="blog database user") +define("mysql_password", default="blog", help="blog database password") + + +class Application(tornado.web.Application): + def __init__(self): + handlers = [ + (r"/", HomeHandler), + (r"/archive", ArchiveHandler), + (r"/feed", FeedHandler), + (r"/entry/([^/]+)", EntryHandler), + (r"/compose", ComposeHandler), + (r"/auth/login", AuthLoginHandler), + (r"/auth/logout", AuthLogoutHandler), + ] + settings = dict( + blog_title=u"Tornado Blog", + template_path=os.path.join(os.path.dirname(__file__), "templates"), + static_path=os.path.join(os.path.dirname(__file__), "static"), + ui_modules={"Entry": EntryModule}, + xsrf_cookies=True, + cookie_secret="11oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=", + login_url="/auth/login", + ) + tornado.web.Application.__init__(self, handlers, **settings) + + # Have one global connection to the blog DB across all handlers + self.db = tornado.database.Connection( + host=options.mysql_host, database=options.mysql_database, + user=options.mysql_user, password=options.mysql_password) + + +class BaseHandler(tornado.web.RequestHandler): + @property + def db(self): + return self.application.db + + def get_current_user(self): + user_id = self.get_secure_cookie("user") + if not user_id: return None + return self.db.get("SELECT * FROM authors WHERE id = %s", int(user_id)) + + +class HomeHandler(BaseHandler): + def get(self): + entries = self.db.query("SELECT * FROM entries ORDER BY published " + "DESC LIMIT 5") + if not entries: + self.redirect("/compose") + return + self.render("home.html", entries=entries) + + +class EntryHandler(BaseHandler): + def get(self, slug): + entry = self.db.get("SELECT * FROM entries WHERE slug = %s", slug) + if not entry: raise tornado.web.HTTPError(404) + self.render("entry.html", entry=entry) + + +class ArchiveHandler(BaseHandler): + def get(self): + entries = self.db.query("SELECT * FROM entries ORDER BY published " + "DESC") + self.render("archive.html", entries=entries) + + +class FeedHandler(BaseHandler): + def get(self): + entries = self.db.query("SELECT * FROM entries ORDER BY published " + "DESC LIMIT 10") + self.set_header("Content-Type", "application/atom+xml") + self.render("feed.xml", entries=entries) + + +class ComposeHandler(BaseHandler): + @tornado.web.authenticated + def get(self): + id = self.get_argument("id", None) + entry = None + if id: + entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id)) + self.render("compose.html", entry=entry) + + @tornado.web.authenticated + def post(self): + id = self.get_argument("id", None) + title = self.get_argument("title") + text = self.get_argument("markdown") + html = markdown.markdown(text) + if id: + entry = self.db.get("SELECT * FROM entries WHERE id = %s", int(id)) + if not entry: raise tornado.web.HTTPError(404) + slug = entry.slug + self.db.execute( + "UPDATE entries SET title = %s, markdown = %s, html = %s " + "WHERE id = %s", title, text, html, int(id)) + else: + slug = unicodedata.normalize("NFKD", title).encode( + "ascii", "ignore") + slug 
= re.sub(r"[^\w]+", " ", slug) + slug = "-".join(slug.lower().strip().split()) + if not slug: slug = "entry" + while True: + e = self.db.get("SELECT * FROM entries WHERE slug = %s", slug) + if not e: break + slug += "-2" + self.db.execute( + "INSERT INTO entries (author_id,title,slug,markdown,html," + "published) VALUES (%s,%s,%s,%s,%s,UTC_TIMESTAMP())", + self.current_user.id, title, slug, text, html) + self.redirect("/entry/" + slug) + + +class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("openid.mode", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authenticate_redirect() + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "Google auth failed") + author = self.db.get("SELECT * FROM authors WHERE email = %s", + user["email"]) + if not author: + # Auto-create first author + any_author = self.db.get("SELECT * FROM authors LIMIT 1") + if not any_author: + author_id = self.db.execute( + "INSERT INTO authors (email,name) VALUES (%s,%s)", + user["email"], user["name"]) + else: + self.redirect("/") + return + else: + author_id = author["id"] + self.set_secure_cookie("user", str(author_id)) + self.redirect(self.get_argument("next", "/")) + + +class AuthLogoutHandler(BaseHandler): + def get(self): + self.clear_cookie("user") + self.redirect(self.get_argument("next", "/")) + + +class EntryModule(tornado.web.UIModule): + def render(self, entry): + return self.render_string("modules/entry.html", entry=entry) + + +def main(): + tornado.options.parse_command_line() + http_server = tornado.httpserver.HTTPServer(Application()) + http_server.listen(options.port) + tornado.ioloop.IOLoop.instance().start() + + +if __name__ == "__main__": + main() diff --git a/vendor/tornado/demos/blog/markdown.py b/vendor/tornado/demos/blog/markdown.py new file mode 100644 index 000000000000..59ba731bf0c5 --- /dev/null +++ b/vendor/tornado/demos/blog/markdown.py @@ -0,0 +1,1877 @@ +#!/usr/bin/env python +# Copyright (c) 2007-2008 ActiveState Corp. +# License: MIT (http://www.opensource.org/licenses/mit-license.php) + +r"""A fast and complete Python implementation of Markdown. + +[from http://daringfireball.net/projects/markdown/] +> Markdown is a text-to-HTML filter; it translates an easy-to-read / +> easy-to-write structured text format into HTML. Markdown's text +> format is most similar to that of plain text email, and supports +> features such as headers, *emphasis*, code blocks, blockquotes, and +> links. +> +> Markdown's syntax is designed not as a generic markup language, but +> specifically to serve as a front-end to (X)HTML. You can use span-level +> HTML tags anywhere in a Markdown document, and you can use block level +> HTML tags (like
<div> and <table>
                                as well). + +Module usage: + + >>> import markdown2 + >>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)` + u'
<p><em>boo!</em></p>
                                \n' + + >>> markdowner = Markdown() + >>> markdowner.convert("*boo!*") + u'
<p><em>boo!</em></p>
                                \n' + >>> markdowner.convert("**boom!**") + u'
<p><strong>boom!</strong></p>
                                \n' + +This implementation of Markdown implements the full "core" syntax plus a +number of extras (e.g., code syntax coloring, footnotes) as described on +. +""" + +cmdln_desc = """A fast and complete Python implementation of Markdown, a +text-to-HTML conversion tool for web writers. +""" + +# Dev Notes: +# - There is already a Python markdown processor +# (http://www.freewisdom.org/projects/python-markdown/). +# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm +# not yet sure if there implications with this. Compare 'pydoc sre' +# and 'perldoc perlre'. + +__version_info__ = (1, 0, 1, 14) # first three nums match Markdown.pl +__version__ = '1.0.1.14' +__author__ = "Trent Mick" + +import os +import sys +from pprint import pprint +import re +import logging +try: + from hashlib import md5 +except ImportError: + from md5 import md5 +import optparse +from random import random +import codecs + + + +#---- Python version compat + +if sys.version_info[:2] < (2,4): + from sets import Set as set + def reversed(sequence): + for i in sequence[::-1]: + yield i + def _unicode_decode(s, encoding, errors='xmlcharrefreplace'): + return unicode(s, encoding, errors) +else: + def _unicode_decode(s, encoding, errors='strict'): + return s.decode(encoding, errors) + + +#---- globals + +DEBUG = False +log = logging.getLogger("markdown") + +DEFAULT_TAB_WIDTH = 4 + +# Table of hash values for escaped characters: +def _escape_hash(s): + # Lame attempt to avoid possible collision with someone actually + # using the MD5 hexdigest of one of these chars in there text. + # Other ideas: random.random(), uuid.uuid() + #return md5(s).hexdigest() # Markdown.pl effectively does this. + return 'md5-'+md5(s).hexdigest() +g_escape_table = dict([(ch, _escape_hash(ch)) + for ch in '\\`*_{}[]()>#+-.!']) + + + +#---- exceptions + +class MarkdownError(Exception): + pass + + + +#---- public api + +def markdown_path(path, encoding="utf-8", + html4tags=False, tab_width=DEFAULT_TAB_WIDTH, + safe_mode=None, extras=None, link_patterns=None, + use_file_vars=False): + text = codecs.open(path, 'r', encoding).read() + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + use_file_vars=use_file_vars).convert(text) + +def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH, + safe_mode=None, extras=None, link_patterns=None, + use_file_vars=False): + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + use_file_vars=use_file_vars).convert(text) + +class Markdown(object): + # The dict of "extras" to enable in processing -- a mapping of + # extra name to argument for the extra. Most extras do not have an + # argument, in which case the value is None. + # + # This can be set via (a) subclassing and (b) the constructor + # "extras" argument. 
+ extras = None + + urls = None + titles = None + html_blocks = None + html_spans = None + html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py + + # Used to track when we're inside an ordered or unordered list + # (see _ProcessListItems() for details): + list_level = 0 + + _ws_only_line_re = re.compile(r"^[ \t]+$", re.M) + + def __init__(self, html4tags=False, tab_width=4, safe_mode=None, + extras=None, link_patterns=None, use_file_vars=False): + if html4tags: + self.empty_element_suffix = ">" + else: + self.empty_element_suffix = " />" + self.tab_width = tab_width + + # For compatibility with earlier markdown2.py and with + # markdown.py's safe_mode being a boolean, + # safe_mode == True -> "replace" + if safe_mode is True: + self.safe_mode = "replace" + else: + self.safe_mode = safe_mode + + if self.extras is None: + self.extras = {} + elif not isinstance(self.extras, dict): + self.extras = dict([(e, None) for e in self.extras]) + if extras: + if not isinstance(extras, dict): + extras = dict([(e, None) for e in extras]) + self.extras.update(extras) + assert isinstance(self.extras, dict) + self._instance_extras = self.extras.copy() + self.link_patterns = link_patterns + self.use_file_vars = use_file_vars + self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M) + + def reset(self): + self.urls = {} + self.titles = {} + self.html_blocks = {} + self.html_spans = {} + self.list_level = 0 + self.extras = self._instance_extras.copy() + if "footnotes" in self.extras: + self.footnotes = {} + self.footnote_ids = [] + + def convert(self, text): + """Convert the given text.""" + # Main function. The order in which other subs are called here is + # essential. Link and image substitutions need to happen before + # _EscapeSpecialChars(), so that any *'s or _'s in the + # and tags get encoded. + + # Clear the global hashes. If we don't clear these, you get conflicts + # from other articles when generating a page which contains more than + # one article (e.g. an index page that shows the N most recent + # articles): + self.reset() + + if not isinstance(text, unicode): + #TODO: perhaps shouldn't presume UTF-8 for string input? + text = unicode(text, 'utf-8') + + if self.use_file_vars: + # Look for emacs-style file variable hints. + emacs_vars = self._get_emacs_vars(text) + if "markdown-extras" in emacs_vars: + splitter = re.compile("[ ,]+") + for e in splitter.split(emacs_vars["markdown-extras"]): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + self.extras[ename] = earg + + # Standardize line endings: + text = re.sub("\r\n|\r", "\n", text) + + # Make sure $text ends with a couple of newlines: + text += "\n\n" + + # Convert all tabs to spaces. + text = self._detab(text) + + # Strip any lines consisting only of spaces and tabs. + # This makes subsequent regexen easier to write, because we can + # match consecutive blank lines with /\n+/ instead of something + # contorted like /[ \t]*\n+/ . + text = self._ws_only_line_re.sub("", text) + + if self.safe_mode: + text = self._hash_html_spans(text) + + # Turn block-level HTML blocks into hash entries + text = self._hash_html_blocks(text, raw=True) + + # Strip link definitions, store in hashes. 
+ if "footnotes" in self.extras: + # Must do footnotes first because an unlucky footnote defn + # looks like a link defn: + # [^4]: this "looks like a link defn" + text = self._strip_footnote_definitions(text) + text = self._strip_link_definitions(text) + + text = self._run_block_gamut(text) + + if "footnotes" in self.extras: + text = self._add_footnotes(text) + + text = self._unescape_special_chars(text) + + if self.safe_mode: + text = self._unhash_html_spans(text) + + text += "\n" + return text + + _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE) + # This regular expression is intended to match blocks like this: + # PREFIX Local Variables: SUFFIX + # PREFIX mode: Tcl SUFFIX + # PREFIX End: SUFFIX + # Some notes: + # - "[ \t]" is used instead of "\s" to specifically exclude newlines + # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does + # not like anything other than Unix-style line terminators. + _emacs_local_vars_pat = re.compile(r"""^ + (?P(?:[^\r\n|\n|\r])*?) + [\ \t]*Local\ Variables:[\ \t]* + (?P.*?)(?:\r\n|\n|\r) + (?P.*?\1End:) + """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE) + + def _get_emacs_vars(self, text): + """Return a dictionary of emacs-style local variables. + + Parsing is done loosely according to this spec (and according to + some in-practice deviations from this): + http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables + """ + emacs_vars = {} + SIZE = pow(2, 13) # 8kB + + # Search near the start for a '-*-'-style one-liner of variables. + head = text[:SIZE] + if "-*-" in head: + match = self._emacs_oneliner_vars_pat.search(head) + if match: + emacs_vars_str = match.group(1) + assert '\n' not in emacs_vars_str + emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';') + if s.strip()] + if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]: + # While not in the spec, this form is allowed by emacs: + # -*- Tcl -*- + # where the implied "variable" is "mode". This form + # is only allowed if there are no other variables. + emacs_vars["mode"] = emacs_var_strs[0].strip() + else: + for emacs_var_str in emacs_var_strs: + try: + variable, value = emacs_var_str.strip().split(':', 1) + except ValueError: + log.debug("emacs variables error: malformed -*- " + "line: %r", emacs_var_str) + continue + # Lowercase the variable name because Emacs allows "Mode" + # or "mode" or "MoDe", etc. + emacs_vars[variable.lower()] = value.strip() + + tail = text[-SIZE:] + if "Local Variables" in tail: + match = self._emacs_local_vars_pat.search(tail) + if match: + prefix = match.group("prefix") + suffix = match.group("suffix") + lines = match.group("content").splitlines(0) + #print "prefix=%r, suffix=%r, content=%r, lines: %s"\ + # % (prefix, suffix, match.group("content"), lines) + + # Validate the Local Variables block: proper prefix and suffix + # usage. + for i, line in enumerate(lines): + if not line.startswith(prefix): + log.debug("emacs variables error: line '%s' " + "does not use proper prefix '%s'" + % (line, prefix)) + return {} + # Don't validate suffix on last line. Emacs doesn't care, + # neither should we. + if i != len(lines)-1 and not line.endswith(suffix): + log.debug("emacs variables error: line '%s' " + "does not use proper suffix '%s'" + % (line, suffix)) + return {} + + # Parse out one emacs var per line. 
+ continued_for = None + for line in lines[:-1]: # no var on the last line ("PREFIX End:") + if prefix: line = line[len(prefix):] # strip prefix + if suffix: line = line[:-len(suffix)] # strip suffix + line = line.strip() + if continued_for: + variable = continued_for + if line.endswith('\\'): + line = line[:-1].rstrip() + else: + continued_for = None + emacs_vars[variable] += ' ' + line + else: + try: + variable, value = line.split(':', 1) + except ValueError: + log.debug("local variables error: missing colon " + "in local variables entry: '%s'" % line) + continue + # Do NOT lowercase the variable name, because Emacs only + # allows "mode" (and not "Mode", "MoDe", etc.) in this block. + value = value.strip() + if value.endswith('\\'): + value = value[:-1].rstrip() + continued_for = variable + else: + continued_for = None + emacs_vars[variable] = value + + # Unquote values. + for var, val in emacs_vars.items(): + if len(val) > 1 and (val.startswith('"') and val.endswith('"') + or val.startswith('"') and val.endswith('"')): + emacs_vars[var] = val[1:-1] + + return emacs_vars + + # Cribbed from a post by Bart Lateur: + # + _detab_re = re.compile(r'(.*?)\t', re.M) + def _detab_sub(self, match): + g1 = match.group(1) + return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width)) + def _detab(self, text): + r"""Remove (leading?) tabs from a file. + + >>> m = Markdown() + >>> m._detab("\tfoo") + ' foo' + >>> m._detab(" \tfoo") + ' foo' + >>> m._detab("\t foo") + ' foo' + >>> m._detab(" foo") + ' foo' + >>> m._detab(" foo\n\tbar\tblam") + ' foo\n bar blam' + """ + if '\t' not in text: + return text + return self._detab_re.subn(self._detab_sub, text)[0] + + _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del' + _strict_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? # any number of lines, minimally matching + # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_a, + re.X | re.M) + + _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math' + _liberal_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? # any number of lines, minimally matching + .* # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_b, + re.X | re.M) + + def _hash_html_block_sub(self, match, raw=False): + html = match.group(1) + if raw and self.safe_mode: + html = self._sanitize_html(html) + key = _hash_text(html) + self.html_blocks[key] = html + return "\n\n" + key + "\n\n" + + def _hash_html_blocks(self, text, raw=False): + """Hashify HTML blocks + + We only want to do this for block-level HTML tags, such as headers, + lists, and tables. That's because we still want to wrap
<p>
                                s around + "paragraphs" that are wrapped in non-block-level tags, such as anchors, + phrase emphasis, and spans. The list of tags we're looking for is + hard-coded. + + @param raw {boolean} indicates if these are raw HTML blocks in + the original source. It makes a difference in "safe" mode. + """ + if '<' not in text: + return text + + # Pass `raw` value into our calls to self._hash_html_block_sub. + hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw) + + # First, look for nested blocks, e.g.: + #
<div>
+ # <div>
+ # tags for inner block must be indented. + # </div>
+ # </div>
                                + # + # The outermost tags must start at the left margin for this to match, and + # the inner nested divs must be indented. + # We need to do this before the next, more liberal match, because the next + # match will start at the first `
<div>` and stop at the first `</div>
                                `. + text = self._strict_tag_block_re.sub(hash_html_block_sub, text) + + # Now match more liberally, simply from `\n` to `\n` + text = self._liberal_tag_block_re.sub(hash_html_block_sub, text) + + # Special case just for
                                . It was easier to make a special + # case than to make the other regex more complicated. + if "", start_idx) + 3 + except ValueError, ex: + break + + # Start position for next comment block search. + start = end_idx + + # Validate whitespace before comment. + if start_idx: + # - Up to `tab_width - 1` spaces before start_idx. + for i in range(self.tab_width - 1): + if text[start_idx - 1] != ' ': + break + start_idx -= 1 + if start_idx == 0: + break + # - Must be preceded by 2 newlines or hit the start of + # the document. + if start_idx == 0: + pass + elif start_idx == 1 and text[0] == '\n': + start_idx = 0 # to match minute detail of Markdown.pl regex + elif text[start_idx-2:start_idx] == '\n\n': + pass + else: + break + + # Validate whitespace after comment. + # - Any number of spaces and tabs. + while end_idx < len(text): + if text[end_idx] not in ' \t': + break + end_idx += 1 + # - Must be following by 2 newlines or hit end of text. + if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'): + continue + + # Escape and hash (must match `_hash_html_block_sub`). + html = text[start_idx:end_idx] + if raw and self.safe_mode: + html = self._sanitize_html(html) + key = _hash_text(html) + self.html_blocks[key] = html + text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:] + + if "xml" in self.extras: + # Treat XML processing instructions and namespaced one-liner + # tags as if they were block HTML tags. E.g., if standalone + # (i.e. are their own paragraph), the following do not get + # wrapped in a
<p>
                                tag: + # + # + # + _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width) + text = _xml_oneliner_re.sub(hash_html_block_sub, text) + + return text + + def _strip_link_definitions(self, text): + # Strips link definitions from text, stores the URLs and titles in + # hash references. + less_than_tab = self.tab_width - 1 + + # Link defs are in the form: + # [id]: url "optional title" + _link_def_re = re.compile(r""" + ^[ ]{0,%d}\[(.+)\]: # id = \1 + [ \t]* + \n? # maybe *one* newline + [ \t]* + ? # url = \2 + [ \t]* + (?: + \n? # maybe one newline + [ \t]* + (?<=\s) # lookbehind for whitespace + ['"(] + ([^\n]*) # title = \3 + ['")] + [ \t]* + )? # title is optional + (?:\n+|\Z) + """ % less_than_tab, re.X | re.M | re.U) + return _link_def_re.sub(self._extract_link_def_sub, text) + + def _extract_link_def_sub(self, match): + id, url, title = match.groups() + key = id.lower() # Link IDs are case-insensitive + self.urls[key] = self._encode_amps_and_angles(url) + if title: + self.titles[key] = title.replace('"', '"') + return "" + + def _extract_footnote_def_sub(self, match): + id, text = match.groups() + text = _dedent(text, skip_first_line=not text.startswith('\n')).strip() + normed_id = re.sub(r'\W', '-', id) + # Ensure footnote text ends with a couple newlines (for some + # block gamut matches). + self.footnotes[normed_id] = text + "\n\n" + return "" + + def _strip_footnote_definitions(self, text): + """A footnote definition looks like this: + + [^note-id]: Text of the note. + + May include one or more indented paragraphs. + + Where, + - The 'note-id' can be pretty much anything, though typically it + is the number of the footnote. + - The first paragraph may start on the next line, like so: + + [^note-id]: + Text of the note. + """ + less_than_tab = self.tab_width - 1 + footnote_def_re = re.compile(r''' + ^[ ]{0,%d}\[\^(.+)\]: # id = \1 + [ \t]* + ( # footnote text = \2 + # First line need not start with the spaces. + (?:\s*.*\n+) + (?: + (?:[ ]{%d} | \t) # Subsequent lines must be indented. + .*\n+ + )* + ) + # Lookahead for non-space at line-start, or end of doc. + (?:(?=^[ ]{0,%d}\S)|\Z) + ''' % (less_than_tab, self.tab_width, self.tab_width), + re.X | re.M) + return footnote_def_re.sub(self._extract_footnote_def_sub, text) + + + _hr_res = [ + re.compile(r"^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$", re.M), + re.compile(r"^[ ]{0,2}([ ]?\-[ ]?){3,}[ \t]*$", re.M), + re.compile(r"^[ ]{0,2}([ ]?\_[ ]?){3,}[ \t]*$", re.M), + ] + + def _run_block_gamut(self, text): + # These are all the transformations that form block-level + # tags like paragraphs, headers, and list items. + + text = self._do_headers(text) + + # Do Horizontal Rules: + hr = "\n tags around block-level tags. + text = self._hash_html_blocks(text) + + text = self._form_paragraphs(text) + + return text + + def _pyshell_block_sub(self, match): + lines = match.group(0).splitlines(0) + _dedentlines(lines) + indent = ' ' * self.tab_width + s = ('\n' # separate from possible cuddled paragraph + + indent + ('\n'+indent).join(lines) + + '\n\n') + return s + + def _prepare_pyshell_blocks(self, text): + """Ensure that Python interactive shell sessions are put in + code blocks -- even if not properly indented. 
+ """ + if ">>>" not in text: + return text + + less_than_tab = self.tab_width - 1 + _pyshell_block_re = re.compile(r""" + ^([ ]{0,%d})>>>[ ].*\n # first line + ^(\1.*\S+.*\n)* # any number of subsequent lines + ^\n # ends with a blank line + """ % less_than_tab, re.M | re.X) + + return _pyshell_block_re.sub(self._pyshell_block_sub, text) + + def _run_span_gamut(self, text): + # These are all the transformations that occur *within* block-level + # tags like paragraphs, headers, and list items. + + text = self._do_code_spans(text) + + text = self._escape_special_chars(text) + + # Process anchor and image tags. + text = self._do_links(text) + + # Make links out of things like `` + # Must come after _do_links(), because you can use < and > + # delimiters in inline links like [this](). + text = self._do_auto_links(text) + + if "link-patterns" in self.extras: + text = self._do_link_patterns(text) + + text = self._encode_amps_and_angles(text) + + text = self._do_italics_and_bold(text) + + # Do hard breaks: + text = re.sub(r" {2,}\n", " + | + # auto-link (e.g., ) + <\w+[^>]*> + | + # comment + | + <\?.*?\?> # processing instruction + ) + """, re.X) + + def _escape_special_chars(self, text): + # Python markdown note: the HTML tokenization here differs from + # that in Markdown.pl, hence the behaviour for subtle cases can + # differ (I believe the tokenizer here does a better job because + # it isn't susceptible to unmatched '<' and '>' in HTML tags). + # Note, however, that '>' is not allowed in an auto-link URL + # here. + escaped = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + if is_html_markup: + # Within tags/HTML-comments/auto-links, encode * and _ + # so they don't conflict with their use in Markdown for + # italics and strong. We're replacing each such + # character with its corresponding MD5 checksum value; + # this is likely overkill, but it should prevent us from + # colliding with the escape values by accident. + escaped.append(token.replace('*', g_escape_table['*']) + .replace('_', g_escape_table['_'])) + else: + escaped.append(self._encode_backslash_escapes(token)) + is_html_markup = not is_html_markup + return ''.join(escaped) + + def _hash_html_spans(self, text): + # Used for safe_mode. + + def _is_auto_link(s): + if ':' in s and self._auto_link_re.match(s): + return True + elif '@' in s and self._auto_email_link_re.match(s): + return True + return False + + tokens = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + if is_html_markup and not _is_auto_link(token): + sanitized = self._sanitize_html(token) + key = _hash_text(sanitized) + self.html_spans[key] = sanitized + tokens.append(key) + else: + tokens.append(token) + is_html_markup = not is_html_markup + return ''.join(tokens) + + def _unhash_html_spans(self, text): + for key, sanitized in self.html_spans.items(): + text = text.replace(key, sanitized) + return text + + def _sanitize_html(self, s): + if self.safe_mode == "replace": + return self.html_removed_text + elif self.safe_mode == "escape": + replacements = [ + ('&', '&'), + ('<', '<'), + ('>', '>'), + ] + for before, after in replacements: + s = s.replace(before, after) + return s + else: + raise MarkdownError("invalid value for 'safe_mode': %r (must be " + "'escape' or 'replace')" % self.safe_mode) + + _tail_of_inline_link_re = re.compile(r''' + # Match tail of: [text](/url/) or [text](/url/ "title") + \( # literal paren + [ \t]* + (?P # \1 + <.*?> + | + .*? 
+ ) + [ \t]* + ( # \2 + (['"]) # quote char = \3 + (?P.*?) + \3 # matching quote + )? # title is optional + \) + ''', re.X | re.S) + _tail_of_reference_link_re = re.compile(r''' + # Match tail of: [text][id] + [ ]? # one optional space + (?:\n[ ]*)? # one optional newline followed by spaces + \[ + (?P<id>.*?) + \] + ''', re.X | re.S) + + def _do_links(self, text): + """Turn Markdown link shortcuts into XHTML <a> and <img> tags. + + This is a combination of Markdown.pl's _DoAnchors() and + _DoImages(). They are done together because that simplified the + approach. It was necessary to use a different approach than + Markdown.pl because of the lack of atomic matching support in + Python's regex engine used in $g_nested_brackets. + """ + MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24 + + # `anchor_allowed_pos` is used to support img links inside + # anchors, but not anchors inside anchors. An anchor's start + # pos must be `>= anchor_allowed_pos`. + anchor_allowed_pos = 0 + + curr_pos = 0 + while True: # Handle the next link. + # The next '[' is the start of: + # - an inline anchor: [text](url "title") + # - a reference anchor: [text][id] + # - an inline img: ![text](url "title") + # - a reference img: ![text][id] + # - a footnote ref: [^id] + # (Only if 'footnotes' extra enabled) + # - a footnote defn: [^id]: ... + # (Only if 'footnotes' extra enabled) These have already + # been stripped in _strip_footnote_definitions() so no + # need to watch for them. + # - a link definition: [id]: url "title" + # These have already been stripped in + # _strip_link_definitions() so no need to watch for them. + # - not markup: [...anything else... + try: + start_idx = text.index('[', curr_pos) + except ValueError: + break + text_length = len(text) + + # Find the matching closing ']'. + # Markdown.pl allows *matching* brackets in link text so we + # will here too. Markdown.pl *doesn't* currently allow + # matching brackets in img alt text -- we'll differ in that + # regard. + bracket_depth = 0 + for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, + text_length)): + ch = text[p] + if ch == ']': + bracket_depth -= 1 + if bracket_depth < 0: + break + elif ch == '[': + bracket_depth += 1 + else: + # Closing bracket not found within sentinel length. + # This isn't markup. + curr_pos = start_idx + 1 + continue + link_text = text[start_idx+1:p] + + # Possibly a footnote ref? + if "footnotes" in self.extras and link_text.startswith("^"): + normed_id = re.sub(r'\W', '-', link_text[1:]) + if normed_id in self.footnotes: + self.footnote_ids.append(normed_id) + result = '<sup class="footnote-ref" id="fnref-%s">' \ + '<a href="#fn-%s">%s</a></sup>' \ + % (normed_id, normed_id, len(self.footnote_ids)) + text = text[:start_idx] + result + text[p+1:] + else: + # This id isn't defined, leave the markup alone. + curr_pos = p+1 + continue + + # Now determine what this is by the remainder. + p += 1 + if p == text_length: + return text + + # Inline anchor or img? + if text[p] == '(': # attempt at perf improvement + match = self._tail_of_inline_link_re.match(text, p) + if match: + # Handle an inline anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + + url, title = match.group("url"), match.group("title") + if url and url[0] == '<': + url = url[1:-1] # '<url>' -> 'url' + # We've got to encode these to avoid conflicting + # with italics/bold. 
+ url = url.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + if title: + title_str = ' title="%s"' \ + % title.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) \ + .replace('"', '"') + else: + title_str = '' + if is_img: + result = '<img src="%s" alt="%s"%s%s' \ + % (url, link_text.replace('"', '"'), + title_str, self.empty_element_suffix) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + elif start_idx >= anchor_allowed_pos: + result_head = '<a href="%s"%s>' % (url, title_str) + result = '%s%s</a>' % (result_head, link_text) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + continue + + # Reference anchor or img? + else: + match = self._tail_of_reference_link_re.match(text, p) + if match: + # Handle a reference-style anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + link_id = match.group("id").lower() + if not link_id: + link_id = link_text.lower() # for links like [this][] + if link_id in self.urls: + url = self.urls[link_id] + # We've got to encode these to avoid conflicting + # with italics/bold. + url = url.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + title = self.titles.get(link_id) + if title: + title = title.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + title_str = ' title="%s"' % title + else: + title_str = '' + if is_img: + result = '<img src="%s" alt="%s"%s%s' \ + % (url, link_text.replace('"', '"'), + title_str, self.empty_element_suffix) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + elif start_idx >= anchor_allowed_pos: + result = '<a href="%s"%s>%s</a>' \ + % (url, title_str, link_text) + result_head = '<a href="%s"%s>' % (url, title_str) + result = '%s%s</a>' % (result_head, link_text) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + else: + # This id isn't defined, leave the markup alone. + curr_pos = match.end() + continue + + # Otherwise, it isn't markup. + curr_pos = start_idx + 1 + + return text + + + _setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M) + def _setext_h_sub(self, match): + n = {"=": 1, "-": 2}[match.group(2)[0]] + demote_headers = self.extras.get("demote-headers") + if demote_headers: + n = min(n + demote_headers, 6) + return "<h%d>%s</h%d>\n\n" \ + % (n, self._run_span_gamut(match.group(1)), n) + + _atx_h_re = re.compile(r''' + ^(\#{1,6}) # \1 = string of #'s + [ \t]* + (.+?) 
# \2 = Header text + [ \t]* + (?<!\\) # ensure not an escaped trailing '#' + \#* # optional closing #'s (not counted) + \n+ + ''', re.X | re.M) + def _atx_h_sub(self, match): + n = len(match.group(1)) + demote_headers = self.extras.get("demote-headers") + if demote_headers: + n = min(n + demote_headers, 6) + return "<h%d>%s</h%d>\n\n" \ + % (n, self._run_span_gamut(match.group(2)), n) + + def _do_headers(self, text): + # Setext-style headers: + # Header 1 + # ======== + # + # Header 2 + # -------- + text = self._setext_h_re.sub(self._setext_h_sub, text) + + # atx-style headers: + # # Header 1 + # ## Header 2 + # ## Header 2 with closing hashes ## + # ... + # ###### Header 6 + text = self._atx_h_re.sub(self._atx_h_sub, text) + + return text + + + _marker_ul_chars = '*+-' + _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars + _marker_ul = '(?:[%s])' % _marker_ul_chars + _marker_ol = r'(?:\d+\.)' + + def _list_sub(self, match): + lst = match.group(1) + lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol" + result = self._process_list_items(lst) + if self.list_level: + return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type) + else: + return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type) + + def _do_lists(self, text): + # Form HTML ordered (numbered) and unordered (bulleted) lists. + + for marker_pat in (self._marker_ul, self._marker_ol): + # Re-usable pattern to match any entire ul or ol list: + less_than_tab = self.tab_width - 1 + whole_list = r''' + ( # \1 = whole list + ( # \2 + [ ]{0,%d} + (%s) # \3 = first list item marker + [ \t]+ + ) + (?:.+?) + ( # \4 + \Z + | + \n{2,} + (?=\S) + (?! # Negative lookahead for another list item marker + [ \t]* + %s[ \t]+ + ) + ) + ) + ''' % (less_than_tab, marker_pat, marker_pat) + + # We use a different prefix before nested lists than top-level lists. + # See extended comment in _process_list_items(). + # + # Note: There's a bit of duplication here. My original implementation + # created a scalar regex pattern as the conditional result of the test on + # $g_list_level, and then only ran the $text =~ s{...}{...}egmx + # substitution once, using the scalar as the pattern. This worked, + # everywhere except when running under MT on my hosting account at Pair + # Networks. There, this caused all rebuilds to be killed by the reaper (or + # perhaps they crashed, but that seems incredibly unlikely given that the + # same script on the same server ran fine *except* under MT. I've spent + # more time trying to figure out why this is happening than I'd like to + # admit. My only guess, backed up by the fact that this workaround works, + # is that Perl optimizes the substition when it can figure out that the + # pattern will never change, and when this optimization isn't on, we run + # afoul of the reaper. Thus, the slightly redundant code to that uses two + # static s/// patterns rather than one conditional pattern. + + if self.list_level: + sub_list_re = re.compile("^"+whole_list, re.X | re.M | re.S) + text = sub_list_re.sub(self._list_sub, text) + else: + list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list, + re.X | re.M | re.S) + text = list_re.sub(self._list_sub, text) + + return text + + _list_item_re = re.compile(r''' + (\n)? # leading line = \1 + (^[ \t]*) # leading whitespace = \2 + (%s) [ \t]+ # list marker = \3 + ((?:.+?) 
# list item text = \4 + (\n{1,2})) # eols = \5 + (?= \n* (\Z | \2 (%s) [ \t]+)) + ''' % (_marker_any, _marker_any), + re.M | re.X | re.S) + + _last_li_endswith_two_eols = False + def _list_item_sub(self, match): + item = match.group(4) + leading_line = match.group(1) + leading_space = match.group(2) + if leading_line or "\n\n" in item or self._last_li_endswith_two_eols: + item = self._run_block_gamut(self._outdent(item)) + else: + # Recursion for sub-lists: + item = self._do_lists(self._outdent(item)) + if item.endswith('\n'): + item = item[:-1] + item = self._run_span_gamut(item) + self._last_li_endswith_two_eols = (len(match.group(5)) == 2) + return "<li>%s</li>\n" % item + + def _process_list_items(self, list_str): + # Process the contents of a single ordered or unordered list, + # splitting it into individual list items. + + # The $g_list_level global keeps track of when we're inside a list. + # Each time we enter a list, we increment it; when we leave a list, + # we decrement. If it's zero, we're not in a list anymore. + # + # We do this because when we're not inside a list, we want to treat + # something like this: + # + # I recommend upgrading to version + # 8. Oops, now this line is treated + # as a sub-list. + # + # As a single paragraph, despite the fact that the second line starts + # with a digit-period-space sequence. + # + # Whereas when we're inside a list (or sub-list), that line will be + # treated as the start of a sub-list. What a kludge, huh? This is + # an aspect of Markdown's syntax that's hard to parse perfectly + # without resorting to mind-reading. Perhaps the solution is to + # change the syntax rules such that sub-lists must start with a + # starting cardinal number; e.g. "1." or "a.". + self.list_level += 1 + self._last_li_endswith_two_eols = False + list_str = list_str.rstrip('\n') + '\n' + list_str = self._list_item_re.sub(self._list_item_sub, list_str) + self.list_level -= 1 + return list_str + + def _get_pygments_lexer(self, lexer_name): + try: + from pygments import lexers, util + except ImportError: + return None + try: + return lexers.get_lexer_by_name(lexer_name) + except util.ClassNotFound: + return None + + def _color_with_pygments(self, codeblock, lexer, **formatter_opts): + import pygments + import pygments.formatters + + class HtmlCodeFormatter(pygments.formatters.HtmlFormatter): + def _wrap_code(self, inner): + """A function for use in a Pygments Formatter which + wraps in <code> tags. + """ + yield 0, "<code>" + for tup in inner: + yield tup + yield 0, "</code>" + + def wrap(self, source, outfile): + """Return the source with a code, pre, and div.""" + return self._wrap_div(self._wrap_pre(self._wrap_code(source))) + + formatter = HtmlCodeFormatter(cssclass="codehilite", **formatter_opts) + return pygments.highlight(codeblock, lexer, formatter) + + def _code_block_sub(self, match): + codeblock = match.group(1) + codeblock = self._outdent(codeblock) + codeblock = self._detab(codeblock) + codeblock = codeblock.lstrip('\n') # trim leading newlines + codeblock = codeblock.rstrip() # trim trailing whitespace + + if "code-color" in self.extras and codeblock.startswith(":::"): + lexer_name, rest = codeblock.split('\n', 1) + lexer_name = lexer_name[3:].strip() + lexer = self._get_pygments_lexer(lexer_name) + codeblock = rest.lstrip("\n") # Remove lexer declaration line. 
+ if lexer: + formatter_opts = self.extras['code-color'] or {} + colored = self._color_with_pygments(codeblock, lexer, + **formatter_opts) + return "\n\n%s\n\n" % colored + + codeblock = self._encode_code(codeblock) + return "\n\n<pre><code>%s\n</code></pre>\n\n" % codeblock + + def _do_code_blocks(self, text): + """Process Markdown `<pre><code>` blocks.""" + code_block_re = re.compile(r''' + (?:\n\n|\A) + ( # $1 = the code block -- one or more lines, starting with a space/tab + (?: + (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces + .*\n+ + )+ + ) + ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc + ''' % (self.tab_width, self.tab_width), + re.M | re.X) + + return code_block_re.sub(self._code_block_sub, text) + + + # Rules for a code span: + # - backslash escapes are not interpreted in a code span + # - to include one or or a run of more backticks the delimiters must + # be a longer run of backticks + # - cannot start or end a code span with a backtick; pad with a + # space and that space will be removed in the emitted HTML + # See `test/tm-cases/escapes.text` for a number of edge-case + # examples. + _code_span_re = re.compile(r''' + (?<!\\) + (`+) # \1 = Opening run of ` + (?!`) # See Note A test/tm-cases/escapes.text + (.+?) # \2 = The code block + (?<!`) + \1 # Matching closer + (?!`) + ''', re.X | re.S) + + def _code_span_sub(self, match): + c = match.group(2).strip(" \t") + c = self._encode_code(c) + return "<code>%s</code>" % c + + def _do_code_spans(self, text): + # * Backtick quotes are used for <code></code> spans. + # + # * You can use multiple backticks as the delimiters if you want to + # include literal backticks in the code span. So, this input: + # + # Just type ``foo `bar` baz`` at the prompt. + # + # Will translate to: + # + # <p>Just type <code>foo `bar` baz</code> at the prompt.</p> + # + # There's no arbitrary limit to the number of backticks you + # can use as delimters. If you need three consecutive backticks + # in your code, use four for delimiters, etc. + # + # * You can use spaces to get literal backticks at the edges: + # + # ... type `` `bar` `` ... + # + # Turns to: + # + # ... type <code>`bar`</code> ... + return self._code_span_re.sub(self._code_span_sub, text) + + def _encode_code(self, text): + """Encode/escape certain characters inside Markdown code runs. + The point is that in code, these characters are literals, + and lose their special Markdown meanings. + """ + replacements = [ + # Encode all ampersands; HTML entities are not + # entities within a Markdown code span. 
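# (That is, an '&' typed inside backticks comes out as '&amp;' and stays a
# literal ampersand in the rendered page, while the g_escape_table entries
# below swap Markdown's own magic characters for placeholder hashes that
# _unescape_special_chars restores as plain literals at the end of the
# conversion.)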
+ ('&', '&amp;'), + # Do the angle bracket song and dance: + ('<', '&lt;'), + ('>', '&gt;'), + # Now, escape characters that are magic in Markdown: + ('*', g_escape_table['*']), + ('_', g_escape_table['_']), + ('{', g_escape_table['{']), + ('}', g_escape_table['}']), + ('[', g_escape_table['[']), + (']', g_escape_table[']']), + ('\\', g_escape_table['\\']), + ] + for before, after in replacements: + text = text.replace(before, after) + return text + + _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S) + _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S) + _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S) + _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S) + def _do_italics_and_bold(self, text): + # <strong> must go first: + if "code-friendly" in self.extras: + text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text) + text = self._code_friendly_em_re.sub(r"<em>\1</em>", text) + else: + text = self._strong_re.sub(r"<strong>\2</strong>", text) + text = self._em_re.sub(r"<em>\2</em>", text) + return text + + + _block_quote_re = re.compile(r''' + ( # Wrap whole match in \1 + ( + ^[ \t]*>[ \t]? # '>' at the start of a line + .+\n # rest of the first line + (.+\n)* # subsequent consecutive lines + \n* # blanks + )+ + ) + ''', re.M | re.X) + _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M); + + _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S) + def _dedent_two_spaces_sub(self, match): + return re.sub(r'(?m)^ ', '', match.group(1)) + + def _block_quote_sub(self, match): + bq = match.group(1) + bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting + bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines + bq = self._run_block_gamut(bq) # recurse + + bq = re.sub('(?m)^', ' ', bq) + # These leading spaces screw with <pre> content, so we need to fix that: + bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq) + + return "<blockquote>\n%s\n</blockquote>\n\n" % bq + + def _do_block_quotes(self, text): + if '>' not in text: + return text + return self._block_quote_re.sub(self._block_quote_sub, text) + + def _form_paragraphs(self, text): + # Strip leading and trailing lines: + text = text.strip('\n') + + # Wrap <p> tags. + grafs = re.split(r"\n{2,}", text) + for i, graf in enumerate(grafs): + if graf in self.html_blocks: + # Unhashify HTML blocks + grafs[i] = self.html_blocks[graf] + else: + # Wrap <p> tags.
+ graf = self._run_span_gamut(graf) + grafs[i] = "<p>" + graf.lstrip(" \t") + "</p>" + + return "\n\n".join(grafs) + + def _add_footnotes(self, text): + if self.footnotes: + footer = [ + '<div class="footnotes">', + '<hr' + self.empty_element_suffix, + '<ol>', + ] + for i, id in enumerate(self.footnote_ids): + if i != 0: + footer.append('') + footer.append('<li id="fn-%s">' % id) + footer.append(self._run_block_gamut(self.footnotes[id])) + backlink = ('<a href="#fnref-%s" ' + 'class="footnoteBackLink" ' + 'title="Jump back to footnote %d in the text.">' + '&#8617;</a>' % (id, i+1)) + if footer[-1].endswith("</p>"): + footer[-1] = footer[-1][:-len("</p>")] \ + + ' ' + backlink + "</p>" + else: + footer.append("\n<p>%s</p>" % backlink) + footer.append('</li>') + footer.append('</ol>') + footer.append('</div>') + return text + '\n\n' + '\n'.join(footer) + else: + return text + + # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin: + # http://bumppo.net/projects/amputator/ + _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)') + _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I) + _naked_gt_re = re.compile(r'''(?<![a-z?!/'"-])>''', re.I) + + def _encode_amps_and_angles(self, text): + # Smart processing for ampersands and angle brackets that need + # to be encoded. + text = self._ampersand_re.sub('&amp;', text) + + # Encode naked <'s + text = self._naked_lt_re.sub('&lt;', text) + + # Encode naked >'s + # Note: Other markdown implementations (e.g. Markdown.pl, PHP + # Markdown) don't do this. + text = self._naked_gt_re.sub('&gt;', text) + return text + + def _encode_backslash_escapes(self, text): + for ch, escape in g_escape_table.items(): + text = text.replace("\\"+ch, escape) + return text + + _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I) + def _auto_link_sub(self, match): + g1 = match.group(1) + return '<a href="%s">%s</a>' % (g1, g1) + + _auto_email_link_re = re.compile(r""" + < + (?:mailto:)? + ( + [-.\w]+ + \@ + [-\w]+(\.[-\w]+)*\.[a-z]+ + ) + > + """, re.I | re.X | re.U) + def _auto_email_link_sub(self, match): + return self._encode_email_address( + self._unescape_special_chars(match.group(1))) + + def _do_auto_links(self, text): + text = self._auto_link_re.sub(self._auto_link_sub, text) + text = self._auto_email_link_re.sub(self._auto_email_link_sub, text) + return text + + def _encode_email_address(self, addr): + # Input: an email address, e.g. "foo@example.com" + # + # Output: the email address as a mailto link, with each character + # of the address encoded as either a decimal or hex entity, in + # the hopes of foiling most address harvesting spam bots. E.g.: + # + # <a href="mailto:foo@e + # xample.com">foo + # @example.com</a> + # + # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk + # mailing list: <http://tinyurl.com/yu7ue> + chars = [_xml_encode_email_char_at_random(ch) + for ch in "mailto:" + addr] + # Strip the mailto: from the visible part. + addr = '<a href="%s">%s</a>' \ + % (''.join(chars), ''.join(chars[7:])) + return addr + + def _do_link_patterns(self, text): + """Caveat emptor: there isn't much guarding against link + patterns being formed inside other standard Markdown links, e.g. + inside a [link def][like this]. + + Dev Notes: *Could* consider prefixing regexes with a negative + lookbehind assertion to attempt to guard against this.
+ """ + link_from_hash = {} + for regex, repl in self.link_patterns: + replacements = [] + for match in regex.finditer(text): + if hasattr(repl, "__call__"): + href = repl(match) + else: + href = match.expand(repl) + replacements.append((match.span(), href)) + for (start, end), href in reversed(replacements): + escaped_href = ( + href.replace('"', '"') # b/c of attr quote + # To avoid markdown <em> and <strong>: + .replace('*', g_escape_table['*']) + .replace('_', g_escape_table['_'])) + link = '<a href="%s">%s</a>' % (escaped_href, text[start:end]) + hash = md5(link).hexdigest() + link_from_hash[hash] = link + text = text[:start] + hash + text[end:] + for hash, link in link_from_hash.items(): + text = text.replace(hash, link) + return text + + def _unescape_special_chars(self, text): + # Swap back in all the special characters we've hidden. + for ch, hash in g_escape_table.items(): + text = text.replace(hash, ch) + return text + + def _outdent(self, text): + # Remove one level of line-leading tabs or spaces + return self._outdent_re.sub('', text) + + +class MarkdownWithExtras(Markdown): + """A markdowner class that enables most extras: + + - footnotes + - code-color (only has effect if 'pygments' Python module on path) + + These are not included: + - pyshell (specific to Python-related documenting) + - code-friendly (because it *disables* part of the syntax) + - link-patterns (because you need to specify some actual + link-patterns anyway) + """ + extras = ["footnotes", "code-color"] + + +#---- internal support functions + +# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549 +def _curry(*args, **kwargs): + function, args = args[0], args[1:] + def result(*rest, **kwrest): + combined = kwargs.copy() + combined.update(kwrest) + return function(*args + rest, **combined) + return result + +# Recipe: regex_from_encoded_pattern (1.0) +def _regex_from_encoded_pattern(s): + """'foo' -> re.compile(re.escape('foo')) + '/foo/' -> re.compile('foo') + '/foo/i' -> re.compile('foo', re.I) + """ + if s.startswith('/') and s.rfind('/') != 0: + # Parse it: /PATTERN/FLAGS + idx = s.rfind('/') + pattern, flags_str = s[1:idx], s[idx+1:] + flag_from_char = { + "i": re.IGNORECASE, + "l": re.LOCALE, + "s": re.DOTALL, + "m": re.MULTILINE, + "u": re.UNICODE, + } + flags = 0 + for char in flags_str: + try: + flags |= flag_from_char[char] + except KeyError: + raise ValueError("unsupported regex flag: '%s' in '%s' " + "(must be one of '%s')" + % (char, s, ''.join(flag_from_char.keys()))) + return re.compile(s[1:idx], flags) + else: # not an encoded regex + return re.compile(re.escape(s)) + +# Recipe: dedent (0.1.2) +def _dedentlines(lines, tabsize=8, skip_first_line=False): + """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines + + "lines" is a list of lines to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + Same as dedent() except operates on a sequence of lines. Note: the + lines list is modified **in-place**. 
+ """ + DEBUG = False + if DEBUG: + print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\ + % (tabsize, skip_first_line) + indents = [] + margin = None + for i, line in enumerate(lines): + if i == 0 and skip_first_line: continue + indent = 0 + for ch in line: + if ch == ' ': + indent += 1 + elif ch == '\t': + indent += tabsize - (indent % tabsize) + elif ch in '\r\n': + continue # skip all-whitespace lines + else: + break + else: + continue # skip all-whitespace lines + if DEBUG: print "dedent: indent=%d: %r" % (indent, line) + if margin is None: + margin = indent + else: + margin = min(margin, indent) + if DEBUG: print "dedent: margin=%r" % margin + + if margin is not None and margin > 0: + for i, line in enumerate(lines): + if i == 0 and skip_first_line: continue + removed = 0 + for j, ch in enumerate(line): + if ch == ' ': + removed += 1 + elif ch == '\t': + removed += tabsize - (removed % tabsize) + elif ch in '\r\n': + if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line + lines[i] = lines[i][j:] + break + else: + raise ValueError("unexpected non-whitespace char %r in " + "line %r while removing %d-space margin" + % (ch, line, margin)) + if DEBUG: + print "dedent: %r: %r -> removed %d/%d"\ + % (line, ch, removed, margin) + if removed == margin: + lines[i] = lines[i][j+1:] + break + elif removed > margin: + lines[i] = ' '*(removed-margin) + lines[i][j+1:] + break + else: + if removed: + lines[i] = lines[i][removed:] + return lines + +def _dedent(text, tabsize=8, skip_first_line=False): + """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text + + "text" is the text to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + textwrap.dedent(s), but don't expand tabs to spaces + """ + lines = text.splitlines(1) + _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line) + return ''.join(lines) + + +class _memoized(object): + """Decorator that caches a function's return value each time it is called. + If called later with the same arguments, the cached value is returned, and + not re-evaluated. + + http://wiki.python.org/moin/PythonDecoratorLibrary + """ + def __init__(self, func): + self.func = func + self.cache = {} + def __call__(self, *args): + try: + return self.cache[args] + except KeyError: + self.cache[args] = value = self.func(*args) + return value + except TypeError: + # uncachable -- for instance, passing a list as an argument. + # Better to not cache than to blow up entirely. + return self.func(*args) + def __repr__(self): + """Return the function's docstring.""" + return self.func.__doc__ + + +def _xml_oneliner_re_from_tab_width(tab_width): + """Standalone XML processing instruction regex.""" + return re.compile(r""" + (?: + (?<=\n\n) # Starting after a blank line + | # or + \A\n? # the beginning of the doc + ) + ( # save in $1 + [ ]{0,%d} + (?: + <\?\w+\b\s+.*?\?> # XML processing instruction + | + <\w+:\w+\b\s+.*?/> # namespaced single tag + ) + [ \t]* + (?=\n{2,}|\Z) # followed by a blank line or end of document + ) + """ % (tab_width - 1), re.X) +_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width) + +def _hr_tag_re_from_tab_width(tab_width): + return re.compile(r""" + (?: + (?<=\n\n) # Starting after a blank line + | # or + \A\n? 
# the beginning of the doc + ) + ( # save in \1 + [ ]{0,%d} + <(hr) # start tag = \2 + \b # word break + ([^<>])*? # + /?> # the matching end tag + [ \t]* + (?=\n{2,}|\Z) # followed by a blank line or end of document + ) + """ % (tab_width - 1), re.X) +_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width) + + +def _xml_encode_email_char_at_random(ch): + r = random() + # Roughly 10% raw, 45% hex, 45% dec. + # '@' *must* be encoded. I [John Gruber] insist. + # Issue 26: '_' must be encoded. + if r > 0.9 and ch not in "@_": + return ch + elif r < 0.45: + # The [1:] is to drop leading '0': 0x63 -> x63 + return '&#%s;' % hex(ord(ch))[1:] + else: + return '&#%s;' % ord(ch) + +def _hash_text(text): + return 'md5:'+md5(text.encode("utf-8")).hexdigest() + + +#---- mainline + +class _NoReflowFormatter(optparse.IndentedHelpFormatter): + """An optparse formatter that does NOT reflow the description.""" + def format_description(self, description): + return description or "" + +def _test(): + import doctest + doctest.testmod() + +def main(argv=None): + if argv is None: + argv = sys.argv + if not logging.root.handlers: + logging.basicConfig() + + usage = "usage: %prog [PATHS...]" + version = "%prog "+__version__ + parser = optparse.OptionParser(prog="markdown2", usage=usage, + version=version, description=cmdln_desc, + formatter=_NoReflowFormatter()) + parser.add_option("-v", "--verbose", dest="log_level", + action="store_const", const=logging.DEBUG, + help="more verbose output") + parser.add_option("--encoding", + help="specify encoding of text content") + parser.add_option("--html4tags", action="store_true", default=False, + help="use HTML 4 style for empty element tags") + parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode", + help="sanitize literal HTML: 'escape' escapes " + "HTML meta chars, 'replace' replaces with an " + "[HTML_REMOVED] note") + parser.add_option("-x", "--extras", action="append", + help="Turn on specific extra features (not part of " + "the core Markdown spec). Supported values: " + "'code-friendly' disables _/__ for emphasis; " + "'code-color' adds code-block syntax coloring; " + "'link-patterns' adds auto-linking based on patterns; " + "'footnotes' adds the footnotes syntax;" + "'xml' passes one-liner processing instructions and namespaced XML tags;" + "'pyshell' to put unindented Python interactive shell sessions in a <code> block.") + parser.add_option("--use-file-vars", + help="Look for and use Emacs-style 'markdown-extras' " + "file var to turn on extras. 
See " + "<http://code.google.com/p/python-markdown2/wiki/Extras>.") + parser.add_option("--link-patterns-file", + help="path to a link pattern file") + parser.add_option("--self-test", action="store_true", + help="run internal self-tests (some doctests)") + parser.add_option("--compare", action="store_true", + help="run against Markdown.pl as well (for testing)") + parser.set_defaults(log_level=logging.INFO, compare=False, + encoding="utf-8", safe_mode=None, use_file_vars=False) + opts, paths = parser.parse_args() + log.setLevel(opts.log_level) + + if opts.self_test: + return _test() + + if opts.extras: + extras = {} + for s in opts.extras: + splitter = re.compile("[,;: ]+") + for e in splitter.split(s): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + extras[ename] = earg + else: + extras = None + + if opts.link_patterns_file: + link_patterns = [] + f = open(opts.link_patterns_file) + try: + for i, line in enumerate(f.readlines()): + if not line.strip(): continue + if line.lstrip().startswith("#"): continue + try: + pat, href = line.rstrip().rsplit(None, 1) + except ValueError: + raise MarkdownError("%s:%d: invalid link pattern line: %r" + % (opts.link_patterns_file, i+1, line)) + link_patterns.append( + (_regex_from_encoded_pattern(pat), href)) + finally: + f.close() + else: + link_patterns = None + + from os.path import join, dirname, abspath, exists + markdown_pl = join(dirname(dirname(abspath(__file__))), "test", + "Markdown.pl") + for path in paths: + if opts.compare: + print "==== Markdown.pl ====" + perl_cmd = 'perl %s "%s"' % (markdown_pl, path) + o = os.popen(perl_cmd) + perl_html = o.read() + o.close() + sys.stdout.write(perl_html) + print "==== markdown2.py ====" + html = markdown_path(path, encoding=opts.encoding, + html4tags=opts.html4tags, + safe_mode=opts.safe_mode, + extras=extras, link_patterns=link_patterns, + use_file_vars=opts.use_file_vars) + sys.stdout.write( + html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) + if opts.compare: + test_dir = join(dirname(dirname(abspath(__file__))), "test") + if exists(join(test_dir, "test_markdown2.py")): + sys.path.insert(0, test_dir) + from test_markdown2 import norm_html_from_html + norm_html = norm_html_from_html(html) + norm_perl_html = norm_html_from_html(perl_html) + else: + norm_html = html + norm_perl_html = perl_html + print "==== match? %r ====" % (norm_perl_html == norm_html) + + +if __name__ == "__main__": + sys.exit( main(sys.argv) ) + diff --git a/vendor/tornado/demos/blog/schema.sql b/vendor/tornado/demos/blog/schema.sql new file mode 100644 index 000000000000..86bff9a8ad30 --- /dev/null +++ b/vendor/tornado/demos/blog/schema.sql @@ -0,0 +1,44 @@ +-- Copyright 2009 FriendFeed +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); you may +-- not use this file except in compliance with the License. You may obtain +-- a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +-- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +-- License for the specific language governing permissions and limitations +-- under the License. 
+ +-- To create the database: +-- CREATE DATABASE blog; +-- GRANT ALL PRIVILEGES ON blog.* TO 'blog'@'localhost' IDENTIFIED BY 'blog'; +-- +-- To reload the tables: +-- mysql --user=blog --password=blog --database=blog < schema.sql + +SET SESSION storage_engine = "InnoDB"; +SET SESSION time_zone = "+0:00"; +ALTER DATABASE CHARACTER SET "utf8"; + +DROP TABLE IF EXISTS entries; +CREATE TABLE entries ( + id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, + author_id INT NOT NULL REFERENCES authors(id), + slug VARCHAR(100) NOT NULL UNIQUE, + title VARCHAR(512) NOT NULL, + markdown MEDIUMTEXT NOT NULL, + html MEDIUMTEXT NOT NULL, + published DATETIME NOT NULL, + updated TIMESTAMP NOT NULL, + KEY (published) +); + +DROP TABLE IF EXISTS authors; +CREATE TABLE authors ( + id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, + email VARCHAR(100) NOT NULL UNIQUE, + name VARCHAR(100) NOT NULL +); diff --git a/vendor/tornado/demos/blog/static/blog.css b/vendor/tornado/demos/blog/static/blog.css new file mode 100644 index 000000000000..8902ec1f22e5 --- /dev/null +++ b/vendor/tornado/demos/blog/static/blog.css @@ -0,0 +1,153 @@ +/* + * Copyright 2009 Facebook + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. You may obtain + * a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +body { + background: white; + color: black; + margin: 15px; + margin-top: 0; +} + +body, +input, +textarea { + font-family: Georgia, serif; + font-size: 12pt; +} + +table { + border-collapse: collapse; + border: 0; +} + +td { + border: 0; + padding: 0; +} + +h1, +h2, +h3, +h4 { + font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; + margin: 0; +} + +h1 { + font-size: 20pt; +} + +pre, +code { + font-family: monospace; + color: #060; +} + +pre { + margin-left: 1em; + padding-left: 1em; + border-left: 1px solid silver; + line-height: 14pt; +} + +a, +a code { + color: #00c; +} + +#body { + max-width: 800px; + margin: auto; +} + +#header { + background-color: #3b5998; + padding: 5px; + padding-left: 10px; + padding-right: 10px; + margin-bottom: 1em; +} + +#header, +#header a { + color: white; +} + +#header h1 a { + text-decoration: none; +} + +#footer, +#content { + margin-left: 10px; + margin-right: 10px; +} + +#footer { + margin-top: 3em; +} + +.entry h1 a { + color: black; + text-decoration: none; +} + +.entry { + margin-bottom: 2em; +} + +.entry .date { + margin-top: 3px; +} + +.entry p { + margin: 0; + margin-bottom: 1em; +} + +.entry .body { + margin-top: 1em; + line-height: 16pt; +} + +.compose td { + vertical-align: middle; + padding-bottom: 5px; +} + +.compose td.field { + padding-right: 10px; +} + +.compose .title, +.compose .submit { + font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; + font-weight: bold; +} + +.compose .title { + font-size: 20pt; +} + +.compose .title, +.compose .markdown { + width: 100%; +} + +.compose .markdown { + height: 500px; + line-height: 16pt; +} diff --git a/vendor/tornado/demos/blog/templates/archive.html b/vendor/tornado/demos/blog/templates/archive.html new file mode 100644 index 000000000000..dcca9511a489 --- /dev/null +++ 
b/vendor/tornado/demos/blog/templates/archive.html @@ -0,0 +1,31 @@ +{% extends "base.html" %} + +{% block head %} + <style type="text/css"> + ul.archive { + list-style-type: none; + margin: 0; + padding: 0; + } + + ul.archive li { + margin-bottom: 1em; + } + + ul.archive .title { + font-family: "Helvetica Nue", Helvetica, Arial, sans-serif; + font-size: 14pt; + } + </style> +{% end %} + +{% block body %} + <ul class="archive"> + {% for entry in entries %} + <li> + <div class="title"><a href="/entry/{{ entry.slug }}">{{ escape(entry.title) }}</a></div> + <div class="date">{{ locale.format_date(entry.published, full_format=True, shorter=True) }}</div> + </li> + {% end %} + </ul> +{% end %} diff --git a/vendor/tornado/demos/blog/templates/base.html b/vendor/tornado/demos/blog/templates/base.html new file mode 100644 index 000000000000..038c5b3ffffc --- /dev/null +++ b/vendor/tornado/demos/blog/templates/base.html @@ -0,0 +1,27 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml"> + <head> + <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> + <title>{{ escape(handler.settings["blog_title"]) }} + + + {% block head %}{% end %} + + +

                                + +
                                {% block body %}{% end %}
                                +
                                + {% block bottom %}{% end %} + + diff --git a/vendor/tornado/demos/blog/templates/compose.html b/vendor/tornado/demos/blog/templates/compose.html new file mode 100644 index 000000000000..bc054b334945 --- /dev/null +++ b/vendor/tornado/demos/blog/templates/compose.html @@ -0,0 +1,42 @@ +{% extends "base.html" %} + +{% block body %} +
                                +
                                +
                                + + {% if entry %} + + {% end %} + {{ xsrf_form_html() }} + +{% end %} + +{% block bottom %} + + +{% end %} + diff --git a/vendor/tornado/demos/blog/templates/entry.html b/vendor/tornado/demos/blog/templates/entry.html new file mode 100644 index 000000000000..43c835deadad --- /dev/null +++ b/vendor/tornado/demos/blog/templates/entry.html @@ -0,0 +1,5 @@ +{% extends "base.html" %} + +{% block body %} + {{ modules.Entry(entry) }} +{% end %} diff --git a/vendor/tornado/demos/blog/templates/feed.xml b/vendor/tornado/demos/blog/templates/feed.xml new file mode 100644 index 000000000000..c6c368656c70 --- /dev/null +++ b/vendor/tornado/demos/blog/templates/feed.xml @@ -0,0 +1,26 @@ + + + {% set date_format = "%Y-%m-%dT%H:%M:%SZ" %} + {{ escape(handler.settings["blog_title"]) }} + {% if len(entries) > 0 %} + {{ max(e.updated for e in entries).strftime(date_format) }} + {% else %} + {{ datetime.datetime.utcnow().strftime(date_format) }} + {% end %} + http://{{ request.host }}/ + + + {{ escape(handler.settings["blog_title"]) }} + {% for entry in entries %} + + http://{{ request.host }}/entry/{{ entry.slug }} + {{ escape(entry.title) }} + + {{ entry.updated.strftime(date_format) }} + {{ entry.published.strftime(date_format) }} + +
                                {{ entry.html }}
                                +
                                +
                                + {% end %} +
                                diff --git a/vendor/tornado/demos/blog/templates/home.html b/vendor/tornado/demos/blog/templates/home.html new file mode 100644 index 000000000000..dd069a97f3f3 --- /dev/null +++ b/vendor/tornado/demos/blog/templates/home.html @@ -0,0 +1,8 @@ +{% extends "base.html" %} + +{% block body %} + {% for entry in entries %} + {{ modules.Entry(entry) }} + {% end %} + +{% end %} diff --git a/vendor/tornado/demos/blog/templates/modules/entry.html b/vendor/tornado/demos/blog/templates/modules/entry.html new file mode 100644 index 000000000000..27ea0d76c2ff --- /dev/null +++ b/vendor/tornado/demos/blog/templates/modules/entry.html @@ -0,0 +1,8 @@ +
                                +

                                {{ escape(entry.title) }}

                                +
                                {{ locale.format_date(entry.published, full_format=True, shorter=True) }}
                                +
                                {{ entry.html }}
                                + {% if current_user %} + + {% end %} +
                                diff --git a/vendor/tornado/demos/chat/chatdemo.py b/vendor/tornado/demos/chat/chatdemo.py new file mode 100755 index 000000000000..7086592ec424 --- /dev/null +++ b/vendor/tornado/demos/chat/chatdemo.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import tornado.auth +import tornado.escape +import tornado.httpserver +import tornado.ioloop +import tornado.options +import tornado.web +import os.path +import uuid + +from tornado.options import define, options + +define("port", default=8888, help="run on the given port", type=int) + + +class Application(tornado.web.Application): + def __init__(self): + handlers = [ + (r"/", MainHandler), + (r"/auth/login", AuthLoginHandler), + (r"/auth/logout", AuthLogoutHandler), + (r"/a/message/new", MessageNewHandler), + (r"/a/message/updates", MessageUpdatesHandler), + ] + settings = dict( + cookie_secret="43oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=", + login_url="/auth/login", + template_path=os.path.join(os.path.dirname(__file__), "templates"), + static_path=os.path.join(os.path.dirname(__file__), "static"), + xsrf_cookies=True, + ) + tornado.web.Application.__init__(self, handlers, **settings) + + +class BaseHandler(tornado.web.RequestHandler): + def get_current_user(self): + user_json = self.get_secure_cookie("user") + if not user_json: return None + return tornado.escape.json_decode(user_json) + + +class MainHandler(BaseHandler): + @tornado.web.authenticated + def get(self): + self.render("index.html", messages=MessageMixin.cache) + + +class MessageMixin(object): + waiters = [] + cache = [] + cache_size = 200 + + def wait_for_messages(self, callback, cursor=None): + cls = MessageMixin + if cursor: + index = 0 + for i in xrange(len(cls.cache)): + index = len(cls.cache) - i - 1 + if cls.cache[index]["id"] == cursor: break + recent = cls.cache[index + 1:] + if recent: + callback(recent) + return + cls.waiters.append(callback) + + def new_messages(self, messages): + cls = MessageMixin + logging.info("Sending new message to %r listeners", len(cls.waiters)) + for callback in cls.waiters: + try: + callback(messages) + except: + logging.error("Error in waiter callback", exc_info=True) + cls.waiters = [] + cls.cache.extend(messages) + if len(cls.cache) > self.cache_size: + cls.cache = cls.cache[-self.cache_size:] + + +class MessageNewHandler(BaseHandler, MessageMixin): + @tornado.web.authenticated + def post(self): + message = { + "id": str(uuid.uuid4()), + "from": self.current_user["first_name"], + "body": self.get_argument("body"), + } + message["html"] = self.render_string("message.html", message=message) + if self.get_argument("next", None): + self.redirect(self.get_argument("next")) + else: + self.write(message) + self.new_messages([message]) + + +class MessageUpdatesHandler(BaseHandler, MessageMixin): + @tornado.web.authenticated + @tornado.web.asynchronous + def post(self): + cursor = self.get_argument("cursor", None) + 
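# Long polling happens here: wait_for_messages() either fires the callback
# immediately (when MessageMixin.cache already holds messages newer than
# `cursor`) or parks it in MessageMixin.waiters, leaving this request open
# until new_messages() pushes the next batch; @tornado.web.asynchronous
# keeps Tornado from finishing the response when post() returns.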
self.wait_for_messages(self.async_callback(self.on_new_messages), + cursor=cursor) + + def on_new_messages(self, messages): + # Closed client connection + if self.request.connection.stream.closed(): + return + self.finish(dict(messages=messages)) + + +class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("openid.mode", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authenticate_redirect(ax_attrs=["name"]) + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "Google auth failed") + self.set_secure_cookie("user", tornado.escape.json_encode(user)) + self.redirect("/") + + +class AuthLogoutHandler(BaseHandler): + def get(self): + self.clear_cookie("user") + self.write("You are now logged out") + + +def main(): + tornado.options.parse_command_line() + http_server = tornado.httpserver.HTTPServer(Application()) + http_server.listen(options.port) + tornado.ioloop.IOLoop.instance().start() + + +if __name__ == "__main__": + main() diff --git a/vendor/tornado/demos/chat/static/chat.css b/vendor/tornado/demos/chat/static/chat.css new file mode 100644 index 000000000000..a400c326057f --- /dev/null +++ b/vendor/tornado/demos/chat/static/chat.css @@ -0,0 +1,56 @@ +/* + * Copyright 2009 FriendFeed + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. You may obtain + * a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +body { + background: white; + margin: 10px; +} + +body, +input { + font-family: sans-serif; + font-size: 10pt; + color: black; +} + +table { + border-collapse: collapse; + border: 0; +} + +td { + border: 0; + padding: 0; +} + +#body { + position: absolute; + bottom: 10px; + left: 10px; +} + +#input { + margin-top: 0.5em; +} + +#inbox .message { + padding-top: 0.25em; +} + +#nav { + float: right; + z-index: 99; +} diff --git a/vendor/tornado/demos/chat/static/chat.js b/vendor/tornado/demos/chat/static/chat.js new file mode 100644 index 000000000000..0054c710d63d --- /dev/null +++ b/vendor/tornado/demos/chat/static/chat.js @@ -0,0 +1,135 @@ +// Copyright 2009 FriendFeed +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +$(document).ready(function() { + if (!window.console) window.console = {}; + if (!window.console.log) window.console.log = function() {}; + + $("#messageform").live("submit", function() { + newMessage($(this)); + return false; + }); + $("#messageform").live("keypress", function(e) { + if (e.keyCode == 13) { + newMessage($(this)); + return false; + } + }); + $("#message").select(); + updater.poll(); +}); + +function newMessage(form) { + var message = form.formToDict(); + var disabled = form.find("input[type=submit]"); + disabled.disable(); + $.postJSON("/a/message/new", message, function(response) { + updater.showMessage(response); + if (message.id) { + form.parent().remove(); + } else { + form.find("input[type=text]").val("").select(); + disabled.enable(); + } + }); +} + +function getCookie(name) { + var r = document.cookie.match("\\b" + name + "=([^;]*)\\b"); + return r ? r[1] : undefined; +} + +jQuery.postJSON = function(url, args, callback) { + args._xsrf = getCookie("_xsrf"); + $.ajax({url: url, data: $.param(args), dataType: "text", type: "POST", + success: function(response) { + if (callback) callback(eval("(" + response + ")")); + }, error: function(response) { + console.log("ERROR:", response) + }}); +}; + +jQuery.fn.formToDict = function() { + var fields = this.serializeArray(); + var json = {} + for (var i = 0; i < fields.length; i++) { + json[fields[i].name] = fields[i].value; + } + if (json.next) delete json.next; + return json; +}; + +jQuery.fn.disable = function() { + this.enable(false); + return this; +}; + +jQuery.fn.enable = function(opt_enable) { + if (arguments.length && !opt_enable) { + this.attr("disabled", "disabled"); + } else { + this.removeAttr("disabled"); + } + return this; +}; + +var updater = { + errorSleepTime: 500, + cursor: null, + + poll: function() { + var args = {"_xsrf": getCookie("_xsrf")}; + if (updater.cursor) args.cursor = updater.cursor; + $.ajax({url: "/a/message/updates", type: "POST", dataType: "text", + data: $.param(args), success: updater.onSuccess, + error: updater.onError}); + }, + + onSuccess: function(response) { + try { + updater.newMessages(eval("(" + response + ")")); + } catch (e) { + updater.onError(); + return; + } + updater.errorSleepTime = 500; + window.setTimeout(updater.poll, 0); + }, + + onError: function(response) { + updater.errorSleepTime *= 2; + console.log("Poll error; sleeping for", updater.errorSleepTime, "ms"); + window.setTimeout(updater.poll, updater.errorSleepTime); + }, + + newMessages: function(response) { + if (!response.messages) return; + updater.cursor = response.cursor; + var messages = response.messages; + updater.cursor = messages[messages.length - 1].id; + console.log(messages.length, "new messages, cursor:", updater.cursor); + for (var i = 0; i < messages.length; i++) { + updater.showMessage(messages[i]); + } + }, + + showMessage: function(message) { + var existing = $("#m" + message.id); + if (existing.length > 0) return; + var node = $(message.html); + node.hide(); + $("#inbox").append(node); + node.slideDown(); + } +}; diff --git a/vendor/tornado/demos/chat/templates/index.html b/vendor/tornado/demos/chat/templates/index.html new file mode 100644 index 000000000000..de051d852bb7 --- /dev/null +++ b/vendor/tornado/demos/chat/templates/index.html @@ -0,0 +1,37 @@ + + + + + Tornado Chat Demo + + + + +
                                +
                                + {% for message in messages %} + {% include "message.html" %} + {% end %} +
                                +
                                +
                                +
                                + + + + +
                                + + + {{ xsrf_form_html() }} +
                                + +
                                +
                                + + + + diff --git a/vendor/tornado/demos/chat/templates/message.html b/vendor/tornado/demos/chat/templates/message.html new file mode 100644 index 000000000000..4445cbdfaff7 --- /dev/null +++ b/vendor/tornado/demos/chat/templates/message.html @@ -0,0 +1 @@ +
                                {{ escape(message["from"]) }}: {{ escape(message["body"]) }}
                                diff --git a/vendor/tornado/demos/facebook/README b/vendor/tornado/demos/facebook/README new file mode 100644 index 000000000000..2f0dc28e84f5 --- /dev/null +++ b/vendor/tornado/demos/facebook/README @@ -0,0 +1,8 @@ +Running the Tornado Facebook example +===================================== +To work with the provided Facebook api key, this example must be +accessed at http://localhost:8888/ to match the Connect URL set in the +example application. + +To use any other domain, a new Facebook application must be registered +with a Connect URL set to that domain. diff --git a/vendor/tornado/demos/facebook/facebook.py b/vendor/tornado/demos/facebook/facebook.py new file mode 100755 index 000000000000..0c984ddaa0a0 --- /dev/null +++ b/vendor/tornado/demos/facebook/facebook.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import os.path +import tornado.auth +import tornado.escape +import tornado.httpserver +import tornado.ioloop +import tornado.options +import tornado.web +import uimodules + +from tornado.options import define, options + +define("port", default=8888, help="run on the given port", type=int) +define("facebook_api_key", help="your Facebook application API key", + default="9e2ada1b462142c4dfcc8e894ea1e37c") +define("facebook_secret", help="your Facebook application secret", + default="32fc6114554e3c53d5952594510021e2") + + +class Application(tornado.web.Application): + def __init__(self): + handlers = [ + (r"/", MainHandler), + (r"/auth/login", AuthLoginHandler), + (r"/auth/logout", AuthLogoutHandler), + ] + settings = dict( + cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=", + login_url="/auth/login", + template_path=os.path.join(os.path.dirname(__file__), "templates"), + static_path=os.path.join(os.path.dirname(__file__), "static"), + xsrf_cookies=True, + facebook_api_key=options.facebook_api_key, + facebook_secret=options.facebook_secret, + ui_modules= {"Post": PostModule}, + debug=True, + ) + tornado.web.Application.__init__(self, handlers, **settings) + + +class BaseHandler(tornado.web.RequestHandler): + def get_current_user(self): + user_json = self.get_secure_cookie("user") + if not user_json: return None + return tornado.escape.json_decode(user_json) + + +class MainHandler(BaseHandler, tornado.auth.FacebookMixin): + @tornado.web.authenticated + @tornado.web.asynchronous + def get(self): + self.facebook_request( + method="stream.get", + callback=self.async_callback(self._on_stream), + session_key=self.current_user["session_key"]) + + def _on_stream(self, stream): + if stream is None: + # Session may have expired + self.redirect("/auth/login") + return + # Turn profiles into a dict mapping id => profile + stream["profiles"] = dict((p["id"], p) for p in stream["profiles"]) + self.render("stream.html", stream=stream) + + +class AuthLoginHandler(BaseHandler, tornado.auth.FacebookMixin): + @tornado.web.asynchronous + def get(self): + if 
self.get_argument("session", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authorize_redirect("read_stream") + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "Facebook auth failed") + self.set_secure_cookie("user", tornado.escape.json_encode(user)) + self.redirect(self.get_argument("next", "/")) + + +class AuthLogoutHandler(BaseHandler, tornado.auth.FacebookMixin): + @tornado.web.asynchronous + def get(self): + self.clear_cookie("user") + if not self.current_user: + self.redirect(self.get_argument("next", "/")) + return + self.facebook_request( + method="auth.revokeAuthorization", + callback=self.async_callback(self._on_deauthorize), + session_key=self.current_user["session_key"]) + + def _on_deauthorize(self, response): + self.redirect(self.get_argument("next", "/")) + + +class PostModule(tornado.web.UIModule): + def render(self, post, actor): + return self.render_string("modules/post.html", post=post, actor=actor) + + +def main(): + tornado.options.parse_command_line() + http_server = tornado.httpserver.HTTPServer(Application()) + http_server.listen(options.port) + tornado.ioloop.IOLoop.instance().start() + + +if __name__ == "__main__": + main() diff --git a/vendor/tornado/demos/facebook/static/facebook.css b/vendor/tornado/demos/facebook/static/facebook.css new file mode 100644 index 000000000000..4fee72678fad --- /dev/null +++ b/vendor/tornado/demos/facebook/static/facebook.css @@ -0,0 +1,97 @@ +/* + * Copyright 2009 Facebook + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. You may obtain + * a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +body { + background: white; + color: black; + margin: 15px; +} + +body, +input, +textarea { + font-family: "Lucida Grande", Tahoma, Verdana, sans-serif; + font-size: 10pt; +} + +table { + border-collapse: collapse; + border: 0; +} + +td { + border: 0; + padding: 0; +} + +img { + border: 0; +} + +a { + text-decoration: none; + color: #3b5998; +} + +a:hover { + text-decoration: underline; +} + +.post { + border-bottom: 1px solid #eeeeee; + min-height: 50px; + padding-bottom: 10px; + margin-top: 10px; +} + +.post .picture { + float: left; +} + +.post .picture img { + height: 50px; + width: 50px; +} + +.post .body { + margin-left: 60px; +} + +.post .media img { + border: 1px solid #cccccc; + padding: 3px; +} + +.post .media:hover img { + border: 1px solid #3b5998; +} + +.post a.actor { + font-weight: bold; +} + +.post .meta { + font-size: 11px; +} + +.post a.permalink { + color: #777777; +} + +#body { + max-width: 700px; + margin: auto; +} diff --git a/vendor/tornado/demos/facebook/static/facebook.js b/vendor/tornado/demos/facebook/static/facebook.js new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vendor/tornado/demos/facebook/templates/modules/post.html b/vendor/tornado/demos/facebook/templates/modules/post.html new file mode 100644 index 000000000000..6b50ac0f726e --- /dev/null +++ b/vendor/tornado/demos/facebook/templates/modules/post.html @@ -0,0 +1,29 @@ +
                                +
                                + +
                                +
                                + {{ escape(actor["name"]) }} + {% if post["message"] %} + {{ escape(post["message"]) }} + {% end %} + {% if post["attachment"] %} +
                                + {% if post["attachment"].get("name") %} + + {% end %} + {% if post["attachment"].get("description") %} +
                                {{ post["attachment"]["description"] }}
                                + {% end %} + {% for media in filter(lambda m: m.get("src") and m["type"] in ("photo", "link"), post["attachment"].get("media", [])) %} + + {{ escape(media.get( + + {% end %} +
                                + {% end %} + +
                                +
                                diff --git a/vendor/tornado/demos/facebook/templates/stream.html b/vendor/tornado/demos/facebook/templates/stream.html new file mode 100644 index 000000000000..19baa28cbf01 --- /dev/null +++ b/vendor/tornado/demos/facebook/templates/stream.html @@ -0,0 +1,22 @@ + + + + + Tornado Facebook Stream Demo + + + +
                                +
                                + {{ escape(current_user["name"]) }} - + {{ _("Sign out") }} +
                                + +
                                + {% for post in stream["posts"] %} + {{ modules.Post(post, stream["profiles"][post["actor_id"]]) }} + {% end %} +
                                +
                                + + diff --git a/vendor/tornado/demos/facebook/uimodules.py b/vendor/tornado/demos/facebook/uimodules.py new file mode 100644 index 000000000000..1173db634e22 --- /dev/null +++ b/vendor/tornado/demos/facebook/uimodules.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tornado.web + + +class Entry(tornado.web.UIModule): + def render(self): + return '
                                ENTRY
                                ' diff --git a/vendor/tornado/demos/helloworld/helloworld.py b/vendor/tornado/demos/helloworld/helloworld.py new file mode 100755 index 000000000000..0f1ed61ff5e0 --- /dev/null +++ b/vendor/tornado/demos/helloworld/helloworld.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import tornado.httpserver +import tornado.ioloop +import tornado.options +import tornado.web + +from tornado.options import define, options + +define("port", default=8888, help="run on the given port", type=int) + + +class MainHandler(tornado.web.RequestHandler): + def get(self): + self.write("Hello, world") + + +def main(): + tornado.options.parse_command_line() + application = tornado.web.Application([ + (r"/", MainHandler), + ]) + http_server = tornado.httpserver.HTTPServer(application) + http_server.listen(options.port) + tornado.ioloop.IOLoop.instance().start() + + +if __name__ == "__main__": + main() diff --git a/vendor/tornado/setup.py b/vendor/tornado/setup.py new file mode 100644 index 000000000000..5cb69df2dafc --- /dev/null +++ b/vendor/tornado/setup.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import distutils.core +import sys +# Importing setuptools adds some features like "setup.py develop", but +# it's optional so swallow the error if it's not there. 
+try: + import setuptools +except ImportError: + pass + +# Build the epoll extension for Linux systems with Python < 2.6 +extensions = [] +major, minor = sys.version_info[:2] +python_26 = (major > 2 or (major == 2 and minor >= 6)) +if "linux" in sys.platform.lower() and not python_26: + extensions.append(distutils.core.Extension( + "tornado.epoll", ["tornado/epoll.c"])) + +distutils.core.setup( + name="tornado", + version="0.2", + packages = ["tornado"], + ext_modules = extensions, + author="Facebook", + author_email="python-tornado@googlegroups.com", + url="http://www.tornadoweb.org/", + license="http://www.apache.org/licenses/LICENSE-2.0", + description="Tornado is an open source version of the scalable, non-blocking web server and and tools that power FriendFeed", +) diff --git a/vendor/tornado/tornado/__init__.py b/vendor/tornado/tornado/__init__.py new file mode 100644 index 000000000000..8f73764eb224 --- /dev/null +++ b/vendor/tornado/tornado/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The Tornado web server and tools.""" diff --git a/vendor/tornado/tornado/auth.py b/vendor/tornado/tornado/auth.py new file mode 100644 index 000000000000..f67d9e54829f --- /dev/null +++ b/vendor/tornado/tornado/auth.py @@ -0,0 +1,883 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementations of various third-party authentication schemes. + +All the classes in this file are class Mixins designed to be used with +web.py RequestHandler classes. The primary methods for each service are +authenticate_redirect(), authorize_redirect(), and get_authenticated_user(). +The former should be called to redirect the user to, e.g., the OpenID +authentication page on the third party service, and the latter should +be called upon return to get the user data from the data returned by +the third party service. + +They all take slightly different arguments due to the fact all these +services implement authentication and authorization slightly differently. +See the individual service classes below for complete documentation. 
+ +Example usage for Google OpenID: + +class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("openid.mode", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authenticate_redirect() + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "Google auth failed") + # Save the user with, e.g., set_secure_cookie() + +""" + +import base64 +import binascii +import cgi +import hashlib +import hmac +import httpclient +import escape +import logging +import time +import urllib +import urlparse +import uuid + +_log = logging.getLogger("tornado.auth") + +class OpenIdMixin(object): + """Abstract implementation of OpenID and Attribute Exchange. + + See GoogleMixin below for example implementations. + """ + def authenticate_redirect(self, callback_uri=None, + ax_attrs=["name","email","language","username"]): + """Returns the authentication URL for this service. + + After authentication, the service will redirect back to the given + callback URI. + + We request the given attributes for the authenticated user by + default (name, email, language, and username). If you don't need + all those attributes for your app, you can request fewer with + the ax_attrs keyword argument. + """ + callback_uri = callback_uri or self.request.path + args = self._openid_args(callback_uri, ax_attrs=ax_attrs) + self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args)) + + def get_authenticated_user(self, callback): + """Fetches the authenticated user data upon redirect. + + This method should be called by the handler that receives the + redirect from the authenticate_redirect() or authorize_redirect() + methods. + """ + # Verify the OpenID response via direct request to the OP + args = dict((k, v[-1]) for k, v in self.request.arguments.iteritems()) + args["openid.mode"] = u"check_authentication" + url = self._OPENID_ENDPOINT + "?" + urllib.urlencode(args) + http = httpclient.AsyncHTTPClient() + http.fetch(url, self.async_callback( + self._on_authentication_verified, callback)) + + def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None): + url = urlparse.urljoin(self.request.full_url(), callback_uri) + args = { + "openid.ns": "http://specs.openid.net/auth/2.0", + "openid.claimed_id": + "http://specs.openid.net/auth/2.0/identifier_select", + "openid.identity": + "http://specs.openid.net/auth/2.0/identifier_select", + "openid.return_to": url, + "openid.realm": "http://" + self.request.host + "/", + "openid.mode": "checkid_setup", + } + if ax_attrs: + args.update({ + "openid.ns.ax": "http://openid.net/srv/ax/1.0", + "openid.ax.mode": "fetch_request", + }) + ax_attrs = set(ax_attrs) + required = [] + if "name" in ax_attrs: + ax_attrs -= set(["name", "firstname", "fullname", "lastname"]) + required += ["firstname", "fullname", "lastname"] + args.update({ + "openid.ax.type.firstname": + "http://axschema.org/namePerson/first", + "openid.ax.type.fullname": + "http://axschema.org/namePerson", + "openid.ax.type.lastname": + "http://axschema.org/namePerson/last", + }) + known_attrs = { + "email": "http://axschema.org/contact/email", + "language": "http://axschema.org/pref/language", + "username": "http://axschema.org/namePerson/friendly", + } + for name in ax_attrs: + args["openid.ax.type." 
+ name] = known_attrs[name] + required.append(name) + args["openid.ax.required"] = ",".join(required) + if oauth_scope: + args.update({ + "openid.ns.oauth": + "http://specs.openid.net/extensions/oauth/1.0", + "openid.oauth.consumer": self.request.host.split(":")[0], + "openid.oauth.scope": oauth_scope, + }) + return args + + def _on_authentication_verified(self, callback, response): + if response.error or u"is_valid:true" not in response.body: + _log.warning("Invalid OpenID response: %s", response.error or + response.body) + callback(None) + return + + # Make sure we got back at least an email from attribute exchange + ax_ns = None + for name, values in self.request.arguments.iteritems(): + if name.startswith("openid.ns.") and \ + values[-1] == u"http://openid.net/srv/ax/1.0": + ax_ns = name[10:] + break + def get_ax_arg(uri): + if not ax_ns: return u"" + prefix = "openid." + ax_ns + ".type." + ax_name = None + for name, values in self.request.arguments.iteritems(): + if values[-1] == uri and name.startswith(prefix): + part = name[len(prefix):] + ax_name = "openid." + ax_ns + ".value." + part + break + if not ax_name: return u"" + return self.get_argument(ax_name, u"") + + email = get_ax_arg("http://axschema.org/contact/email") + name = get_ax_arg("http://axschema.org/namePerson") + first_name = get_ax_arg("http://axschema.org/namePerson/first") + last_name = get_ax_arg("http://axschema.org/namePerson/last") + username = get_ax_arg("http://axschema.org/namePerson/friendly") + locale = get_ax_arg("http://axschema.org/pref/language").lower() + user = dict() + name_parts = [] + if first_name: + user["first_name"] = first_name + name_parts.append(first_name) + if last_name: + user["last_name"] = last_name + name_parts.append(last_name) + if name: + user["name"] = name + elif name_parts: + user["name"] = u" ".join(name_parts) + elif email: + user["name"] = email.split("@")[0] + if email: user["email"] = email + if locale: user["locale"] = locale + if username: user["username"] = username + callback(user) + + +class OAuthMixin(object): + """Abstract implementation of OAuth. + + See TwitterMixin and FriendFeedMixin below for example implementations. + """ + def authorize_redirect(self, callback_uri=None): + """Redirects the user to obtain OAuth authorization for this service. + + Twitter and FriendFeed both require that you register a Callback + URL with your application. You should call this method to log the + user in, and then call get_authenticated_user() in the handler + you registered as your Callback URL to complete the authorization + process. + + This method sets a cookie called _oauth_request_token which is + subsequently used (and cleared) in get_authenticated_user for + security purposes. + """ + if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): + raise Exception("This service does not support oauth_callback") + http = httpclient.AsyncHTTPClient() + http.fetch(self._oauth_request_token_url(), self.async_callback( + self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri)) + + def get_authenticated_user(self, callback): + """Gets the OAuth authorized user and access token on callback. + + This method should be called from the handler for your registered + OAuth Callback URL to complete the registration process. We call + callback with the authenticated user, which in addition to standard + attributes like 'name' includes the 'access_key' attribute, which + contains the OAuth access you can use to make authorized requests + to this service on behalf of the user. 
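As a hedged sketch of the contract OAuthMixin expects from concrete subclasses (modeled on the Twitter and FriendFeed mixins below), a new OAuth 1.0 service would supply endpoint URLs, a consumer token, and a user lookup; every name here is a hypothetical placeholder:

    class ExampleOAuthMixin(OAuthMixin):
        # Hypothetical endpoints for an imaginary service.
        _OAUTH_REQUEST_TOKEN_URL = "https://api.example.com/oauth/request_token"
        _OAUTH_ACCESS_TOKEN_URL = "https://api.example.com/oauth/access_token"
        _OAUTH_AUTHORIZE_URL = "https://api.example.com/oauth/authorize"

        def _oauth_consumer_token(self):
            # Mixed into a RequestHandler, so require_setting()/settings exist.
            self.require_setting("example_consumer_key", "Example OAuth")
            self.require_setting("example_consumer_secret", "Example OAuth")
            return dict(key=self.settings["example_consumer_key"],
                        secret=self.settings["example_consumer_secret"])

        def _oauth_get_user(self, access_token, callback):
            # A real mixin would fetch the user's profile with the new token;
            # here we just echo the token key as a placeholder user dict.
            callback(dict(username=access_token["key"]))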
+ """ + request_key = self.get_argument("oauth_token") + request_cookie = self.get_cookie("_oauth_request_token") + if not request_cookie: + _log.warning("Missing OAuth request token cookie") + callback(None) + return + cookie_key, cookie_secret = request_cookie.split("|") + if cookie_key != request_key: + _log.warning("Request token does not match cookie") + callback(None) + return + token = dict(key=cookie_key, secret=cookie_secret) + http = httpclient.AsyncHTTPClient() + http.fetch(self._oauth_access_token_url(token), self.async_callback( + self._on_access_token, callback)) + + def _oauth_request_token_url(self): + consumer_token = self._oauth_consumer_token() + url = self._OAUTH_REQUEST_TOKEN_URL + args = dict( + oauth_consumer_key=consumer_token["key"], + oauth_signature_method="HMAC-SHA1", + oauth_timestamp=str(int(time.time())), + oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes), + oauth_version="1.0", + ) + signature = _oauth_signature(consumer_token, "GET", url, args) + args["oauth_signature"] = signature + return url + "?" + urllib.urlencode(args) + + def _on_request_token(self, authorize_url, callback_uri, response): + if response.error: + raise Exception("Could not get request token") + request_token = _oauth_parse_response(response.body) + data = "|".join([request_token["key"], request_token["secret"]]) + self.set_cookie("_oauth_request_token", data) + args = dict(oauth_token=request_token["key"]) + if callback_uri: + args["oauth_callback"] = urlparse.urljoin( + self.request.full_url(), callback_uri) + self.redirect(authorize_url + "?" + urllib.urlencode(args)) + + def _oauth_access_token_url(self, request_token): + consumer_token = self._oauth_consumer_token() + url = self._OAUTH_ACCESS_TOKEN_URL + args = dict( + oauth_consumer_key=consumer_token["key"], + oauth_token=request_token["key"], + oauth_signature_method="HMAC-SHA1", + oauth_timestamp=str(int(time.time())), + oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes), + oauth_version="1.0", + ) + signature = _oauth_signature(consumer_token, "GET", url, args, + request_token) + args["oauth_signature"] = signature + return url + "?" + urllib.urlencode(args) + + def _on_access_token(self, callback, response): + if response.error: + _log.warning("Could not fetch access token") + callback(None) + return + access_token = _oauth_parse_response(response.body) + user = self._oauth_get_user(access_token, self.async_callback( + self._on_oauth_get_user, access_token, callback)) + + def _oauth_get_user(self, access_token, callback): + raise NotImplementedError() + + def _on_oauth_get_user(self, access_token, callback, user): + if not user: + callback(None) + return + user["access_token"] = access_token + callback(user) + + def _oauth_request_parameters(self, url, access_token, parameters={}, + method="GET"): + """Returns the OAuth parameters as a dict for the given request. + + parameters should include all POST arguments and query string arguments + that will be sent with the request. 
+ """ + consumer_token = self._oauth_consumer_token() + base_args = dict( + oauth_consumer_key=consumer_token["key"], + oauth_token=access_token["key"], + oauth_signature_method="HMAC-SHA1", + oauth_timestamp=str(int(time.time())), + oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes), + oauth_version="1.0", + ) + args = {} + args.update(base_args) + args.update(parameters) + signature = _oauth_signature(consumer_token, method, url, args, + access_token) + base_args["oauth_signature"] = signature + return base_args + + +class TwitterMixin(OAuthMixin): + """Twitter OAuth authentication. + + To authenticate with Twitter, register your application with + Twitter at http://twitter.com/apps. Then copy your Consumer Key and + Consumer Secret to the application settings 'twitter_consumer_key' and + 'twitter_consumer_secret'. Use this Mixin on the handler for the URL + you registered as your application's Callback URL. + + When your application is set up, you can use this Mixin like this + to authenticate the user with Twitter and get access to their stream: + + class TwitterHandler(tornado.web.RequestHandler, + tornado.auth.TwitterMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("oauth_token", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authorize_redirect() + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "Twitter auth failed") + # Save the user using, e.g., set_secure_cookie() + + The user object returned by get_authenticated_user() includes the + attributes 'username', 'name', and all of the custom Twitter user + attributes describe at + http://apiwiki.twitter.com/Twitter-REST-API-Method%3A-users%C2%A0show + in addition to 'access_token'. You should save the access token with + the user; it is required to make requests on behalf of the user later + with twitter_request(). + """ + _OAUTH_REQUEST_TOKEN_URL = "http://twitter.com/oauth/request_token" + _OAUTH_ACCESS_TOKEN_URL = "http://twitter.com/oauth/access_token" + _OAUTH_AUTHORIZE_URL = "http://twitter.com/oauth/authorize" + _OAUTH_AUTHENTICATE_URL = "http://twitter.com/oauth/authenticate" + _OAUTH_NO_CALLBACKS = True + + def authenticate_redirect(self): + """Just like authorize_redirect(), but auto-redirects if authorized. + + This is generally the right interface to use if you are using + Twitter for single-sign on. + """ + http = httpclient.AsyncHTTPClient() + http.fetch(self._oauth_request_token_url(), self.async_callback( + self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None)) + + def twitter_request(self, path, callback, access_token=None, + post_args=None, **args): + """Fetches the given API path, e.g., "/statuses/user_timeline/btaylor" + + The path should not include the format (we automatically append + ".json" and parse the JSON output). + + If the request is a POST, post_args should be provided. Query + string arguments should be given as keyword arguments. + + All the Twitter methods are documented at + http://apiwiki.twitter.com/Twitter-API-Documentation. + + Many methods require an OAuth access token which you can obtain + through authorize_redirect() and get_authenticated_user(). The + user returned through that process includes an 'access_token' + attribute that can be used to make authenticated requests via + this method. 
Example usage: + + class MainHandler(tornado.web.RequestHandler, + tornado.auth.TwitterMixin): + @tornado.web.authenticated + @tornado.web.asynchronous + def get(self): + self.twitter_request( + "/statuses/update", + post_args={"status": "Testing Tornado Web Server"}, + access_token=user["access_token"], + callback=self.async_callback(self._on_post)) + + def _on_post(self, new_entry): + if not new_entry: + # Call failed; perhaps missing permission? + self.authorize_redirect() + return + self.finish("Posted a message!") + + """ + # Add the OAuth resource request signature if we have credentials + url = "http://twitter.com" + path + ".json" + if access_token: + all_args = {} + all_args.update(args) + all_args.update(post_args or {}) + consumer_token = self._oauth_consumer_token() + method = "POST" if post_args is not None else "GET" + oauth = self._oauth_request_parameters( + url, access_token, all_args, method=method) + args.update(oauth) + if args: url += "?" + urllib.urlencode(args) + callback = self.async_callback(self._on_twitter_request, callback) + http = httpclient.AsyncHTTPClient() + if post_args is not None: + http.fetch(url, method="POST", body=urllib.urlencode(post_args), + callback=callback) + else: + http.fetch(url, callback=callback) + + def _on_twitter_request(self, callback, response): + if response.error: + _log.warning("Error response %s fetching %s", response.error, + response.request.url) + callback(None) + return + callback(escape.json_decode(response.body)) + + def _oauth_consumer_token(self): + self.require_setting("twitter_consumer_key", "Twitter OAuth") + self.require_setting("twitter_consumer_secret", "Twitter OAuth") + return dict( + key=self.settings["twitter_consumer_key"], + secret=self.settings["twitter_consumer_secret"]) + + def _oauth_get_user(self, access_token, callback): + callback = self.async_callback(self._parse_user_response, callback) + self.twitter_request( + "/users/show/" + access_token["screen_name"], + access_token=access_token, callback=callback) + + def _parse_user_response(self, callback, user): + if user: + user["username"] = user["screen_name"] + callback(user) + + +class FriendFeedMixin(OAuthMixin): + """FriendFeed OAuth authentication. + + To authenticate with FriendFeed, register your application with + FriendFeed at http://friendfeed.com/api/applications. Then + copy your Consumer Key and Consumer Secret to the application settings + 'friendfeed_consumer_key' and 'friendfeed_consumer_secret'. Use + this Mixin on the handler for the URL you registered as your + application's Callback URL. + + When your application is set up, you can use this Mixin like this + to authenticate the user with FriendFeed and get access to their feed: + + class FriendFeedHandler(tornado.web.RequestHandler, + tornado.auth.FriendFeedMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("oauth_token", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authorize_redirect() + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "FriendFeed auth failed") + # Save the user using, e.g., set_secure_cookie() + + The user object returned by get_authenticated_user() includes the + attributes 'username', 'name', and 'description' in addition to + 'access_token'. You should save the access token with the user; + it is required to make requests on behalf of the user later with + friendfeed_request(). 
+ """ + _OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token" + _OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token" + _OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize" + _OAUTH_NO_CALLBACKS = True + + def friendfeed_request(self, path, callback, access_token=None, + post_args=None, **args): + """Fetches the given relative API path, e.g., "/bret/friends" + + If the request is a POST, post_args should be provided. Query + string arguments should be given as keyword arguments. + + All the FriendFeed methods are documented at + http://friendfeed.com/api/documentation. + + Many methods require an OAuth access token which you can obtain + through authorize_redirect() and get_authenticated_user(). The + user returned through that process includes an 'access_token' + attribute that can be used to make authenticated requests via + this method. Example usage: + + class MainHandler(tornado.web.RequestHandler, + tornado.auth.FriendFeedMixin): + @tornado.web.authenticated + @tornado.web.asynchronous + def get(self): + self.friendfeed_request( + "/entry", + post_args={"body": "Testing Tornado Web Server"}, + access_token=self.current_user["access_token"], + callback=self.async_callback(self._on_post)) + + def _on_post(self, new_entry): + if not new_entry: + # Call failed; perhaps missing permission? + self.authorize_redirect() + return + self.finish("Posted a message!") + + """ + # Add the OAuth resource request signature if we have credentials + url = "http://friendfeed-api.com/v2" + path + if access_token: + all_args = {} + all_args.update(args) + all_args.update(post_args or {}) + consumer_token = self._oauth_consumer_token() + method = "POST" if post_args is not None else "GET" + oauth = self._oauth_request_parameters( + url, access_token, all_args, method=method) + args.update(oauth) + if args: url += "?" + urllib.urlencode(args) + callback = self.async_callback(self._on_friendfeed_request, callback) + http = httpclient.AsyncHTTPClient() + if post_args is not None: + http.fetch(url, method="POST", body=urllib.urlencode(post_args), + callback=callback) + else: + http.fetch(url, callback=callback) + + def _on_friendfeed_request(self, callback, response): + if response.error: + _log.warning("Error response %s fetching %s", response.error, + response.request.url) + callback(None) + return + callback(escape.json_decode(response.body)) + + def _oauth_consumer_token(self): + self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth") + self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth") + return dict( + key=self.settings["friendfeed_consumer_key"], + secret=self.settings["friendfeed_consumer_secret"]) + + def _oauth_get_user(self, access_token, callback): + callback = self.async_callback(self._parse_user_response, callback) + self.friendfeed_request( + "/feedinfo/" + access_token["username"], + include="id,name,description", access_token=access_token, + callback=callback) + + def _parse_user_response(self, callback, user): + if user: + user["username"] = user["id"] + callback(user) + + +class GoogleMixin(OpenIdMixin, OAuthMixin): + """Google Open ID / OAuth authentication. + + No application registration is necessary to use Google for authentication + or to access Google resources on behalf of a user. To authenticate with + Google, redirect with authenticate_redirect(). On return, parse the + response with get_authenticated_user(). 
We send a dict containing the + values for the user, including 'email', 'name', and 'locale'. + Example usage: + + class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("openid.mode", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authenticate_redirect() + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "Google auth failed") + # Save the user with, e.g., set_secure_cookie() + + """ + _OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud" + _OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken" + + def authorize_redirect(self, oauth_scope, callback_uri=None, + ax_attrs=["name","email","language","username"]): + """Authenticates and authorizes for the given Google resource. + + Some of the available resources are: + + Gmail Contacts - http://www.google.com/m8/feeds/ + Calendar - http://www.google.com/calendar/feeds/ + Finance - http://finance.google.com/finance/feeds/ + + You can authorize multiple resources by separating the resource + URLs with a space. + """ + callback_uri = callback_uri or self.request.path + args = self._openid_args(callback_uri, ax_attrs=ax_attrs, + oauth_scope=oauth_scope) + self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args)) + + def get_authenticated_user(self, callback): + """Fetches the authenticated user data upon redirect.""" + # Look to see if we are doing combined OpenID/OAuth + oauth_ns = "" + for name, values in self.request.arguments.iteritems(): + if name.startswith("openid.ns.") and \ + values[-1] == u"http://specs.openid.net/extensions/oauth/1.0": + oauth_ns = name[10:] + break + token = self.get_argument("openid." + oauth_ns + ".request_token", "") + if token: + http = httpclient.AsyncHTTPClient() + token = dict(key=token, secret="") + http.fetch(self._oauth_access_token_url(token), + self.async_callback(self._on_access_token, callback)) + else: + OpenIdMixin.get_authenticated_user(self, callback) + + def _oauth_consumer_token(self): + self.require_setting("google_consumer_key", "Google OAuth") + self.require_setting("google_consumer_secret", "Google OAuth") + return dict( + key=self.settings["google_consumer_key"], + secret=self.settings["google_consumer_secret"]) + + def _oauth_get_user(self, access_token, callback): + OpenIdMixin.get_authenticated_user(self, callback) + + +class FacebookMixin(object): + """Facebook Connect authentication. + + To authenticate with Facebook, register your application with + Facebook at http://www.facebook.com/developers/apps.php. Then + copy your API Key and Application Secret to the application settings + 'facebook_api_key' and 'facebook_secret'. + + When your application is set up, you can use this Mixin like this + to authenticate the user with Facebook: + + class FacebookHandler(tornado.web.RequestHandler, + tornado.auth.FacebookMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("session", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authenticate_redirect() + + def _on_auth(self, user): + if not user: + raise tornado.web.HTTPError(500, "Facebook auth failed") + # Save the user using, e.g., set_secure_cookie() + + The user object returned by get_authenticated_user() includes the + attributes 'facebook_uid' and 'name' in addition to session attributes + like 'session_key'. 
You should save the session key with the user; it is + required to make requests on behalf of the user later with + facebook_request(). + """ + def authenticate_redirect(self, callback_uri=None, cancel_uri=None, + extended_permissions=None): + """Authenticates/installs this app for the current user.""" + self.require_setting("facebook_api_key", "Facebook Connect") + callback_uri = callback_uri or self.request.path + args = { + "api_key": self.settings["facebook_api_key"], + "v": "1.0", + "fbconnect": "true", + "display": "page", + "next": urlparse.urljoin(self.request.full_url(), callback_uri), + "return_session": "true", + } + if cancel_uri: + args["cancel_url"] = urlparse.urljoin( + self.request.full_url(), cancel_uri) + if extended_permissions: + if isinstance(extended_permissions, basestring): + extended_permissions = [extended_permissions] + args["req_perms"] = ",".join(extended_permissions) + self.redirect("http://www.facebook.com/login.php?" + + urllib.urlencode(args)) + + def authorize_redirect(self, extended_permissions, callback_uri=None, + cancel_uri=None): + """Redirects to an authorization request for the given FB resource. + + The available resource names are listed at + http://wiki.developers.facebook.com/index.php/Extended_permission. + The most common resource types include: + + publish_stream + read_stream + email + sms + + extended_permissions can be a single permission name or a list of + names. To get the session secret and session key, call + get_authenticated_user() just as you would with + authenticate_redirect(). + """ + self.authenticate_redirect(callback_uri, cancel_uri, + extended_permissions) + + def get_authenticated_user(self, callback): + """Fetches the authenticated Facebook user. + + The authenticated user includes the special Facebook attributes + 'session_key' and 'facebook_uid' in addition to the standard + user attributes like 'name'. + """ + self.require_setting("facebook_api_key", "Facebook Connect") + session = escape.json_decode(self.get_argument("session")) + self.facebook_request( + method="facebook.users.getInfo", + callback=self.async_callback( + self._on_get_user_info, callback, session), + session_key=session["session_key"], + uids=session["uid"], + fields="uid,first_name,last_name,name,locale,pic_square," \ + "profile_url,username") + + def facebook_request(self, method, callback, **args): + """Makes a Facebook API REST request. + + We automatically include the Facebook API key and signature, but + it is the callers responsibility to include 'session_key' and any + other required arguments to the method. + + The available Facebook methods are documented here: + http://wiki.developers.facebook.com/index.php/API + + Here is an example for the stream.get() method: + + class MainHandler(tornado.web.RequestHandler, + tornado.auth.FacebookMixin): + @tornado.web.authenticated + @tornado.web.asynchronous + def get(self): + self.facebook_request( + method="stream.get", + callback=self.async_callback(self._on_stream), + session_key=self.current_user["session_key"]) + + def _on_stream(self, stream): + if stream is None: + # Not authorized to read the stream yet? + self.redirect(self.authorize_redirect("read_stream")) + return + self.render("stream.html", stream=stream) + + """ + self.require_setting("facebook_api_key", "Facebook Connect") + self.require_setting("facebook_secret", "Facebook Connect") + if not method.startswith("facebook."): + method = "facebook." 
+ method + args["api_key"] = self.settings["facebook_api_key"] + args["v"] = "1.0" + args["method"] = method + args["call_id"] = str(long(time.time() * 1e6)) + args["format"] = "json" + args["sig"] = self._signature(args) + url = "http://api.facebook.com/restserver.php?" + \ + urllib.urlencode(args) + http = httpclient.AsyncHTTPClient() + http.fetch(url, callback=self.async_callback( + self._parse_response, callback)) + + def _on_get_user_info(self, callback, session, users): + if users is None: + callback(None) + return + callback({ + "name": users[0]["name"], + "first_name": users[0]["first_name"], + "last_name": users[0]["last_name"], + "uid": users[0]["uid"], + "locale": users[0]["locale"], + "pic_square": users[0]["pic_square"], + "profile_url": users[0]["profile_url"], + "username": users[0].get("username"), + "session_key": session["session_key"], + "session_expires": session["expires"], + }) + + def _parse_response(self, callback, response): + if response.error: + _log.warning("HTTP error from Facebook: %s", response.error) + callback(None) + return + try: + json = escape.json_decode(response.body) + except: + _log.warning("Invalid JSON from Facebook: %r", response.body) + callback(None) + return + if isinstance(json, dict) and json.get("error_code"): + _log.warning("Facebook error: %d: %r", json["error_code"], + json.get("error_msg")) + callback(None) + return + callback(json) + + def _signature(self, args): + parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())] + body = "".join(parts) + self.settings["facebook_secret"] + if isinstance(body, unicode): body = body.encode("utf-8") + return hashlib.md5(body).hexdigest() + + +def _oauth_signature(consumer_token, method, url, parameters={}, token=None): + """Calculates the HMAC-SHA1 OAuth signature for the given request. + + See http://oauth.net/core/1.0/#signing_process + """ + parts = urlparse.urlparse(url) + scheme, netloc, path = parts[:3] + normalized_url = scheme.lower() + "://" + netloc.lower() + path + + base_elems = [] + base_elems.append(method.upper()) + base_elems.append(normalized_url) + base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) + for k, v in sorted(parameters.items()))) + base_string = "&".join(_oauth_escape(e) for e in base_elems) + + key_elems = [consumer_token["secret"]] + key_elems.append(token["secret"] if token else "") + key = "&".join(key_elems) + + hash = hmac.new(key, base_string, hashlib.sha1) + return binascii.b2a_base64(hash.digest())[:-1] + + +def _oauth_escape(val): + if isinstance(val, unicode): + val = val.encode("utf-8") + return urllib.quote(val, safe="~") + + +def _oauth_parse_response(body): + p = cgi.parse_qs(body, keep_blank_values=False) + token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0]) + + # Add the extra parameters the Provider included to the token + special = ("oauth_token", "oauth_token_secret") + token.update((k, p[k][0]) for k in p if k not in special) + return token diff --git a/vendor/tornado/tornado/autoreload.py b/vendor/tornado/tornado/autoreload.py new file mode 100644 index 000000000000..231cfe892ceb --- /dev/null +++ b/vendor/tornado/tornado/autoreload.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
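For reference, a small hedged sketch of what the _oauth_signature() helper defined above operates on; the key, token, and parameter values are made up:

    # _oauth_signature() is a private helper of tornado.auth; imported here
    # only to illustrate the HMAC-SHA1 signing scheme.
    from tornado.auth import _oauth_signature

    consumer = dict(key="ckey", secret="csecret")
    token = dict(key="tkey", secret="tsecret")
    params = {
        "oauth_consumer_key": "ckey",
        "oauth_token": "tkey",
        "oauth_signature_method": "HMAC-SHA1",
        "oauth_timestamp": "1262304000",
        "oauth_nonce": "abc123",
        "oauth_version": "1.0",
    }
    # Base string: method, escaped URL, and escaped sorted key=value pairs,
    # joined by "&"; signing key: "csecret&tsecret". Result is base64 text.
    print _oauth_signature(consumer, "GET", "http://example.com/api",
                           params, token)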
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A module to automatically restart the server when a module is modified. + +This module depends on IOLoop, so it will not work in WSGI applications +and Google AppEngine. +""" + +import functools +import errno +import ioloop +import logging +import os +import os.path +import sys +import types + +_log = logging.getLogger('tornado.autoreload') + +def start(io_loop=None, check_time=500): + """Restarts the process automatically when a module is modified. + + We run on the I/O loop, and restarting is a destructive operation, + so will terminate any pending requests. + """ + io_loop = io_loop or ioloop.IOLoop.instance() + modify_times = {} + callback = functools.partial(_reload_on_update, io_loop, modify_times) + scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop) + scheduler.start() + + +_reload_attempted = False + +def _reload_on_update(io_loop, modify_times): + global _reload_attempted + if _reload_attempted: + # We already tried to reload and it didn't work, so don't try again. + return + for module in sys.modules.values(): + # Some modules play games with sys.modules (e.g. email/__init__.py + # in the standard library), and occasionally this can cause strange + # failures in getattr. Just ignore anything that's not an ordinary + # module. + if not isinstance(module, types.ModuleType): continue + path = getattr(module, "__file__", None) + if not path: continue + if path.endswith(".pyc") or path.endswith(".pyo"): + path = path[:-1] + try: + modified = os.stat(path).st_mtime + except: + continue + if path not in modify_times: + modify_times[path] = modified + continue + if modify_times[path] != modified: + _log.info("%s modified; restarting server", path) + _reload_attempted = True + for fd in io_loop._handlers.keys(): + try: + os.close(fd) + except: + pass + try: + os.execv(sys.executable, [sys.executable] + sys.argv) + except OSError, e: + # Mac OS X versions prior to 10.6 do not support execv in + # a process that contains multiple threads. Instead of + # re-executing in the current process, start a new one + # and cause the current process to exit. This isn't + # ideal since the new process is detached from the parent + # terminal and thus cannot easily be killed with ctrl-C, + # but it's better than not being able to autoreload at + # all. + # Unfortunately the errno returned in this case does not + # appear to be consistent, so we can't easily check for + # this error specifically. + os.spawnv(os.P_NOWAIT, sys.executable, + [sys.executable] + sys.argv) + sys.exit(0) diff --git a/vendor/tornado/tornado/database.py b/vendor/tornado/tornado/database.py new file mode 100644 index 000000000000..3f78e00b94ec --- /dev/null +++ b/vendor/tornado/tornado/database.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
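A minimal sketch of turning the autoreload module above on during development, assuming the rest of the server is already set up on the singleton IOLoop:

    import tornado.autoreload
    import tornado.ioloop

    # ... create your HTTPServer / Application as usual, then:
    io_loop = tornado.ioloop.IOLoop.instance()
    tornado.autoreload.start(io_loop, check_time=500)   # poll every 500 ms
    io_loop.start()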
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A lightweight wrapper around MySQLdb.""" + +import copy +import MySQLdb +import MySQLdb.constants +import MySQLdb.converters +import MySQLdb.cursors +import itertools +import logging + +_log = logging.getLogger('tornado.database') + +class Connection(object): + """A lightweight wrapper around MySQLdb DB-API connections. + + The main value we provide is wrapping rows in a dict/object so that + columns can be accessed by name. Typical usage: + + db = database.Connection("localhost", "mydatabase") + for article in db.query("SELECT * FROM articles"): + print article.title + + Cursors are hidden by the implementation, but other than that, the methods + are very similar to the DB-API. + + We explicitly set the timezone to UTC and the character encoding to + UTF-8 on all connections to avoid time zone and encoding errors. + """ + def __init__(self, host, database, user=None, password=None): + self.host = host + self.database = database + + args = dict(conv=CONVERSIONS, use_unicode=True, charset="utf8", + db=database, init_command='SET time_zone = "+0:00"', + sql_mode="TRADITIONAL") + if user is not None: + args["user"] = user + if password is not None: + args["passwd"] = password + + # We accept a path to a MySQL socket file or a host(:port) string + if "/" in host: + args["unix_socket"] = host + else: + self.socket = None + pair = host.split(":") + if len(pair) == 2: + args["host"] = pair[0] + args["port"] = int(pair[1]) + else: + args["host"] = host + args["port"] = 3306 + + self._db = None + self._db_args = args + try: + self.reconnect() + except: + _log.error("Cannot connect to MySQL on %s", self.host, + exc_info=True) + + def __del__(self): + self.close() + + def close(self): + """Closes this database connection.""" + if getattr(self, "_db", None) is not None: + self._db.close() + self._db = None + + def reconnect(self): + """Closes the existing database connection and re-opens it.""" + self.close() + self._db = MySQLdb.connect(**self._db_args) + self._db.autocommit(True) + + def iter(self, query, *parameters): + """Returns an iterator for the given query and parameters.""" + if self._db is None: self.reconnect() + cursor = MySQLdb.cursors.SSCursor(self._db) + try: + self._execute(cursor, query, parameters) + column_names = [d[0] for d in cursor.description] + for row in cursor: + yield Row(zip(column_names, row)) + finally: + cursor.close() + + def query(self, query, *parameters): + """Returns a row list for the given query and parameters.""" + cursor = self._cursor() + try: + self._execute(cursor, query, parameters) + column_names = [d[0] for d in cursor.description] + return [Row(itertools.izip(column_names, row)) for row in cursor] + finally: + cursor.close() + + def get(self, query, *parameters): + """Returns the first row returned for the given query.""" + rows = self.query(query, *parameters) + if not rows: + return None + elif len(rows) > 1: + raise Exception("Multiple rows returned for Database.get() query") + else: + return rows[0] + + def execute(self, query, *parameters): + """Executes the given query, returning the lastrowid from the query.""" + cursor = 
self._cursor() + try: + self._execute(cursor, query, parameters) + return cursor.lastrowid + finally: + cursor.close() + + def executemany(self, query, parameters): + """Executes the given query against all the given param sequences. + + We return the lastrowid from the query. + """ + cursor = self._cursor() + try: + cursor.executemany(query, parameters) + return cursor.lastrowid + finally: + cursor.close() + + def _cursor(self): + if self._db is None: self.reconnect() + return self._db.cursor() + + def _execute(self, cursor, query, parameters): + try: + return cursor.execute(query, parameters) + except OperationalError: + _log.error("Error connecting to MySQL on %s", self.host) + self.close() + raise + + +class Row(dict): + """A dict that allows for object-like property access syntax.""" + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + +# Fix the access conversions to properly recognize unicode/binary +FIELD_TYPE = MySQLdb.constants.FIELD_TYPE +FLAG = MySQLdb.constants.FLAG +CONVERSIONS = copy.deepcopy(MySQLdb.converters.conversions) +for field_type in \ + [FIELD_TYPE.BLOB, FIELD_TYPE.STRING, FIELD_TYPE.VAR_STRING] + \ + ([FIELD_TYPE.VARCHAR] if 'VARCHAR' in vars(FIELD_TYPE) else []): + CONVERSIONS[field_type].insert(0, (FLAG.BINARY, str)) + + +# Alias some common MySQL exceptions +IntegrityError = MySQLdb.IntegrityError +OperationalError = MySQLdb.OperationalError diff --git a/vendor/tornado/tornado/epoll.c b/vendor/tornado/tornado/epoll.c new file mode 100644 index 000000000000..9a2e3a3747f7 --- /dev/null +++ b/vendor/tornado/tornado/epoll.c @@ -0,0 +1,112 @@ +/* + * Copyright 2009 Facebook + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. You may obtain + * a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +#include "Python.h" +#include <string.h> +#include <sys/epoll.h> + +#define MAX_EVENTS 24 + +/* + * Simple wrapper around epoll_create. + */ +static PyObject* _epoll_create(void) { + int fd = epoll_create(MAX_EVENTS); + if (fd == -1) { + PyErr_SetFromErrno(PyExc_Exception); + return NULL; + } + + return PyInt_FromLong(fd); +} + +/* + * Simple wrapper around epoll_ctl. We throw an exception if the call fails + * rather than returning the error code since it is an infrequent (and likely + * catastrophic) event when it does happen. + */ +static PyObject* _epoll_ctl(PyObject* self, PyObject* args) { + int epfd, op, fd, events; + struct epoll_event event; + + if (!PyArg_ParseTuple(args, "iiiI", &epfd, &op, &fd, &events)) { + return NULL; + } + + memset(&event, 0, sizeof(event)); + event.events = events; + event.data.fd = fd; + if (epoll_ctl(epfd, op, fd, &event) == -1) { + PyErr_SetFromErrno(PyExc_OSError); + return NULL; + } + + Py_INCREF(Py_None); + return Py_None; +} + +/* + * Simple wrapper around epoll_wait. We return None if the call times out and + * throw an exception if an error occurs. Otherwise, we return a list of + * (fd, event) tuples.
+ */ +static PyObject* _epoll_wait(PyObject* self, PyObject* args) { + struct epoll_event events[MAX_EVENTS]; + int epfd, timeout, num_events, i; + PyObject* list; + PyObject* tuple; + + if (!PyArg_ParseTuple(args, "ii", &epfd, &timeout)) { + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + num_events = epoll_wait(epfd, events, MAX_EVENTS, timeout); + Py_END_ALLOW_THREADS + if (num_events == -1) { + PyErr_SetFromErrno(PyExc_Exception); + return NULL; + } + + list = PyList_New(num_events); + for (i = 0; i < num_events; i++) { + tuple = PyTuple_New(2); + PyTuple_SET_ITEM(tuple, 0, PyInt_FromLong(events[i].data.fd)); + PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(events[i].events)); + PyList_SET_ITEM(list, i, tuple); + } + return list; +} + +/* + * Our method declarations + */ +static PyMethodDef kEpollMethods[] = { + {"epoll_create", (PyCFunction)_epoll_create, METH_NOARGS, + "Create an epoll file descriptor"}, + {"epoll_ctl", _epoll_ctl, METH_VARARGS, + "Control an epoll file descriptor"}, + {"epoll_wait", _epoll_wait, METH_VARARGS, + "Wait for events on an epoll file descriptor"}, + {NULL, NULL, 0, NULL} +}; + +/* + * Module initialization + */ +PyMODINIT_FUNC initepoll(void) { + Py_InitModule("epoll", kEpollMethods); +} diff --git a/vendor/tornado/tornado/escape.py b/vendor/tornado/tornado/escape.py new file mode 100644 index 000000000000..bacb1c51d004 --- /dev/null +++ b/vendor/tornado/tornado/escape.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
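A hedged sketch of driving the epoll extension above by hand (it is normally consumed by the I/O loop); this only works on Linux where setup.py builds tornado.epoll, and the EPOLL* constants are the standard Linux values, assumed here because the module does not export them:

    import socket
    from tornado import epoll

    EPOLL_CTL_ADD = 1      # standard Linux epoll constants (assumption:
    EPOLLIN = 0x001        # the extension itself exports no constants)

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("127.0.0.1", 0))
    sock.listen(1)

    epfd = epoll.epoll_create()
    epoll.epoll_ctl(epfd, EPOLL_CTL_ADD, sock.fileno(), EPOLLIN)
    # Blocks for up to 1000 ms; returns a list of (fd, events) tuples.
    print epoll.epoll_wait(epfd, 1000)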
+ +"""Escaping/unescaping methods for HTML, JSON, URLs, and others.""" + +import htmlentitydefs +import re +import xml.sax.saxutils +import urllib + +try: + import json + assert hasattr(json, "loads") and hasattr(json, "dumps") + _json_decode = lambda s: json.loads(s) + _json_encode = lambda v: json.dumps(v) +except: + try: + import simplejson + _json_decode = lambda s: simplejson.loads(_unicode(s)) + _json_encode = lambda v: simplejson.dumps(v) + except ImportError: + try: + # For Google AppEngine + from django.utils import simplejson + _json_decode = lambda s: simplejson.loads(_unicode(s)) + _json_encode = lambda v: simplejson.dumps(v) + except ImportError: + raise Exception("A JSON parser is required, e.g., simplejson at " + "http://pypi.python.org/pypi/simplejson/") + + +def xhtml_escape(value): + """Escapes a string so it is valid within XML or XHTML.""" + return utf8(xml.sax.saxutils.escape(value, {'"': """})) + + +def xhtml_unescape(value): + """Un-escapes an XML-escaped string.""" + return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value)) + + +def json_encode(value): + """JSON-encodes the given Python object.""" + return _json_encode(value) + + +def json_decode(value): + """Returns Python objects for the given JSON string.""" + return _json_decode(value) + + +def squeeze(value): + """Replace all sequences of whitespace chars with a single space.""" + return re.sub(r"[\x00-\x20]+", " ", value).strip() + + +def url_escape(value): + """Returns a valid URL-encoded version of the given value.""" + return urllib.quote_plus(utf8(value)) + + +def url_unescape(value): + """Decodes the given value from a URL.""" + return _unicode(urllib.unquote_plus(value)) + + +def utf8(value): + if isinstance(value, unicode): + return value.encode("utf-8") + assert isinstance(value, str) + return value + + +def _unicode(value): + if isinstance(value, str): + return value.decode("utf-8") + assert isinstance(value, unicode) + return value + + +def _convert_entity(m): + if m.group(1) == "#": + try: + return unichr(int(m.group(2))) + except ValueError: + return "&#%s;" % m.group(2) + try: + return _HTML_UNICODE_MAP[m.group(2)] + except KeyError: + return "&%s;" % m.group(2) + + +def _build_unicode_map(): + unicode_map = {} + for name, value in htmlentitydefs.name2codepoint.iteritems(): + unicode_map[name] = unichr(value) + return unicode_map + +_HTML_UNICODE_MAP = _build_unicode_map() diff --git a/vendor/tornado/tornado/httpclient.py b/vendor/tornado/tornado/httpclient.py new file mode 100644 index 000000000000..2c9155eb9e88 --- /dev/null +++ b/vendor/tornado/tornado/httpclient.py @@ -0,0 +1,465 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Blocking and non-blocking HTTP client implementations using pycurl.""" + +import calendar +import collections +import cStringIO +import email.utils +import errno +import functools +import httplib +import ioloop +import logging +import pycurl +import time +import weakref + +_log = logging.getLogger('tornado.httpclient') + +class HTTPClient(object): + """A blocking HTTP client backed with pycurl. + + Typical usage looks like this: + + http_client = httpclient.HTTPClient() + try: + response = http_client.fetch("http://www.google.com/") + print response.body + except httpclient.HTTPError, e: + print "Error:", e + + fetch() can take a string URL or an HTTPRequest instance, which offers + more options, like executing POST/PUT/DELETE requests. + """ + def __init__(self, max_simultaneous_connections=None): + self._curl = _curl_create(max_simultaneous_connections) + + def __del__(self): + self._curl.close() + + def fetch(self, request, **kwargs): + """Executes an HTTPRequest, returning an HTTPResponse. + + If an error occurs during the fetch, we raise an HTTPError. + """ + if not isinstance(request, HTTPRequest): + request = HTTPRequest(url=request, **kwargs) + buffer = cStringIO.StringIO() + headers = {} + try: + _curl_setup_request(self._curl, request, buffer, headers) + self._curl.perform() + code = self._curl.getinfo(pycurl.HTTP_CODE) + if code < 200 or code >= 300: + raise HTTPError(code) + effective_url = self._curl.getinfo(pycurl.EFFECTIVE_URL) + return HTTPResponse( + request=request, code=code, headers=headers, + body=buffer.getvalue(), effective_url=effective_url) + except pycurl.error, e: + raise CurlError(*e) + finally: + buffer.close() + + +class AsyncHTTPClient(object): + """An non-blocking HTTP client backed with pycurl. + + Example usage: + + import ioloop + + def handle_request(response): + if response.error: + print "Error:", response.error + else: + print response.body + ioloop.IOLoop.instance().stop() + + http_client = httpclient.AsyncHTTPClient() + http_client.fetch("http://www.google.com/", handle_request) + ioloop.IOLoop.instance().start() + + fetch() can take a string URL or an HTTPRequest instance, which offers + more options, like executing POST/PUT/DELETE requests. + + The keyword argument max_clients to the AsyncHTTPClient constructor + determines the maximum number of simultaneous fetch() operations that + can execute in parallel on each IOLoop. + """ + _ASYNC_CLIENTS = weakref.WeakKeyDictionary() + + def __new__(cls, io_loop=None, max_clients=10, + max_simultaneous_connections=None): + # There is one client per IOLoop since they share curl instances + io_loop = io_loop or ioloop.IOLoop.instance() + if io_loop in cls._ASYNC_CLIENTS: + return cls._ASYNC_CLIENTS[io_loop] + else: + instance = super(AsyncHTTPClient, cls).__new__(cls) + instance.io_loop = io_loop + instance._multi = pycurl.CurlMulti() + instance._curls = [_curl_create(max_simultaneous_connections) + for i in xrange(max_clients)] + instance._free_list = instance._curls[:] + instance._requests = collections.deque() + instance._fds = {} + instance._events = {} + instance._added_perform_callback = False + instance._timeout = None + cls._ASYNC_CLIENTS[io_loop] = instance + return instance + + def close(self): + """Destroys this http client, freeing any file descriptors used. + Not needed in normal use, but may be helpful in unittests that + create and destroy http clients. No other methods may be called + on the AsyncHTTPClient after close(). 
+ """ + del AsyncHTTPClient._ASYNC_CLIENTS[self.io_loop] + for curl in self._curls: + curl.close() + self._multi.close() + + def fetch(self, request, callback, **kwargs): + """Executes an HTTPRequest, calling callback with an HTTPResponse. + + If an error occurs during the fetch, the HTTPResponse given to the + callback has a non-None error attribute that contains the exception + encountered during the request. You can call response.reraise() to + throw the exception (if any) in the callback. + """ + if not isinstance(request, HTTPRequest): + request = HTTPRequest(url=request, **kwargs) + self._requests.append((request, callback)) + self._add_perform_callback() + + def _add_perform_callback(self): + if not self._added_perform_callback: + self.io_loop.add_callback(self._perform) + self._added_perform_callback = True + + def _handle_events(self, fd, events): + self._events[fd] = events + self._add_perform_callback() + + def _handle_timeout(self): + self._timeout = None + self._perform() + + def _perform(self): + self._added_perform_callback = False + + while True: + while True: + ret, num_handles = self._multi.perform() + if ret != pycurl.E_CALL_MULTI_PERFORM: + break + + # Handle completed fetches + completed = 0 + while True: + num_q, ok_list, err_list = self._multi.info_read() + for curl in ok_list: + self._finish(curl) + completed += 1 + for curl, errnum, errmsg in err_list: + self._finish(curl, errnum, errmsg) + completed += 1 + if num_q == 0: + break + + # Start fetching new URLs + started = 0 + while self._free_list and self._requests: + started += 1 + curl = self._free_list.pop() + (request, callback) = self._requests.popleft() + curl.info = { + "headers": {}, + "buffer": cStringIO.StringIO(), + "request": request, + "callback": callback, + "start_time": time.time(), + } + _curl_setup_request(curl, request, curl.info["buffer"], + curl.info["headers"]) + self._multi.add_handle(curl) + + if not started and not completed: + break + + if self._timeout is not None: + self.io_loop.remove_timeout(self._timeout) + self._timeout = None + + if num_handles: + self._timeout = self.io_loop.add_timeout( + time.time() + 0.2, self._handle_timeout) + + # Wait for more I/O + fds = {} + (readable, writable, exceptable) = self._multi.fdset() + for fd in readable: + fds[fd] = fds.get(fd, 0) | 0x1 | 0x2 + for fd in writable: + fds[fd] = fds.get(fd, 0) | 0x4 + for fd in exceptable: + fds[fd] = fds.get(fd, 0) | 0x8 | 0x10 + + for fd in self._fds: + if fd not in fds: + try: + self.io_loop.remove_handler(fd) + except (OSError, IOError), e: + if e[0] != errno.ENOENT: + raise + + for fd, events in fds.iteritems(): + old_events = self._fds.get(fd, None) + if old_events is None: + self.io_loop.add_handler(fd, self._handle_events, events) + elif old_events != events: + try: + self.io_loop.update_handler(fd, events) + except (OSError, IOError), e: + if e[0] == errno.ENOENT: + self.io_loop.add_handler(fd, self._handle_events, + events) + else: + raise + self._fds = fds + + def _finish(self, curl, curl_error=None, curl_message=None): + info = curl.info + curl.info = None + self._multi.remove_handle(curl) + self._free_list.append(curl) + if curl_error: + error = CurlError(curl_error, curl_message) + code = error.code + body = None + effective_url = None + else: + error = None + code = curl.getinfo(pycurl.HTTP_CODE) + body = info["buffer"].getvalue() + effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) + info["buffer"].close() + info["callback"](HTTPResponse( + request=info["request"], code=code, 
headers=info["headers"], + body=body, effective_url=effective_url, error=error, + request_time=time.time() - info["start_time"])) + + +class HTTPRequest(object): + def __init__(self, url, method="GET", headers={}, body=None, + auth_username=None, auth_password=None, + connect_timeout=20.0, request_timeout=20.0, + if_modified_since=None, follow_redirects=True, + max_redirects=5, user_agent=None, use_gzip=True, + network_interface=None, streaming_callback=None, + header_callback=None, prepare_curl_callback=None): + if if_modified_since: + timestamp = calendar.timegm(if_modified_since.utctimetuple()) + headers["If-Modified-Since"] = email.utils.formatdate( + timestamp, localtime=False, usegmt=True) + if "Pragma" not in headers: + headers["Pragma"] = "" + self.url = _utf8(url) + self.method = method + self.headers = headers + self.body = body + self.auth_username = _utf8(auth_username) + self.auth_password = _utf8(auth_password) + self.connect_timeout = connect_timeout + self.request_timeout = request_timeout + self.follow_redirects = follow_redirects + self.max_redirects = max_redirects + self.user_agent = user_agent + self.use_gzip = use_gzip + self.network_interface = network_interface + self.streaming_callback = streaming_callback + self.header_callback = header_callback + self.prepare_curl_callback = prepare_curl_callback + + +class HTTPResponse(object): + def __init__(self, request, code, headers={}, body="", effective_url=None, + error=None, request_time=None): + self.request = request + self.code = code + self.headers = headers + self.body = body + if effective_url is None: + self.effective_url = request.url + else: + self.effective_url = effective_url + if error is None: + if self.code < 200 or self.code >= 300: + self.error = HTTPError(self.code) + else: + self.error = None + else: + self.error = error + self.request_time = request_time + + def rethrow(self): + if self.error: + raise self.error + + def __repr__(self): + args = ",".join("%s=%r" % i for i in self.__dict__.iteritems()) + return "%s(%s)" % (self.__class__.__name__, args) + + +class HTTPError(Exception): + def __init__(self, code, message=None): + self.code = code + message = message or httplib.responses.get(code, "Unknown") + Exception.__init__(self, "HTTP %d: %s" % (self.code, message)) + + +class CurlError(HTTPError): + def __init__(self, errno, message): + HTTPError.__init__(self, 599, message) + self.errno = errno + + +def _curl_create(max_simultaneous_connections=None): + curl = pycurl.Curl() + if _log.isEnabledFor(logging.DEBUG): + curl.setopt(pycurl.VERBOSE, 1) + curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug) + curl.setopt(pycurl.MAXCONNECTS, max_simultaneous_connections or 5) + return curl + + +def _curl_setup_request(curl, request, buffer, headers): + curl.setopt(pycurl.URL, request.url) + curl.setopt(pycurl.HTTPHEADER, + ["%s: %s" % i for i in request.headers.iteritems()]) + try: + if request.header_callback: + curl.setopt(pycurl.HEADERFUNCTION, request.header_callback) + else: + curl.setopt(pycurl.HEADERFUNCTION, + functools.partial(_curl_header_callback, headers)) + except: + # Old version of curl; response will not include headers + pass + if request.streaming_callback: + curl.setopt(pycurl.WRITEFUNCTION, request.streaming_callback) + else: + curl.setopt(pycurl.WRITEFUNCTION, buffer.write) + curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) + curl.setopt(pycurl.MAXREDIRS, request.max_redirects) + curl.setopt(pycurl.CONNECTTIMEOUT, int(request.connect_timeout)) + curl.setopt(pycurl.TIMEOUT, 
int(request.request_timeout)) + if request.user_agent: + curl.setopt(pycurl.USERAGENT, request.user_agent) + else: + curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") + if request.network_interface: + curl.setopt(pycurl.INTERFACE, request.network_interface) + if request.use_gzip: + curl.setopt(pycurl.ENCODING, "gzip,deflate") + else: + curl.setopt(pycurl.ENCODING, "none") + + # Set the request method through curl's retarded interface which makes + # up names for almost every single method + curl_options = { + "GET": pycurl.HTTPGET, + "POST": pycurl.POST, + "PUT": pycurl.UPLOAD, + "HEAD": pycurl.NOBODY, + } + custom_methods = set(["DELETE"]) + for o in curl_options.values(): + curl.setopt(o, False) + if request.method in curl_options: + curl.unsetopt(pycurl.CUSTOMREQUEST) + curl.setopt(curl_options[request.method], True) + elif request.method in custom_methods: + curl.setopt(pycurl.CUSTOMREQUEST, request.method) + else: + raise KeyError('unknown method ' + request.method) + + # Handle curl's cryptic options for every individual HTTP method + if request.method in ("POST", "PUT"): + request_buffer = cStringIO.StringIO(request.body) + curl.setopt(pycurl.READFUNCTION, request_buffer.read) + if request.method == "POST": + def ioctl(cmd): + if cmd == curl.IOCMD_RESTARTREAD: + request_buffer.seek(0) + curl.setopt(pycurl.IOCTLFUNCTION, ioctl) + curl.setopt(pycurl.POSTFIELDSIZE, len(request.body)) + else: + curl.setopt(pycurl.INFILESIZE, len(request.body)) + + if request.auth_username and request.auth_password: + userpwd = "%s:%s" % (request.auth_username, request.auth_password) + curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) + curl.setopt(pycurl.USERPWD, userpwd) + _log.info("%s %s (username: %r)", request.method, request.url, + request.auth_username) + else: + curl.unsetopt(pycurl.USERPWD) + _log.info("%s %s", request.method, request.url) + if request.prepare_curl_callback is not None: + request.prepare_curl_callback(curl) + + +def _curl_header_callback(headers, header_line): + if header_line.startswith("HTTP/"): + headers.clear() + return + if header_line == "\r\n": + return + parts = header_line.split(": ") + if len(parts) != 2: + _log.warning("Invalid HTTP response header line %r", header_line) + return + name = parts[0].strip() + value = parts[1].strip() + if name in headers: + headers[name] = headers[name] + ',' + value + else: + headers[name] = value + + +def _curl_debug(debug_type, debug_msg): + debug_types = ('I', '<', '>', '<', '>') + if debug_type == 0: + _log.debug('%s', debug_msg.strip()) + elif debug_type in (1, 2): + for line in debug_msg.splitlines(): + _log.debug('%s %s', debug_types[debug_type], line) + elif debug_type == 4: + _log.debug('%s %r', debug_types[debug_type], debug_msg) + + +def _utf8(value): + if value is None: + return value + if isinstance(value, unicode): + return value.encode("utf-8") + assert isinstance(value, str) + return value diff --git a/vendor/tornado/tornado/httpserver.py b/vendor/tornado/tornado/httpserver.py new file mode 100644 index 000000000000..a7ec57eec4b7 --- /dev/null +++ b/vendor/tornado/tornado/httpserver.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
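A hedged sketch of a form-encoded POST through the blocking HTTPClient above, using HTTPRequest options defined earlier; the URL is a placeholder:

    import urllib
    from tornado import httpclient

    request = httpclient.HTTPRequest(
        url="http://example.com/api",                 # placeholder URL
        method="POST",
        headers={"X-Requested-With": "XMLHttpRequest"},
        body=urllib.urlencode({"q": "tornado"}),
        request_timeout=10.0)
    try:
        response = httpclient.HTTPClient().fetch(request)
        print response.code, len(response.body)
    except httpclient.HTTPError, e:
        print "Error:", e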
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A non-blocking, single-threaded HTTP server.""" + +import cgi +import errno +import functools +import ioloop +import iostream +import logging +import os +import socket +import time +import urlparse + +try: + import fcntl +except ImportError: + if os.name == 'nt': + import win32_support as fcntl + else: + raise + +try: + import ssl # Python 2.6+ +except ImportError: + ssl = None + +_log = logging.getLogger('tornado.httpserver') + +class HTTPServer(object): + """A non-blocking, single-threaded HTTP server. + + A server is defined by a request callback that takes an HTTPRequest + instance as an argument and writes a valid HTTP response with + request.write(). request.finish() finishes the request (but does not + necessarily close the connection in the case of HTTP/1.1 keep-alive + requests). A simple example server that echoes back the URI you + requested: + + import httpserver + import ioloop + + def handle_request(request): + message = "You requested %s\n" % request.uri + request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % ( + len(message), message)) + request.finish() + + http_server = httpserver.HTTPServer(handle_request) + http_server.listen(8888) + ioloop.IOLoop.instance().start() + + HTTPServer is a very basic connection handler. Beyond parsing the + HTTP request body and headers, the only HTTP semantics implemented + in HTTPServer is HTTP/1.1 keep-alive connections. We do not, however, + implement chunked encoding, so the request callback must provide a + Content-Length header or implement chunked encoding for HTTP/1.1 + requests for the server to run correctly for HTTP/1.1 clients. If + the request handler is unable to do this, you can provide the + no_keep_alive argument to the HTTPServer constructor, which will + ensure the connection is closed on every request no matter what HTTP + version the client is using. + + If xheaders is True, we support the X-Real-Ip and X-Scheme headers, + which override the remote IP and HTTP scheme for all requests. These + headers are useful when running Tornado behind a reverse proxy or + load balancer. + + HTTPServer can serve HTTPS (SSL) traffic with Python 2.6+ and OpenSSL. + To make this server serve SSL traffic, send the ssl_options dictionary + argument with the arguments required for the ssl.wrap_socket() method, + including "certfile" and "keyfile": + + HTTPServer(applicaton, ssl_options={ + "certfile": os.path.join(data_dir, "mydomain.crt"), + "keyfile": os.path.join(data_dir, "mydomain.key"), + }) + + By default, listen() runs in a single thread in a single process. You + can utilize all available CPUs on this machine by calling bind() and + start() instead of listen(): + + http_server = httpserver.HTTPServer(handle_request) + http_server.bind(8888) + http_server.start() # Forks multiple sub-processes + ioloop.IOLoop.instance().start() + + start() detects the number of CPUs on this machine and "pre-forks" that + number of child processes so that we have one Tornado process per CPU, + all with their own IOLoop. 
You can also pass in the specific number of + child processes you want to run with if you want to override this + auto-detection. + """ + def __init__(self, request_callback, no_keep_alive=False, io_loop=None, + xheaders=False, ssl_options=None): + """Initializes the server with the given request callback. + + If you use pre-forking/start() instead of the listen() method to + start your server, you should not pass an IOLoop instance to this + constructor. Each pre-forked child process will create its own + IOLoop instance after the forking process. + """ + self.request_callback = request_callback + self.no_keep_alive = no_keep_alive + self.io_loop = io_loop + self.xheaders = xheaders + self.ssl_options = ssl_options + self._socket = None + self._started = False + + def listen(self, port, address=""): + """Binds to the given port and starts the server in a single process. + + This method is a shortcut for: + + server.bind(port, address) + server.start(1) + + """ + self.bind(port, address) + self.start(1) + + def bind(self, port, address=""): + """Binds this server to the given port on the given IP address. + + To start the server, call start(). If you want to run this server + in a single process, you can call listen() as a shortcut to the + sequence of bind() and start() calls. + """ + assert not self._socket + self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + flags = fcntl.fcntl(self._socket.fileno(), fcntl.F_GETFD) + flags |= fcntl.FD_CLOEXEC + fcntl.fcntl(self._socket.fileno(), fcntl.F_SETFD, flags) + self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self._socket.setblocking(0) + self._socket.bind((address, port)) + self._socket.listen(128) + + def start(self, num_processes=None): + """Starts this server in the IOLoop. + + By default, we detect the number of cores available on this machine + and fork that number of child processes. If num_processes is given, we + fork that specific number of sub-processes. + + If num_processes is 1 or we detect only 1 CPU core, we run the server + in this process and do not fork any additional child process. + + Since we run use processes and not threads, there is no shared memory + between any server code. + """ + assert not self._started + self._started = True + if num_processes is None: + # Use sysconf to detect the number of CPUs (cores) + try: + num_processes = os.sysconf("SC_NPROCESSORS_CONF") + except ValueError: + _log.error("Could not get num processors from sysconf; " + "running with one process") + num_processes = 1 + if num_processes > 1 and ioloop.IOLoop.initialized(): + _log.error("Cannot run in multiple processes: IOLoop instance " + "has already been initialized. 
You cannot call " + "IOLoop.instance() before calling start()") + num_processes = 1 + if num_processes > 1: + _log.info("Pre-forking %d server processes", num_processes) + for i in range(num_processes): + if os.fork() == 0: + self.io_loop = ioloop.IOLoop.instance() + self.io_loop.add_handler( + self._socket.fileno(), self._handle_events, + ioloop.IOLoop.READ) + return + os.waitpid(-1, 0) + else: + if not self.io_loop: + self.io_loop = ioloop.IOLoop.instance() + self.io_loop.add_handler(self._socket.fileno(), + self._handle_events, + ioloop.IOLoop.READ) + + def stop(self): + self.io_loop.remove_handler(self._socket.fileno()) + self._socket.close() + + def _handle_events(self, fd, events): + while True: + try: + connection, address = self._socket.accept() + except socket.error, e: + if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN): + return + raise + if self.ssl_options is not None: + assert ssl, "Python 2.6+ and OpenSSL required for SSL" + connection = ssl.wrap_socket( + connection, server_side=True, **self.ssl_options) + try: + stream = iostream.IOStream(connection, io_loop=self.io_loop) + HTTPConnection(stream, address, self.request_callback, + self.no_keep_alive, self.xheaders) + except: + _log.error("Error in connection callback", exc_info=True) + + +class HTTPConnection(object): + """Handles a connection to an HTTP client, executing HTTP requests. + + We parse HTTP headers and bodies, and execute the request callback + until the HTTP conection is closed. + """ + def __init__(self, stream, address, request_callback, no_keep_alive=False, + xheaders=False): + self.stream = stream + self.address = address + self.request_callback = request_callback + self.no_keep_alive = no_keep_alive + self.xheaders = xheaders + self._request = None + self._request_finished = False + self.stream.read_until("\r\n\r\n", self._on_headers) + + def write(self, chunk): + assert self._request, "Request closed" + if not self.stream.closed(): + self.stream.write(chunk, self._on_write_complete) + + def finish(self): + assert self._request, "Request closed" + self._request_finished = True + if not self.stream.writing(): + self._finish_request() + + def _on_write_complete(self): + if self._request_finished: + self._finish_request() + + def _finish_request(self): + if self.no_keep_alive: + disconnect = True + else: + connection_header = self._request.headers.get("Connection") + if self._request.supports_http_1_1(): + disconnect = connection_header == "close" + elif ("Content-Length" in self._request.headers + or self._request.method in ("HEAD", "GET")): + disconnect = connection_header != "Keep-Alive" + else: + disconnect = True + self._request = None + self._request_finished = False + if disconnect: + self.stream.close() + return + self.stream.read_until("\r\n\r\n", self._on_headers) + + def _on_headers(self, data): + eol = data.find("\r\n") + start_line = data[:eol] + method, uri, version = start_line.split(" ") + if not version.startswith("HTTP/"): + raise Exception("Malformed HTTP version in HTTP Request-Line") + headers = HTTPHeaders.parse(data[eol:]) + self._request = HTTPRequest( + connection=self, method=method, uri=uri, version=version, + headers=headers, remote_ip=self.address[0]) + + content_length = headers.get("Content-Length") + if content_length: + content_length = int(content_length) + if content_length > self.stream.max_buffer_size: + raise Exception("Content-Length too long") + if headers.get("Expect") == "100-continue": + self.stream.write("HTTP/1.1 100 (Continue)\r\n\r\n") + 
self.stream.read_bytes(content_length, self._on_request_body) + return + + self.request_callback(self._request) + + def _on_request_body(self, data): + self._request.body = data + content_type = self._request.headers.get("Content-Type", "") + if self._request.method == "POST": + if content_type.startswith("application/x-www-form-urlencoded"): + arguments = cgi.parse_qs(self._request.body) + for name, values in arguments.iteritems(): + values = [v for v in values if v] + if values: + self._request.arguments.setdefault(name, []).extend( + values) + elif content_type.startswith("multipart/form-data"): + boundary = content_type[30:] + if boundary: self._parse_mime_body(boundary, data) + self.request_callback(self._request) + + def _parse_mime_body(self, boundary, data): + if data.endswith("\r\n"): + footer_length = len(boundary) + 6 + else: + footer_length = len(boundary) + 4 + parts = data[:-footer_length].split("--" + boundary + "\r\n") + for part in parts: + if not part: continue + eoh = part.find("\r\n\r\n") + if eoh == -1: + _log.warning("multipart/form-data missing headers") + continue + headers = HTTPHeaders.parse(part[:eoh]) + name_header = headers.get("Content-Disposition", "") + if not name_header.startswith("form-data;") or \ + not part.endswith("\r\n"): + _log.warning("Invalid multipart/form-data") + continue + value = part[eoh + 4:-2] + name_values = {} + for name_part in name_header[10:].split(";"): + name, name_value = name_part.strip().split("=", 1) + name_values[name] = name_value.strip('"').decode("utf-8") + if not name_values.get("name"): + _log.warning("multipart/form-data value missing name") + continue + name = name_values["name"] + if name_values.get("filename"): + ctype = headers.get("Content-Type", "application/unknown") + self._request.files.setdefault(name, []).append(dict( + filename=name_values["filename"], body=value, + content_type=ctype)) + else: + self._request.arguments.setdefault(name, []).append(value) + + +class HTTPRequest(object): + """A single HTTP request. + + GET/POST arguments are available in the arguments property, which + maps arguments names to lists of values (to support multiple values + for individual names). Names and values are both unicode always. + + File uploads are available in the files property, which maps file + names to list of files. Each file is a dictionary of the form + {"filename":..., "content_type":..., "body":...}. The content_type + comes from the provided HTTP header and should not be trusted + outright given that it can be easily forged. + + An HTTP request is attached to a single HTTP connection, which can + be accessed through the "connection" attribute. Since connections + are typically kept open in HTTP/1.1, multiple requests can be handled + sequentially on a single connection. 
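A short sketch of a request callback that consumes the arguments and files attributes described above; the form field names ("name", "avatar") are invented for illustration and assume a multipart POST handled by this server:

    def handle_upload(request):
        name = request.arguments.get("name", [u"anonymous"])[0]
        uploads = request.files.get("avatar", [])
        for f in uploads:
            # each upload is a dict with "filename", "body" and "content_type"
            print name, f["filename"], len(f["body"]), f["content_type"]
        body = "received %d file(s)\n" % len(uploads)
        request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s"
                      % (len(body), body))
        request.finish()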
+ """ + def __init__(self, method, uri, version="HTTP/1.0", headers=None, + body=None, remote_ip=None, protocol=None, host=None, + files=None, connection=None): + self.method = method + self.uri = uri + self.version = version + self.headers = headers or HTTPHeaders() + self.body = body or "" + if connection and connection.xheaders: + # Squid uses X-Forwarded-For, others use X-Real-Ip + self.remote_ip = self.headers.get( + "X-Real-Ip", self.headers.get("X-Forwarded-For", remote_ip)) + self.protocol = self.headers.get("X-Scheme", protocol) or "http" + else: + self.remote_ip = remote_ip + self.protocol = protocol or "http" + self.host = host or self.headers.get("Host") or "127.0.0.1" + self.files = files or {} + self.connection = connection + self._start_time = time.time() + self._finish_time = None + + scheme, netloc, path, query, fragment = urlparse.urlsplit(uri) + self.path = path + self.query = query + arguments = cgi.parse_qs(query) + self.arguments = {} + for name, values in arguments.iteritems(): + values = [v for v in values if v] + if values: self.arguments[name] = values + + def supports_http_1_1(self): + """Returns True if this request supports HTTP/1.1 semantics""" + return self.version == "HTTP/1.1" + + def write(self, chunk): + """Writes the given chunk to the response stream.""" + assert isinstance(chunk, str) + self.connection.write(chunk) + + def finish(self): + """Finishes this HTTP request on the open connection.""" + self.connection.finish() + self._finish_time = time.time() + + def full_url(self): + """Reconstructs the full URL for this request.""" + return self.protocol + "://" + self.host + self.uri + + def request_time(self): + """Returns the amount of time it took for this request to execute.""" + if self._finish_time is None: + return time.time() - self._start_time + else: + return self._finish_time - self._start_time + + def __repr__(self): + attrs = ("protocol", "host", "method", "uri", "version", "remote_ip", + "remote_ip", "body") + args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) + return "%s(%s, headers=%s)" % ( + self.__class__.__name__, args, dict(self.headers)) + + +class HTTPHeaders(dict): + """A dictionary that maintains Http-Header-Case for all keys.""" + def __setitem__(self, name, value): + dict.__setitem__(self, self._normalize_name(name), value) + + def __getitem__(self, name): + return dict.__getitem__(self, self._normalize_name(name)) + + def _normalize_name(self, name): + return "-".join([w.capitalize() for w in name.split("-")]) + + @classmethod + def parse(cls, headers_string): + headers = cls() + for line in headers_string.splitlines(): + if line: + name, value = line.split(": ", 1) + headers[name] = value + return headers diff --git a/vendor/tornado/tornado/ioloop.py b/vendor/tornado/tornado/ioloop.py new file mode 100644 index 000000000000..e94c17372e9f --- /dev/null +++ b/vendor/tornado/tornado/ioloop.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""A level-triggered I/O loop for non-blocking sockets.""" + +import bisect +import errno +import os +import logging +import select +import time + +try: + import fcntl +except ImportError: + if os.name == 'nt': + import win32_support + import win32_support as fcntl + else: + raise + +_log = logging.getLogger("tornado.ioloop") + +class IOLoop(object): + """A level-triggered I/O loop. + + We use epoll if it is available, or else we fall back on select(). If + you are implementing a system that needs to handle 1000s of simultaneous + connections, you should use Linux and either compile our epoll module or + use Python 2.6+ to get epoll support. + + Example usage for a simple TCP server: + + import errno + import functools + import ioloop + import socket + + def connection_ready(sock, fd, events): + while True: + try: + connection, address = sock.accept() + except socket.error, e: + if e[0] not in (errno.EWOULDBLOCK, errno.EAGAIN): + raise + return + connection.setblocking(0) + handle_connection(connection, address) + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.setblocking(0) + sock.bind(("", port)) + sock.listen(128) + + io_loop = ioloop.IOLoop.instance() + callback = functools.partial(connection_ready, sock) + io_loop.add_handler(sock.fileno(), callback, io_loop.READ) + io_loop.start() + + """ + # Constants from the epoll module + _EPOLLIN = 0x001 + _EPOLLPRI = 0x002 + _EPOLLOUT = 0x004 + _EPOLLERR = 0x008 + _EPOLLHUP = 0x010 + _EPOLLRDHUP = 0x2000 + _EPOLLONESHOT = (1 << 30) + _EPOLLET = (1 << 31) + + # Our events map exactly to the epoll events + NONE = 0 + READ = _EPOLLIN + WRITE = _EPOLLOUT + ERROR = _EPOLLERR | _EPOLLHUP | _EPOLLRDHUP + + def __init__(self, impl=None): + self._impl = impl or _poll() + if hasattr(self._impl, 'fileno'): + self._set_close_exec(self._impl.fileno()) + self._handlers = {} + self._events = {} + self._callbacks = set() + self._timeouts = [] + self._running = False + self._stopped = False + + # Create a pipe that we send bogus data to when we want to wake + # the I/O loop when it is idle + if os.name != 'nt': + r, w = os.pipe() + self._set_nonblocking(r) + self._set_nonblocking(w) + self._set_close_exec(r) + self._set_close_exec(w) + self._waker_reader = os.fdopen(r, "r", 0) + self._waker_writer = os.fdopen(w, "w", 0) + else: + self._waker_reader = self._waker_writer = win32_support.Pipe() + r = self._waker_writer.reader_fd + self.add_handler(r, self._read_waker, self.READ) + + @classmethod + def instance(cls): + """Returns a global IOLoop instance. + + Most single-threaded applications have a single, global IOLoop. + Use this method instead of passing around IOLoop instances + throughout your code. 
+ + A common pattern for classes that depend on IOLoops is to use + a default argument to enable programs with multiple IOLoops + but not require the argument for simpler applications: + + class MyClass(object): + def __init__(self, io_loop=None): + self.io_loop = io_loop or IOLoop.instance() + """ + if not hasattr(cls, "_instance"): + cls._instance = cls() + return cls._instance + + @classmethod + def initialized(cls): + return hasattr(cls, "_instance") + + def add_handler(self, fd, handler, events): + """Registers the given handler to receive the given events for fd.""" + self._handlers[fd] = handler + self._impl.register(fd, events | self.ERROR) + + def update_handler(self, fd, events): + """Changes the events we listen for fd.""" + self._impl.modify(fd, events | self.ERROR) + + def remove_handler(self, fd): + """Stop listening for events on fd.""" + self._handlers.pop(fd, None) + self._events.pop(fd, None) + try: + self._impl.unregister(fd) + except (OSError, IOError): + _log.debug("Error deleting fd from IOLoop", exc_info=True) + + def start(self): + """Starts the I/O loop. + + The loop will run until one of the I/O handlers calls stop(), which + will make the loop stop after the current event iteration completes. + """ + if self._stopped: + self._stopped = False + return + self._running = True + while True: + # Never use an infinite timeout here - it can stall epoll + poll_timeout = 0.2 + + # Prevent IO event starvation by delaying new callbacks + # to the next iteration of the event loop. + callbacks = list(self._callbacks) + for callback in callbacks: + # A callback can add or remove other callbacks + if callback in self._callbacks: + self._callbacks.remove(callback) + self._run_callback(callback) + + if self._callbacks: + poll_timeout = 0.0 + + if self._timeouts: + now = time.time() + while self._timeouts and self._timeouts[0].deadline <= now: + timeout = self._timeouts.pop(0) + self._run_callback(timeout.callback) + if self._timeouts: + milliseconds = self._timeouts[0].deadline - now + poll_timeout = min(milliseconds, poll_timeout) + + if not self._running: + break + + try: + event_pairs = self._impl.poll(poll_timeout) + except Exception, e: + if hasattr(e, 'errno') and e.errno == errno.EINTR: + _log.warning("Interrupted system call", exc_info=1) + continue + else: + raise + + # Pop one fd at a time from the set of pending fds and run + # its handler. Since that handler may perform actions on + # other file descriptors, there may be reentrant calls to + # this IOLoop that update self._events + self._events.update(event_pairs) + while self._events: + fd, events = self._events.popitem() + try: + self._handlers[fd](fd, events) + except (KeyboardInterrupt, SystemExit): + raise + except (OSError, IOError), e: + if e[0] == errno.EPIPE: + # Happens when the client closes the connection + pass + else: + _log.error("Exception in I/O handler for fd %d", + fd, exc_info=True) + except: + _log.error("Exception in I/O handler for fd %d", + fd, exc_info=True) + # reset the stopped flag so another start/stop pair can be issued + self._stopped = False + + def stop(self): + """Stop the loop after the current event loop iteration is complete. + If the event loop is not currently running, the next call to start() + will return immediately. 
+ + To use asynchronous methods from otherwise-synchronous code (such as + unit tests), you can start and stop the event loop like this: + ioloop = IOLoop() + async_method(ioloop=ioloop, callback=ioloop.stop) + ioloop.start() + ioloop.start() will return after async_method has run its callback, + whether that callback was invoked before or after ioloop.start. + """ + self._running = False + self._stopped = True + self._wake() + + def running(self): + """Returns true if this IOLoop is currently running.""" + return self._running + + def add_timeout(self, deadline, callback): + """Calls the given callback at the time deadline from the I/O loop.""" + timeout = _Timeout(deadline, callback) + bisect.insort(self._timeouts, timeout) + return timeout + + def remove_timeout(self, timeout): + self._timeouts.remove(timeout) + + def add_callback(self, callback): + """Calls the given callback on the next I/O loop iteration.""" + self._callbacks.add(callback) + self._wake() + + def remove_callback(self, callback): + """Removes the given callback from the next I/O loop iteration.""" + self._callbacks.remove(callback) + + def _wake(self): + try: + self._waker_writer.write("x") + except IOError: + pass + + def _run_callback(self, callback): + try: + callback() + except (KeyboardInterrupt, SystemExit): + raise + except: + self.handle_callback_exception(callback) + + def handle_callback_exception(self, callback): + """This method is called whenever a callback run by the IOLoop + throws an exception. + + By default simply logs the exception as an error. Subclasses + may override this method to customize reporting of exceptions. + + The exception itself is not passed explicitly, but is available + in sys.exc_info. + """ + _log.error("Exception in callback %r", callback, exc_info=True) + + def _read_waker(self, fd, events): + try: + while True: + self._waker_reader.read() + except IOError: + pass + + def _set_nonblocking(self, fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) + + def _set_close_exec(self, fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFD) + fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) + + +class _Timeout(object): + """An IOLoop timeout, a UNIX timestamp and a callback""" + + # Reduce memory overhead when there are lots of pending callbacks + __slots__ = ['deadline', 'callback'] + + def __init__(self, deadline, callback): + self.deadline = deadline + self.callback = callback + + def __cmp__(self, other): + return cmp((self.deadline, id(self.callback)), + (other.deadline, id(other.callback))) + + +class PeriodicCallback(object): + """Schedules the given callback to be called periodically. + + The callback is called every callback_time milliseconds. 
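A minimal sketch of the two scheduling primitives above, add_timeout and PeriodicCallback, running on the global IOLoop; report_stats is a hypothetical callback:

    import time

    import ioloop

    def report_stats():
        print "still alive"

    io_loop = ioloop.IOLoop.instance()
    # one-shot: run report_stats roughly five seconds from now
    io_loop.add_timeout(time.time() + 5, report_stats)
    # recurring: run report_stats every 30 seconds (callback_time is in ms)
    ioloop.PeriodicCallback(report_stats, 30 * 1000, io_loop=io_loop).start()
    io_loop.start()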
+ """ + def __init__(self, callback, callback_time, io_loop=None): + self.callback = callback + self.callback_time = callback_time + self.io_loop = io_loop or IOLoop.instance() + self._running = True + + def start(self): + timeout = time.time() + self.callback_time / 1000.0 + self.io_loop.add_timeout(timeout, self._run) + + def stop(self): + self._running = False + + def _run(self): + if not self._running: return + try: + self.callback() + except (KeyboardInterrupt, SystemExit): + raise + except: + _log.error("Error in periodic callback", exc_info=True) + self.start() + + +class _EPoll(object): + """An epoll-based event loop using our C module for Python 2.5 systems""" + _EPOLL_CTL_ADD = 1 + _EPOLL_CTL_DEL = 2 + _EPOLL_CTL_MOD = 3 + + def __init__(self): + self._epoll_fd = epoll.epoll_create() + + def fileno(self): + return self._epoll_fd + + def register(self, fd, events): + epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_ADD, fd, events) + + def modify(self, fd, events): + epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_MOD, fd, events) + + def unregister(self, fd): + epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_DEL, fd, 0) + + def poll(self, timeout): + return epoll.epoll_wait(self._epoll_fd, int(timeout * 1000)) + + +class _KQueue(object): + """A kqueue-based event loop for BSD/Mac systems.""" + def __init__(self): + self._kqueue = select.kqueue() + self._active = {} + + def fileno(self): + return self._kqueue.fileno() + + def register(self, fd, events): + self._control(fd, events, select.KQ_EV_ADD) + self._active[fd] = events + + def modify(self, fd, events): + self.unregister(fd) + self.register(fd, events) + + def unregister(self, fd): + events = self._active.pop(fd) + self._control(fd, events, select.KQ_EV_DELETE) + + def _control(self, fd, events, flags): + kevents = [] + if events & IOLoop.WRITE: + kevents.append(select.kevent( + fd, filter=select.KQ_FILTER_WRITE, flags=flags)) + if events & IOLoop.READ or not kevents: + # Always read when there is not a write + kevents.append(select.kevent( + fd, filter=select.KQ_FILTER_READ, flags=flags)) + # Even though control() takes a list, it seems to return EINVAL + # on Mac OS X (10.6) when there is more than one event in the list. 
+ for kevent in kevents: + self._kqueue.control([kevent], 0) + + def poll(self, timeout): + kevents = self._kqueue.control(None, 1000, timeout) + events = {} + for kevent in kevents: + fd = kevent.ident + flags = 0 + if kevent.filter == select.KQ_FILTER_READ: + events[fd] = events.get(fd, 0) | IOLoop.READ + if kevent.filter == select.KQ_FILTER_WRITE: + events[fd] = events.get(fd, 0) | IOLoop.WRITE + if kevent.flags & select.KQ_EV_ERROR: + events[fd] = events.get(fd, 0) | IOLoop.ERROR + return events.items() + + +class _Select(object): + """A simple, select()-based IOLoop implementation for non-Linux systems""" + def __init__(self): + self.read_fds = set() + self.write_fds = set() + self.error_fds = set() + self.fd_sets = (self.read_fds, self.write_fds, self.error_fds) + + def register(self, fd, events): + if events & IOLoop.READ: self.read_fds.add(fd) + if events & IOLoop.WRITE: self.write_fds.add(fd) + if events & IOLoop.ERROR: self.error_fds.add(fd) + + def modify(self, fd, events): + self.unregister(fd) + self.register(fd, events) + + def unregister(self, fd): + self.read_fds.discard(fd) + self.write_fds.discard(fd) + self.error_fds.discard(fd) + + def poll(self, timeout): + readable, writeable, errors = select.select( + self.read_fds, self.write_fds, self.error_fds, timeout) + events = {} + for fd in readable: + events[fd] = events.get(fd, 0) | IOLoop.READ + for fd in writeable: + events[fd] = events.get(fd, 0) | IOLoop.WRITE + for fd in errors: + events[fd] = events.get(fd, 0) | IOLoop.ERROR + return events.items() + + +# Choose a poll implementation. Use epoll if it is available, fall back to +# select() for non-Linux platforms +if hasattr(select, "epoll"): + # Python 2.6+ on Linux + _poll = select.epoll +elif hasattr(select, "kqueue"): + # Python 2.6+ on BSD or Mac + _poll = _KQueue +else: + try: + # Linux systems with our C module installed + import epoll + _poll = _EPoll + except: + # All other systems + import sys + if "linux" in sys.platform: + _log.warning("epoll module not found; using select()") + _poll = _Select diff --git a/vendor/tornado/tornado/iostream.py b/vendor/tornado/tornado/iostream.py new file mode 100644 index 000000000000..af7c6edbfee7 --- /dev/null +++ b/vendor/tornado/tornado/iostream.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A utility class to write to and read from a non-blocking socket.""" + +import errno +import ioloop +import logging +import socket + +_log = logging.getLogger('tornado.iostream') + +class IOStream(object): + """A utility class to write to and read from a non-blocking socket. + + We support three methods: write(), read_until(), and read_bytes(). + All of the methods take callbacks (since writing and reading are + non-blocking and asynchronous). read_until() reads the socket until + a given delimiter, and read_bytes() reads until a specified number + of bytes have been read from the socket. 
+ + A very simple (and broken) HTTP client using this class: + + import ioloop + import iostream + import socket + + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) + s.connect(("friendfeed.com", 80)) + stream = IOStream(s) + + def on_headers(data): + headers = {} + for line in data.split("\r\n"): + parts = line.split(":") + if len(parts) == 2: + headers[parts[0].strip()] = parts[1].strip() + stream.read_bytes(int(headers["Content-Length"]), on_body) + + def on_body(data): + print data + stream.close() + ioloop.IOLoop.instance().stop() + + stream.write("GET / HTTP/1.0\r\n\r\n") + stream.read_until("\r\n\r\n", on_headers) + ioloop.IOLoop.instance().start() + + """ + def __init__(self, socket, io_loop=None, max_buffer_size=104857600, + read_chunk_size=4096): + self.socket = socket + self.socket.setblocking(False) + self.io_loop = io_loop or ioloop.IOLoop.instance() + self.max_buffer_size = max_buffer_size + self.read_chunk_size = read_chunk_size + self._read_buffer = "" + self._write_buffer = "" + self._read_delimiter = None + self._read_bytes = None + self._read_callback = None + self._write_callback = None + self._close_callback = None + self._state = self.io_loop.ERROR + self.io_loop.add_handler( + self.socket.fileno(), self._handle_events, self._state) + + def read_until(self, delimiter, callback): + """Call callback when we read the given delimiter.""" + assert not self._read_callback, "Already reading" + loc = self._read_buffer.find(delimiter) + if loc != -1: + callback(self._consume(loc + len(delimiter))) + return + self._check_closed() + self._read_delimiter = delimiter + self._read_callback = callback + self._add_io_state(self.io_loop.READ) + + def read_bytes(self, num_bytes, callback): + """Call callback when we read the given number of bytes.""" + assert not self._read_callback, "Already reading" + if len(self._read_buffer) >= num_bytes: + callback(self._consume(num_bytes)) + return + self._check_closed() + self._read_bytes = num_bytes + self._read_callback = callback + self._add_io_state(self.io_loop.READ) + + def write(self, data, callback=None): + """Write the given data to this stream. + + If callback is given, we call it when all of the buffered write + data has been successfully written to the stream. If there was + previously buffered write data and an old write callback, that + callback is simply overwritten with this new callback. 
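A small sketch of the write callback behavior described above; "stream" is assumed to be an IOStream wrapping an already-connected socket, and the callback fires once everything buffered so far has been handed to the kernel:

    def on_flushed():
        print "request fully written; waiting for the response"

    stream.write("GET / HTTP/1.0\r\n\r\n", on_flushed)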
+ """ + self._check_closed() + self._write_buffer += data + self._add_io_state(self.io_loop.WRITE) + self._write_callback = callback + + def set_close_callback(self, callback): + """Call the given callback when the stream is closed.""" + self._close_callback = callback + + def close(self): + """Close this stream.""" + if self.socket is not None: + self.io_loop.remove_handler(self.socket.fileno()) + self.socket.close() + self.socket = None + if self._close_callback: self._close_callback() + + def reading(self): + """Returns true if we are currently reading from the stream.""" + return self._read_callback is not None + + def writing(self): + """Returns true if we are currently writing to the stream.""" + return len(self._write_buffer) > 0 + + def closed(self): + return self.socket is None + + def _handle_events(self, fd, events): + if not self.socket: + _log.warning("Got events for closed stream %d", fd) + return + if events & self.io_loop.READ: + self._handle_read() + if not self.socket: + return + if events & self.io_loop.WRITE: + self._handle_write() + if not self.socket: + return + if events & self.io_loop.ERROR: + self.close() + return + state = self.io_loop.ERROR + if self._read_delimiter or self._read_bytes: + state |= self.io_loop.READ + if self._write_buffer: + state |= self.io_loop.WRITE + if state != self._state: + self._state = state + self.io_loop.update_handler(self.socket.fileno(), self._state) + + def _handle_read(self): + try: + chunk = self.socket.recv(self.read_chunk_size) + except socket.error, e: + if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN): + return + else: + _log.warning("Read error on %d: %s", + self.socket.fileno(), e) + self.close() + return + if not chunk: + self.close() + return + self._read_buffer += chunk + if len(self._read_buffer) >= self.max_buffer_size: + _log.error("Reached maximum read buffer size") + self.close() + return + if self._read_bytes: + if len(self._read_buffer) >= self._read_bytes: + num_bytes = self._read_bytes + callback = self._read_callback + self._read_callback = None + self._read_bytes = None + callback(self._consume(num_bytes)) + elif self._read_delimiter: + loc = self._read_buffer.find(self._read_delimiter) + if loc != -1: + callback = self._read_callback + delimiter_len = len(self._read_delimiter) + self._read_callback = None + self._read_delimiter = None + callback(self._consume(loc + delimiter_len)) + + def _handle_write(self): + while self._write_buffer: + try: + num_bytes = self.socket.send(self._write_buffer) + self._write_buffer = self._write_buffer[num_bytes:] + except socket.error, e: + if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN): + break + else: + _log.warning("Write error on %d: %s", + self.socket.fileno(), e) + self.close() + return + if not self._write_buffer and self._write_callback: + callback = self._write_callback + self._write_callback = None + callback() + + def _consume(self, loc): + result = self._read_buffer[:loc] + self._read_buffer = self._read_buffer[loc:] + return result + + def _check_closed(self): + if not self.socket: + raise IOError("Stream is closed") + + def _add_io_state(self, state): + if not self._state & state: + self._state = self._state | state + self.io_loop.update_handler(self.socket.fileno(), self._state) diff --git a/vendor/tornado/tornado/locale.py b/vendor/tornado/tornado/locale.py new file mode 100644 index 000000000000..6a8537d75009 --- /dev/null +++ b/vendor/tornado/tornado/locale.py @@ -0,0 +1,457 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Translation methods for generating localized strings. + +To load a locale and generate a translated string: + + user_locale = locale.get("es_LA") + print user_locale.translate("Sign out") + +locale.get() returns the closest matching locale, not necessarily the +specific locale you requested. You can support pluralization with +additional arguments to translate(), e.g.: + + people = [...] + message = user_locale.translate( + "%(list)s is online", "%(list)s are online", len(people)) + print message % {"list": user_locale.list(people)} + +The first string is chosen if len(people) == 1, otherwise the second +string is chosen. + +Applications should call one of load_translations (which uses a simple +CSV format) or load_gettext_translations (which uses the .mo format +supported by gettext and related tools). If neither method is called, +the locale.translate method will simply return the original string. +""" + +import csv +import datetime +import logging +import os +import os.path +import re + +_default_locale = "en_US" +_translations = {} +_supported_locales = frozenset([_default_locale]) +_use_gettext = False + +_log = logging.getLogger('tornado.locale') + +def get(*locale_codes): + """Returns the closest match for the given locale codes. + + We iterate over all given locale codes in order. If we have a tight + or a loose match for the code (e.g., "en" for "en_US"), we return + the locale. Otherwise we move to the next code in the list. + + By default we return en_US if no translations are found for any of + the specified locales. You can change the default locale with + set_default_locale() below. + """ + return Locale.get_closest(*locale_codes) + + +def set_default_locale(code): + """Sets the default locale, used in get_closest_locale(). + + The default locale is assumed to be the language used for all strings + in the system. The translations loaded from disk are mappings from + the default locale to the destination locale. Consequently, you don't + need to create a translation file for the default locale. + """ + global _default_locale + global _supported_locales + _default_locale = code + _supported_locales = frozenset(_translations.keys() + [_default_locale]) + + +def load_translations(directory): + """Loads translations from CSV files in a directory. + + Translations are strings with optional Python-style named placeholders + (e.g., "My name is %(name)s") and their associated translations. + + The directory should have translation files of the form LOCALE.csv, + e.g. es_GT.csv. The CSV files should have two or three columns: string, + translation, and an optional plural indicator. Plural indicators should + be one of "plural" or "singular". A given string can have both singular + and plural forms. For example "%(name)s liked this" may have a + different verb conjugation depending on whether %(name)s is one + name or a list of names. There should be two rows in the CSV file for + that string, one with plural indicator "singular", and one "plural". 
+ For strings with no verbs that would change on translation, simply + use "unknown" or the empty string (or don't include the column at all). + + Example translation es_LA.csv: + + "I love you","Te amo" + "%(name)s liked this","A %(name)s les gust\xf3 esto","plural" + "%(name)s liked this","A %(name)s le gust\xf3 esto","singular" + + """ + global _translations + global _supported_locales + _translations = {} + for path in os.listdir(directory): + if not path.endswith(".csv"): continue + locale, extension = path.split(".") + if locale not in LOCALE_NAMES: + _log.error("Unrecognized locale %r (path: %s)", locale, + os.path.join(directory, path)) + continue + f = open(os.path.join(directory, path), "r") + _translations[locale] = {} + for i, row in enumerate(csv.reader(f)): + if not row or len(row) < 2: continue + row = [c.decode("utf-8").strip() for c in row] + english, translation = row[:2] + if len(row) > 2: + plural = row[2] or "unknown" + else: + plural = "unknown" + if plural not in ("plural", "singular", "unknown"): + _log.error("Unrecognized plural indicator %r in %s line %d", + plural, path, i + 1) + continue + _translations[locale].setdefault(plural, {})[english] = translation + f.close() + _supported_locales = frozenset(_translations.keys() + [_default_locale]) + _log.info("Supported locales: %s", sorted(_supported_locales)) + +def load_gettext_translations(directory, domain): + """Loads translations from gettext's locale tree + + Locale tree is similar to system's /usr/share/locale, like: + + {directory}/{lang}/LC_MESSAGES/{domain}.mo + + Three steps are required to have you app translated: + + 1. Generate POT translation file + xgettext --language=Python --keyword=_:1,2 -d cyclone file1.py file2.html etc + + 2. Merge against existing POT file: + msgmerge old.po cyclone.po > new.po + + 3. Compile: + msgfmt cyclone.po -o {directory}/pt_BR/LC_MESSAGES/cyclone.mo + """ + import gettext + global _translations + global _supported_locales + global _use_gettext + _translations = {} + for lang in os.listdir(directory): + if os.path.isfile(os.path.join(directory, lang)): continue + try: + os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain+".mo")) + _translations[lang] = gettext.translation(domain, directory, + languages=[lang]) + except Exception, e: + logging.error("Cannot load translation for '%s': %s", lang, str(e)) + continue + _supported_locales = frozenset(_translations.keys() + [_default_locale]) + _use_gettext = True + _log.info("Supported locales: %s", sorted(_supported_locales)) + + +def get_supported_locales(cls): + """Returns a list of all the supported locale codes.""" + return _supported_locales + + +class Locale(object): + @classmethod + def get_closest(cls, *locale_codes): + """Returns the closest match for the given locale code.""" + for code in locale_codes: + if not code: continue + code = code.replace("-", "_") + parts = code.split("_") + if len(parts) > 2: + continue + elif len(parts) == 2: + code = parts[0].lower() + "_" + parts[1].upper() + if code in _supported_locales: + return cls.get(code) + if parts[0].lower() in _supported_locales: + return cls.get(parts[0].lower()) + return cls.get(_default_locale) + + @classmethod + def get(cls, code): + """Returns the Locale for the given locale code. + + If it is not supported, we raise an exception. 
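A hedged sketch of the matching rules in get_closest above: codes are tried in order, a region-less fallback ("es" for "es_LA") is attempted, and the default locale is returned if nothing matches (the translation directory path is illustrative):

    load_translations("/path/to/translations")   # e.g. contains es_LA.csv
    user_locale = get("es-la", "en_GB", "en_US")
    print user_locale.code   # "es_LA" if es_LA.csv was found, otherwise a fallback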
+ """ + if not hasattr(cls, "_cache"): + cls._cache = {} + if code not in cls._cache: + assert code in _supported_locales + translations = _translations.get(code, None) + if translations is None: + locale = CSVLocale(code, {}) + elif _use_gettext: + locale = GettextLocale(code, translations) + else: + locale = CSVLocale(code, translations) + cls._cache[code] = locale + return cls._cache[code] + + def __init__(self, code, translations): + self.code = code + self.name = LOCALE_NAMES.get(code, {}).get("name", u"Unknown") + self.rtl = False + for prefix in ["fa", "ar", "he"]: + if self.code.startswith(prefix): + self.rtl = True + break + self.translations = translations + + # Initialize strings for date formatting + _ = self.translate + self._months = [ + _("January"), _("February"), _("March"), _("April"), + _("May"), _("June"), _("July"), _("August"), + _("September"), _("October"), _("November"), _("December")] + self._weekdays = [ + _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), + _("Friday"), _("Saturday"), _("Sunday")] + + def translate(self, message, plural_message=None, count=None): + raise NotImplementedError() + + def format_date(self, date, gmt_offset=0, relative=True, shorter=False, + full_format=False): + """Formats the given date (which should be GMT). + + By default, we return a relative time (e.g., "2 minutes ago"). You + can return an absolute date string with relative=False. + + You can force a full format date ("July 10, 1980") with + full_format=True. + """ + if self.code.startswith("ru"): + relative = False + if type(date) in (int, long, float): + date = datetime.datetime.utcfromtimestamp(date) + now = datetime.datetime.utcnow() + # Round down to now. Due to click skew, things are somethings + # slightly in the future. + if date > now: date = now + local_date = date - datetime.timedelta(minutes=gmt_offset) + local_now = now - datetime.timedelta(minutes=gmt_offset) + local_yesterday = local_now - datetime.timedelta(hours=24) + difference = now - date + seconds = difference.seconds + days = difference.days + + _ = self.translate + format = None + if not full_format: + if relative and days == 0: + if seconds < 50: + return _("1 second ago", "%(seconds)d seconds ago", + seconds) % { "seconds": seconds } + + if seconds < 50 * 60: + minutes = round(seconds / 60.0) + return _("1 minute ago", "%(minutes)d minutes ago", + minutes) % { "minutes": minutes } + + hours = round(seconds / (60.0 * 60)) + return _("1 hour ago", "%(hours)d hours ago", + hours) % { "hours": hours } + + if days == 0: + format = _("%(time)s") + elif days == 1 and local_date.day == local_yesterday.day and \ + relative: + format = _("yesterday") if shorter else \ + _("yesterday at %(time)s") + elif days < 5: + format = _("%(weekday)s") if shorter else \ + _("%(weekday)s at %(time)s") + elif days < 334: # 11mo, since confusing for same month last year + format = _("%(month_name)s %(day)s") if shorter else \ + _("%(month_name)s %(day)s at %(time)s") + + if format is None: + format = _("%(month_name)s %(day)s, %(year)s") if shorter else \ + _("%(month_name)s %(day)s, %(year)s at %(time)s") + + tfhour_clock = self.code not in ("en", "en_US", "zh_CN") + if tfhour_clock: + str_time = "%d:%02d" % (local_date.hour, local_date.minute) + elif self.code == "zh_CN": + str_time = "%s%d:%02d" % ( + (u'\u4e0a\u5348', u'\u4e0b\u5348')[local_date.hour >= 12], + local_date.hour % 12 or 12, local_date.minute) + else: + str_time = "%d:%02d %s" % ( + local_date.hour % 12 or 12, local_date.minute, + ("am", 
"pm")[local_date.hour >= 12]) + + return format % { + "month_name": self._months[local_date.month - 1], + "weekday": self._weekdays[local_date.weekday()], + "day": str(local_date.day), + "year": str(local_date.year), + "time": str_time + } + + def format_day(self, date, gmt_offset=0, dow=True): + """Formats the given date as a day of week. + + Example: "Monday, January 22". You can remove the day of week with + dow=False. + """ + local_date = date - datetime.timedelta(minutes=gmt_offset) + _ = self.translate + if dow: + return _("%(weekday)s, %(month_name)s %(day)s") % { + "month_name": self._months[local_date.month - 1], + "weekday": self._weekdays[local_date.weekday()], + "day": str(local_date.day), + } + else: + return _("%(month_name)s %(day)s") % { + "month_name": self._months[local_date.month - 1], + "day": str(local_date.day), + } + + def list(self, parts): + """Returns a comma-separated list for the given list of parts. + + The format is, e.g., "A, B and C", "A and B" or just "A" for lists + of size 1. + """ + _ = self.translate + if len(parts) == 0: return "" + if len(parts) == 1: return parts[0] + comma = u' \u0648 ' if self.code.startswith("fa") else u", " + return _("%(commas)s and %(last)s") % { + "commas": comma.join(parts[:-1]), + "last": parts[len(parts) - 1], + } + + def friendly_number(self, value): + """Returns a comma-separated number for the given integer.""" + if self.code not in ("en", "en_US"): + return str(value) + value = str(value) + parts = [] + while value: + parts.append(value[-3:]) + value = value[:-3] + return ",".join(reversed(parts)) + +class CSVLocale(Locale): + """Locale implementation using tornado's CSV translation format.""" + def translate(self, message, plural_message=None, count=None): + """Returns the translation for the given message for this locale. + + If plural_message is given, you must also provide count. We return + plural_message when count != 1, and we return the singular form + for the given message when count == 1. 
+ """ + if plural_message is not None: + assert count is not None + if count != 1: + message = plural_message + message_dict = self.translations.get("plural", {}) + else: + message_dict = self.translations.get("singular", {}) + else: + message_dict = self.translations.get("unknown", {}) + return message_dict.get(message, message) + +class GettextLocale(Locale): + """Locale implementation using the gettext module.""" + def translate(self, message, plural_message=None, count=None): + if plural_message is not None: + assert count is not None + return self.translations.ungettext(message, plural_message, count) + else: + return self.translations.ugettext(message) + +LOCALE_NAMES = { + "af_ZA": {"name_en": u"Afrikaans", "name": u"Afrikaans"}, + "ar_AR": {"name_en": u"Arabic", "name": u"\u0627\u0644\u0639\u0631\u0628\u064a\u0629"}, + "bg_BG": {"name_en": u"Bulgarian", "name": u"\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438"}, + "bn_IN": {"name_en": u"Bengali", "name": u"\u09ac\u09be\u0982\u09b2\u09be"}, + "bs_BA": {"name_en": u"Bosnian", "name": u"Bosanski"}, + "ca_ES": {"name_en": u"Catalan", "name": u"Catal\xe0"}, + "cs_CZ": {"name_en": u"Czech", "name": u"\u010ce\u0161tina"}, + "cy_GB": {"name_en": u"Welsh", "name": u"Cymraeg"}, + "da_DK": {"name_en": u"Danish", "name": u"Dansk"}, + "de_DE": {"name_en": u"German", "name": u"Deutsch"}, + "el_GR": {"name_en": u"Greek", "name": u"\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac"}, + "en_GB": {"name_en": u"English (UK)", "name": u"English (UK)"}, + "en_US": {"name_en": u"English (US)", "name": u"English (US)"}, + "es_ES": {"name_en": u"Spanish (Spain)", "name": u"Espa\xf1ol (Espa\xf1a)"}, + "es_LA": {"name_en": u"Spanish", "name": u"Espa\xf1ol"}, + "et_EE": {"name_en": u"Estonian", "name": u"Eesti"}, + "eu_ES": {"name_en": u"Basque", "name": u"Euskara"}, + "fa_IR": {"name_en": u"Persian", "name": u"\u0641\u0627\u0631\u0633\u06cc"}, + "fi_FI": {"name_en": u"Finnish", "name": u"Suomi"}, + "fr_CA": {"name_en": u"French (Canada)", "name": u"Fran\xe7ais (Canada)"}, + "fr_FR": {"name_en": u"French", "name": u"Fran\xe7ais"}, + "ga_IE": {"name_en": u"Irish", "name": u"Gaeilge"}, + "gl_ES": {"name_en": u"Galician", "name": u"Galego"}, + "he_IL": {"name_en": u"Hebrew", "name": u"\u05e2\u05d1\u05e8\u05d9\u05ea"}, + "hi_IN": {"name_en": u"Hindi", "name": u"\u0939\u093f\u0928\u094d\u0926\u0940"}, + "hr_HR": {"name_en": u"Croatian", "name": u"Hrvatski"}, + "hu_HU": {"name_en": u"Hungarian", "name": u"Magyar"}, + "id_ID": {"name_en": u"Indonesian", "name": u"Bahasa Indonesia"}, + "is_IS": {"name_en": u"Icelandic", "name": u"\xcdslenska"}, + "it_IT": {"name_en": u"Italian", "name": u"Italiano"}, + "ja_JP": {"name_en": u"Japanese", "name": u"\xe6\xe6\xe8"}, + "ko_KR": {"name_en": u"Korean", "name": u"\xed\xea\xec"}, + "lt_LT": {"name_en": u"Lithuanian", "name": u"Lietuvi\u0173"}, + "lv_LV": {"name_en": u"Latvian", "name": u"Latvie\u0161u"}, + "mk_MK": {"name_en": u"Macedonian", "name": u"\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438"}, + "ml_IN": {"name_en": u"Malayalam", "name": u"\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02"}, + "ms_MY": {"name_en": u"Malay", "name": u"Bahasa Melayu"}, + "nb_NO": {"name_en": u"Norwegian (bokmal)", "name": u"Norsk (bokm\xe5l)"}, + "nl_NL": {"name_en": u"Dutch", "name": u"Nederlands"}, + "nn_NO": {"name_en": u"Norwegian (nynorsk)", "name": u"Norsk (nynorsk)"}, + "pa_IN": {"name_en": u"Punjabi", "name": u"\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40"}, + "pl_PL": {"name_en": u"Polish", "name": u"Polski"}, + "pt_BR": 
{"name_en": u"Portuguese (Brazil)", "name": u"Portugu\xeas (Brasil)"}, + "pt_PT": {"name_en": u"Portuguese (Portugal)", "name": u"Portugu\xeas (Portugal)"}, + "ro_RO": {"name_en": u"Romanian", "name": u"Rom\xe2n\u0103"}, + "ru_RU": {"name_en": u"Russian", "name": u"\u0420\u0443\u0441\u0441\u043a\u0438\u0439"}, + "sk_SK": {"name_en": u"Slovak", "name": u"Sloven\u010dina"}, + "sl_SI": {"name_en": u"Slovenian", "name": u"Sloven\u0161\u010dina"}, + "sq_AL": {"name_en": u"Albanian", "name": u"Shqip"}, + "sr_RS": {"name_en": u"Serbian", "name": u"\u0421\u0440\u043f\u0441\u043a\u0438"}, + "sv_SE": {"name_en": u"Swedish", "name": u"Svenska"}, + "sw_KE": {"name_en": u"Swahili", "name": u"Kiswahili"}, + "ta_IN": {"name_en": u"Tamil", "name": u"\u0ba4\u0bae\u0bbf\u0bb4\u0bcd"}, + "te_IN": {"name_en": u"Telugu", "name": u"\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41"}, + "th_TH": {"name_en": u"Thai", "name": u"\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22"}, + "tl_PH": {"name_en": u"Filipino", "name": u"Filipino"}, + "tr_TR": {"name_en": u"Turkish", "name": u"T\xfcrk\xe7e"}, + "uk_UA": {"name_en": u"Ukraini ", "name": u"\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430"}, + "vi_VN": {"name_en": u"Vietnamese", "name": u"Ti\u1ebfng Vi\u1ec7t"}, + "zh_CN": {"name_en": u"Chinese (Simplified)", "name": u"\xe4\xe6(\xe7\xe4)"}, + "zh_HK": {"name_en": u"Chinese (Hong Kong)", "name": u"\xe4\xe6(\xe9\xe6)"}, + "zh_TW": {"name_en": u"Chinese (Taiwan)", "name": u"\xe4\xe6(\xe5\xe7)"}, +} diff --git a/vendor/tornado/tornado/options.py b/vendor/tornado/tornado/options.py new file mode 100644 index 000000000000..66bce091e725 --- /dev/null +++ b/vendor/tornado/tornado/options.py @@ -0,0 +1,386 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A command line parsing module that lets modules define their own options. + +Each module defines its own options, e.g., + + from tornado.options import define, options + + define("mysql_host", default="127.0.0.1:3306", help="Main user DB") + define("memcache_hosts", default="127.0.0.1:11011", multiple=True, + help="Main user memcache servers") + + def connect(): + db = database.Connection(options.mysql_host) + ... + +The main() method of your application does not need to be aware of all of +the options used throughout your program; they are all automatically loaded +when the modules are loaded. Your main() method can parse the command line +or parse a config file with: + + import tornado.options + tornado.options.parse_config_file("/etc/server.conf") + tornado.options.parse_command_line() + +Command line formats are what you would expect ("--myoption=myvalue"). +Config files are just Python files. Global names become options, e.g., + + myoption = "myvalue" + myotheroption = "myothervalue" + +We support datetimes, timedeltas, ints, and floats (just pass a 'type' +kwarg to define). We also accept multi-value options. See the documentation +for define() below. 
+""" + +import datetime +import logging +import logging.handlers +import re +import sys +import time + +# For pretty log messages, if available +try: + import curses +except: + curses = None + + +def define(name, default=None, type=str, help=None, metavar=None, + multiple=False): + """Defines a new command line option. + + If type is given (one of str, float, int, datetime, or timedelta), + we parse the command line arguments based on the given type. If + multiple is True, we accept comma-separated values, and the option + value is always a list. + + For multi-value integers, we also accept the syntax x:y, which + turns into range(x, y) - very useful for long integer ranges. + + help and metavar are used to construct the automatically generated + command line help string. The help message is formatted like: + + --name=METAVAR help string + + Command line option names must be unique globally. They can be parsed + from the command line with parse_command_line() or parsed from a + config file with parse_config_file. + """ + if name in options: + raise Error("Option %r already defined in %s", name, + options[name].file_name) + frame = sys._getframe(0) + options_file = frame.f_code.co_filename + file_name = frame.f_back.f_code.co_filename + if file_name == options_file: file_name = "" + options[name] = _Option(name, file_name=file_name, default=default, + type=type, help=help, metavar=metavar, + multiple=multiple) + + +def parse_command_line(args=None): + """Parses all options given on the command line. + + We return all command line arguments that are not options as a list. + """ + if args is None: args = sys.argv + remaining = [] + for i in xrange(1, len(args)): + # All things after the last option are command line arguments + if not args[i].startswith("-"): + remaining = args[i:] + break + if args[i] == "--": + remaining = args[i+1:] + break + arg = args[i].lstrip("-") + name, equals, value = arg.partition("=") + name = name.replace('-', '_') + if not name in options: + print_help() + raise Error('Unrecognized command line option: %r' % name) + option = options[name] + if not equals: + if option.type == bool: + value = "true" + else: + raise Error('Option %r requires a value' % name) + option.parse(value) + if options.help: + print_help() + sys.exit(0) + + # Set up log level and pretty console logging by default + if options.logging != 'none': + logging.getLogger().setLevel(getattr(logging, options.logging.upper())) + enable_pretty_logging() + + return remaining + + +def parse_config_file(path, overwrite=True): + """Parses and loads the Python config file at the given path.""" + config = {} + execfile(path, config, config) + for name in config: + if name in options: + options[name].set(config[name]) + + +def print_help(file=sys.stdout): + """Prints all the command line options to stdout.""" + print >> file, "Usage: %s [OPTIONS]" % sys.argv[0] + print >> file, "" + print >> file, "Options:" + by_file = {} + for option in options.itervalues(): + by_file.setdefault(option.file_name, []).append(option) + + for filename, o in sorted(by_file.items()): + if filename: print >> file, filename + o.sort(key=lambda option: option.name) + for option in o: + prefix = option.name + if option.metavar: + prefix += "=" + option.metavar + print >> file, " --%-30s %s" % (prefix, option.help or "") + print >> file + + +class _Options(dict): + """Our global program options, an dictionary with object-like access.""" + @classmethod + def instance(cls): + if not hasattr(cls, "_instance"): + cls._instance = cls() + 
return cls._instance + + def __getattr__(self, name): + if isinstance(self.get(name), _Option): + return self[name].value() + raise AttributeError("Unrecognized option %r" % name) + + +class _Option(object): + def __init__(self, name, default=None, type=str, help=None, metavar=None, + multiple=False, file_name=None): + if default is None and multiple: + default = [] + self.name = name + self.type = type + self.help = help + self.metavar = metavar + self.multiple = multiple + self.file_name = file_name + self.default = default + self._value = None + + def value(self): + return self.default if self._value is None else self._value + + def parse(self, value): + _parse = { + datetime.datetime: self._parse_datetime, + datetime.timedelta: self._parse_timedelta, + bool: self._parse_bool, + str: self._parse_string, + }.get(self.type, self.type) + if self.multiple: + if self._value is None: + self._value = [] + for part in value.split(","): + if self.type in (int, long): + # allow ranges of the form X:Y (inclusive at both ends) + lo, _, hi = part.partition(":") + lo = _parse(lo) + hi = _parse(hi) if hi else lo + self._value.extend(range(lo, hi+1)) + else: + self._value.append(_parse(part)) + else: + self._value = _parse(value) + return self.value() + + def set(self, value): + if self.multiple: + if not isinstance(value, list): + raise Error("Option %r is required to be a list of %s" % + (self.name, self.type.__name__)) + for item in value: + if item != None and not isinstance(item, self.type): + raise Error("Option %r is required to be a list of %s" % + (self.name, self.type.__name__)) + else: + if value != None and not isinstance(value, self.type): + raise Error("Option %r is required to be a %s" % + (self.name, self.type.__name__)) + self._value = value + + # Supported date/time formats in our options + _DATETIME_FORMATS = [ + "%a %b %d %H:%M:%S %Y", + "%Y-%m-%d %H:%M:%S", + "%Y-%m-%d %H:%M", + "%Y-%m-%dT%H:%M", + "%Y%m%d %H:%M:%S", + "%Y%m%d %H:%M", + "%Y-%m-%d", + "%Y%m%d", + "%H:%M:%S", + "%H:%M", + ] + + def _parse_datetime(self, value): + for format in self._DATETIME_FORMATS: + try: + return datetime.datetime.strptime(value, format) + except ValueError: + pass + raise Error('Unrecognized date/time format: %r' % value) + + _TIMEDELTA_ABBREVS = [ + ('hours', ['h']), + ('minutes', ['m', 'min']), + ('seconds', ['s', 'sec']), + ('milliseconds', ['ms']), + ('microseconds', ['us']), + ('days', ['d']), + ('weeks', ['w']), + ] + + _TIMEDELTA_ABBREV_DICT = dict( + (abbrev, full) for full, abbrevs in _TIMEDELTA_ABBREVS + for abbrev in abbrevs) + + _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?' 
+ + _TIMEDELTA_PATTERN = re.compile( + r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE) + + def _parse_timedelta(self, value): + try: + sum = datetime.timedelta() + start = 0 + while start < len(value): + m = self._TIMEDELTA_PATTERN.match(value, start) + if not m: + raise Exception() + num = float(m.group(1)) + units = m.group(2) or 'seconds' + units = self._TIMEDELTA_ABBREV_DICT.get(units, units) + sum += datetime.timedelta(**{units: num}) + start = m.end() + return sum + except: + raise + + def _parse_bool(self, value): + return value.lower() not in ("false", "0", "f") + + def _parse_string(self, value): + return value.decode("utf-8") + + +class Error(Exception): + pass + + +def enable_pretty_logging(): + """Turns on formatted logging output as configured.""" + if (options.log_to_stderr or + (options.log_to_stderr is None and not options.log_file_prefix)): + # Set up color if we are in a tty and curses is installed + color = False + if curses and sys.stderr.isatty(): + try: + curses.setupterm() + if curses.tigetnum("colors") > 0: + color = True + except: + pass + channel = logging.StreamHandler() + channel.setFormatter(_LogFormatter(color=color)) + logging.getLogger().addHandler(channel) + + if options.log_file_prefix: + channel = logging.handlers.RotatingFileHandler( + filename=options.log_file_prefix, + maxBytes=options.log_file_max_size, + backupCount=options.log_file_num_backups) + channel.setFormatter(_LogFormatter(color=False)) + logging.getLogger().addHandler(channel) + + +class _LogFormatter(logging.Formatter): + def __init__(self, color, *args, **kwargs): + logging.Formatter.__init__(self, *args, **kwargs) + self._color = color + if color: + fg_color = curses.tigetstr("setaf") or curses.tigetstr("setf") or "" + self._colors = { + logging.DEBUG: curses.tparm(fg_color, 4), # Blue + logging.INFO: curses.tparm(fg_color, 2), # Green + logging.WARNING: curses.tparm(fg_color, 3), # Yellow + logging.ERROR: curses.tparm(fg_color, 1), # Red + } + self._normal = curses.tigetstr("sgr0") + + def format(self, record): + try: + record.message = record.getMessage() + except Exception, e: + record.message = "Bad message (%r): %r" % (e, record.__dict__) + record.asctime = time.strftime( + "%y%m%d %H:%M:%S", self.converter(record.created)) + prefix = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]' % \ + record.__dict__ + if self._color: + prefix = (self._colors.get(record.levelno, self._normal) + + prefix + self._normal) + formatted = prefix + " " + record.message + if record.exc_info: + if not record.exc_text: + record.exc_text = self.formatException(record.exc_info) + if record.exc_text: + formatted = formatted.rstrip() + "\n" + record.exc_text + return formatted.replace("\n", "\n ") + + +options = _Options.instance() + + +# Default options +define("help", type=bool, help="show this help information") +define("logging", default="info", + help=("Set the Python log level. If 'none', tornado won't touch the " + "logging configuration."), + metavar="info|warning|error|none") +define("log_to_stderr", type=bool, default=None, + help=("Send log output to stderr (colorized if possible). " + "By default use stderr if --log_file_prefix is not set.")) +define("log_file_prefix", type=str, default=None, metavar="PATH", + help=("Path prefix for log files. " + "Note that if you are running multiple tornado processes, " + "log_file_prefix must be different for each of them (e.g. 
" + "include the port number)")) +define("log_file_max_size", type=int, default=100 * 1000 * 1000, + help="max size of log files before rollover") +define("log_file_num_backups", type=int, default=10, + help="number of log files to keep") diff --git a/vendor/tornado/tornado/s3server.py b/vendor/tornado/tornado/s3server.py new file mode 100644 index 000000000000..2e8a97de201b --- /dev/null +++ b/vendor/tornado/tornado/s3server.py @@ -0,0 +1,255 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of an S3-like storage server based on local files. + +Useful to test features that will eventually run on S3, or if you want to +run something locally that was once running on S3. + +We don't support all the features of S3, but it does work with the +standard S3 client for the most basic semantics. To use the standard +S3 client with this module: + + c = S3.AWSAuthConnection("", "", server="localhost", port=8888, + is_secure=False) + c.create_bucket("mybucket") + c.put("mybucket", "mykey", "a value") + print c.get("mybucket", "mykey").body + +""" + +import bisect +import datetime +import escape +import hashlib +import httpserver +import ioloop +import os +import os.path +import urllib +import web + + +def start(port, root_directory="/tmp/s3", bucket_depth=0): + """Starts the mock S3 server on the given port at the given path.""" + application = S3Application(root_directory, bucket_depth) + http_server = httpserver.HTTPServer(application) + http_server.listen(port) + ioloop.IOLoop.instance().start() + + +class S3Application(web.Application): + """Implementation of an S3-like storage server based on local files. + + If bucket depth is given, we break files up into multiple directories + to prevent hitting file system limits for number of files in each + directories. 1 means one level of directories, 2 means 2, etc. 
+ """ + def __init__(self, root_directory, bucket_depth=0): + web.Application.__init__(self, [ + (r"/", RootHandler), + (r"/([^/]+)/(.+)", ObjectHandler), + (r"/([^/]+)/", BucketHandler), + ]) + self.directory = os.path.abspath(root_directory) + if not os.path.exists(self.directory): + os.makedirs(self.directory) + self.bucket_depth = bucket_depth + + +class BaseRequestHandler(web.RequestHandler): + SUPPORTED_METHODS = ("PUT", "GET", "DELETE") + + def render_xml(self, value): + assert isinstance(value, dict) and len(value) == 1 + self.set_header("Content-Type", "application/xml; charset=UTF-8") + name = value.keys()[0] + parts = [] + parts.append('<' + escape.utf8(name) + + ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">') + self._render_parts(value.values()[0], parts) + parts.append('') + self.finish('\n' + + ''.join(parts)) + + def _render_parts(self, value, parts=[]): + if isinstance(value, basestring): + parts.append(escape.xhtml_escape(value)) + elif isinstance(value, int) or isinstance(value, long): + parts.append(str(value)) + elif isinstance(value, datetime.datetime): + parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) + elif isinstance(value, dict): + for name, subvalue in value.iteritems(): + if not isinstance(subvalue, list): + subvalue = [subvalue] + for subsubvalue in subvalue: + parts.append('<' + escape.utf8(name) + '>') + self._render_parts(subsubvalue, parts) + parts.append('') + else: + raise Exception("Unknown S3 value type %r", value) + + def _object_path(self, bucket, object_name): + if self.application.bucket_depth < 1: + return os.path.abspath(os.path.join( + self.application.directory, bucket, object_name)) + hash = hashlib.md5(object_name).hexdigest() + path = os.path.abspath(os.path.join( + self.application.directory, bucket)) + for i in range(self.application.bucket_depth): + path = os.path.join(path, hash[:2 * (i + 1)]) + return os.path.join(path, object_name) + + +class RootHandler(BaseRequestHandler): + def get(self): + names = os.listdir(self.application.directory) + buckets = [] + for name in names: + path = os.path.join(self.application.directory, name) + info = os.stat(path) + buckets.append({ + "Name": name, + "CreationDate": datetime.datetime.utcfromtimestamp( + info.st_ctime), + }) + self.render_xml({"ListAllMyBucketsResult": { + "Buckets": {"Bucket": buckets}, + }}) + + +class BucketHandler(BaseRequestHandler): + def get(self, bucket_name): + prefix = self.get_argument("prefix", u"") + marker = self.get_argument("marker", u"") + max_keys = int(self.get_argument("max-keys", 50000)) + path = os.path.abspath(os.path.join(self.application.directory, + bucket_name)) + terse = int(self.get_argument("terse", 0)) + if not path.startswith(self.application.directory) or \ + not os.path.isdir(path): + raise web.HTTPError(404) + object_names = [] + for root, dirs, files in os.walk(path): + for file_name in files: + object_names.append(os.path.join(root, file_name)) + skip = len(path) + 1 + for i in range(self.application.bucket_depth): + skip += 2 * (i + 1) + 1 + object_names = [n[skip:] for n in object_names] + object_names.sort() + contents = [] + + start_pos = 0 + if marker: + start_pos = bisect.bisect_right(object_names, marker, start_pos) + if prefix: + start_pos = bisect.bisect_left(object_names, prefix, start_pos) + + truncated = False + for object_name in object_names[start_pos:]: + if not object_name.startswith(prefix): + break + if len(contents) >= max_keys: + truncated = True + break + object_path = self._object_path(bucket_name, object_name) + c 
= {"Key": object_name} + if not terse: + info = os.stat(object_path) + c.update({ + "LastModified": datetime.datetime.utcfromtimestamp( + info.st_mtime), + "Size": info.st_size, + }) + contents.append(c) + marker = object_name + self.render_xml({"ListBucketResult": { + "Name": bucket_name, + "Prefix": prefix, + "Marker": marker, + "MaxKeys": max_keys, + "IsTruncated": truncated, + "Contents": contents, + }}) + + def put(self, bucket_name): + path = os.path.abspath(os.path.join( + self.application.directory, bucket_name)) + if not path.startswith(self.application.directory) or \ + os.path.exists(path): + raise web.HTTPError(403) + os.makedirs(path) + self.finish() + + def delete(self, bucket_name): + path = os.path.abspath(os.path.join( + self.application.directory, bucket_name)) + if not path.startswith(self.application.directory) or \ + not os.path.isdir(path): + raise web.HTTPError(404) + if len(os.listdir(path)) > 0: + raise web.HTTPError(403) + os.rmdir(path) + self.set_status(204) + self.finish() + + +class ObjectHandler(BaseRequestHandler): + def get(self, bucket, object_name): + object_name = urllib.unquote(object_name) + path = self._object_path(bucket, object_name) + if not path.startswith(self.application.directory) or \ + not os.path.isfile(path): + raise web.HTTPError(404) + info = os.stat(path) + self.set_header("Content-Type", "application/unknown") + self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp( + info.st_mtime)) + object_file = open(path, "r") + try: + self.finish(object_file.read()) + finally: + object_file.close() + + def put(self, bucket, object_name): + object_name = urllib.unquote(object_name) + bucket_dir = os.path.abspath(os.path.join( + self.application.directory, bucket)) + if not bucket_dir.startswith(self.application.directory) or \ + not os.path.isdir(bucket_dir): + raise web.HTTPError(404) + path = self._object_path(bucket, object_name) + if not path.startswith(bucket_dir) or os.path.isdir(path): + raise web.HTTPError(403) + directory = os.path.dirname(path) + if not os.path.exists(directory): + os.makedirs(directory) + object_file = open(path, "w") + object_file.write(self.request.body) + object_file.close() + self.finish() + + def delete(self, bucket, object_name): + object_name = urllib.unquote(object_name) + path = self._object_path(bucket, object_name) + if not path.startswith(self.application.directory) or \ + not os.path.isfile(path): + raise web.HTTPError(404) + os.unlink(path) + self.set_status(204) + self.finish() diff --git a/vendor/tornado/tornado/template.py b/vendor/tornado/tornado/template.py new file mode 100644 index 000000000000..7ed56cfa6929 --- /dev/null +++ b/vendor/tornado/tornado/template.py @@ -0,0 +1,576 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A simple template system that compiles templates to Python code. 
+ +Basic usage looks like: + + t = template.Template("{{ myvalue }}") + print t.generate(myvalue="XXX") + +Loader is a class that loads templates from a root directory and caches +the compiled templates: + + loader = template.Loader("/home/btaylor") + print loader.load("test.html").generate(myvalue="XXX") + +We compile all templates to raw Python. Error-reporting is currently... uh, +interesting. Syntax for the templates + + ### base.html + + + {% block title %}Default title{% end %} + + +
                                  + {% for student in students %} + {% block student %} +
<li>{{ escape(student.name) }}</li>
+ {% end %} + {% end %} + 
                                + + + + ### bold.html + {% extends "base.html" %} + + {% block title %}A bolder title{% end %} + + {% block student %} +
<li><span style="bold">{{ escape(student.name) }}</span></li>
+ {% end %} + +Unlike most other template systems, we do not put any restrictions on the +expressions you can include in your statements. if and for blocks get +translated exactly into Python, so you can do complex expressions like: + + {% for student in [p for p in people if p.student and p.age > 23] %} + 
<li>{{ escape(student.name) }}</li>
                              4. + {% end %} + +Translating directly to Python means you can apply functions to expressions +easily, like the escape() function in the examples above. You can pass +functions in to your template just like any other variable: + + ### Python code + def add(x, y): + return x + y + template.execute(add=add) + + ### The template + {{ add(1, 2) }} + +We provide the functions escape(), url_escape(), json_encode(), and squeeze() +to all templates by default. +""" + +from __future__ import with_statement + +import cStringIO +import datetime +import escape +import logging +import os.path +import re + +_log = logging.getLogger('tornado.template') + +class Template(object): + """A compiled template. + + We compile into Python from the given template_string. You can generate + the template from variables with generate(). + """ + def __init__(self, template_string, name="", loader=None, + compress_whitespace=None): + self.name = name + if compress_whitespace is None: + compress_whitespace = name.endswith(".html") or \ + name.endswith(".js") + reader = _TemplateReader(name, template_string) + self.file = _File(_parse(reader)) + self.code = self._generate_python(loader, compress_whitespace) + try: + self.compiled = compile(self.code, self.name, "exec") + except: + formatted_code = _format_code(self.code).rstrip() + _log.error("%s code:\n%s", self.name, formatted_code) + raise + + def generate(self, **kwargs): + """Generate this template with the given arguments.""" + namespace = { + "escape": escape.xhtml_escape, + "url_escape": escape.url_escape, + "json_encode": escape.json_encode, + "squeeze": escape.squeeze, + "datetime": datetime, + } + namespace.update(kwargs) + exec self.compiled in namespace + execute = namespace["_execute"] + try: + return execute() + except: + formatted_code = _format_code(self.code).rstrip() + _log.error("%s code:\n%s", self.name, formatted_code) + raise + + def _generate_python(self, loader, compress_whitespace): + buffer = cStringIO.StringIO() + try: + named_blocks = {} + ancestors = self._get_ancestors(loader) + ancestors.reverse() + for ancestor in ancestors: + ancestor.find_named_blocks(loader, named_blocks) + self.file.find_named_blocks(loader, named_blocks) + writer = _CodeWriter(buffer, named_blocks, loader, self, + compress_whitespace) + ancestors[0].generate(writer) + return buffer.getvalue() + finally: + buffer.close() + + def _get_ancestors(self, loader): + ancestors = [self.file] + for chunk in self.file.body.chunks: + if isinstance(chunk, _ExtendsBlock): + if not loader: + raise ParseError("{% extends %} block found, but no " + "template loader") + template = loader.load(chunk.name, self.name) + ancestors.extend(template._get_ancestors(loader)) + return ancestors + + +class Loader(object): + """A template loader that loads from a single root directory. + + You must use a template loader to use template constructs like + {% extends %} and {% include %}. Loader caches all templates after + they are loaded the first time. 
+ """ + def __init__(self, root_directory): + self.root = os.path.abspath(root_directory) + self.templates = {} + + def reset(self): + self.templates = {} + + def resolve_path(self, name, parent_path=None): + if parent_path and not parent_path.startswith("<") and \ + not parent_path.startswith("/") and \ + not name.startswith("/"): + current_path = os.path.join(self.root, parent_path) + file_dir = os.path.dirname(os.path.abspath(current_path)) + relative_path = os.path.abspath(os.path.join(file_dir, name)) + if relative_path.startswith(self.root): + name = relative_path[len(self.root) + 1:] + return name + + def load(self, name, parent_path=None): + name = self.resolve_path(name, parent_path=parent_path) + if name not in self.templates: + path = os.path.join(self.root, name) + f = open(path, "r") + self.templates[name] = Template(f.read(), name=name, loader=self) + f.close() + return self.templates[name] + + +class _Node(object): + def each_child(self): + return () + + def generate(self, writer): + raise NotImplementedError() + + def find_named_blocks(self, loader, named_blocks): + for child in self.each_child(): + child.find_named_blocks(loader, named_blocks) + + +class _File(_Node): + def __init__(self, body): + self.body = body + + def generate(self, writer): + writer.write_line("def _execute():") + with writer.indent(): + writer.write_line("_buffer = []") + self.body.generate(writer) + writer.write_line("return ''.join(_buffer)") + + def each_child(self): + return (self.body,) + + + +class _ChunkList(_Node): + def __init__(self, chunks): + self.chunks = chunks + + def generate(self, writer): + for chunk in self.chunks: + chunk.generate(writer) + + def each_child(self): + return self.chunks + + +class _NamedBlock(_Node): + def __init__(self, name, body=None): + self.name = name + self.body = body + + def each_child(self): + return (self.body,) + + def generate(self, writer): + writer.named_blocks[self.name].generate(writer) + + def find_named_blocks(self, loader, named_blocks): + named_blocks[self.name] = self.body + _Node.find_named_blocks(self, loader, named_blocks) + + +class _ExtendsBlock(_Node): + def __init__(self, name): + self.name = name + + +class _IncludeBlock(_Node): + def __init__(self, name, reader): + self.name = name + self.template_name = reader.name + + def find_named_blocks(self, loader, named_blocks): + included = loader.load(self.name, self.template_name) + included.file.find_named_blocks(loader, named_blocks) + + def generate(self, writer): + included = writer.loader.load(self.name, self.template_name) + old = writer.current_template + writer.current_template = included + included.file.body.generate(writer) + writer.current_template = old + + +class _ApplyBlock(_Node): + def __init__(self, method, body=None): + self.method = method + self.body = body + + def each_child(self): + return (self.body,) + + def generate(self, writer): + method_name = "apply%d" % writer.apply_counter + writer.apply_counter += 1 + writer.write_line("def %s():" % method_name) + with writer.indent(): + writer.write_line("_buffer = []") + self.body.generate(writer) + writer.write_line("return ''.join(_buffer)") + writer.write_line("_buffer.append(%s(%s()))" % ( + self.method, method_name)) + + +class _ControlBlock(_Node): + def __init__(self, statement, body=None): + self.statement = statement + self.body = body + + def each_child(self): + return (self.body,) + + def generate(self, writer): + writer.write_line("%s:" % self.statement) + with writer.indent(): + self.body.generate(writer) + + 
+class _IntermediateControlBlock(_Node): + def __init__(self, statement): + self.statement = statement + + def generate(self, writer): + writer.write_line("%s:" % self.statement, writer.indent_size() - 1) + + +class _Statement(_Node): + def __init__(self, statement): + self.statement = statement + + def generate(self, writer): + writer.write_line(self.statement) + + +class _Expression(_Node): + def __init__(self, expression): + self.expression = expression + + def generate(self, writer): + writer.write_line("_tmp = %s" % self.expression) + writer.write_line("if isinstance(_tmp, str): _buffer.append(_tmp)") + writer.write_line("elif isinstance(_tmp, unicode): " + "_buffer.append(_tmp.encode('utf-8'))") + writer.write_line("else: _buffer.append(str(_tmp))") + + +class _Text(_Node): + def __init__(self, value): + self.value = value + + def generate(self, writer): + value = self.value + + # Compress lots of white space to a single character. If the whitespace + # breaks a line, have it continue to break a line, but just with a + # single \n character + if writer.compress_whitespace and "
                                " not in value:
                                +            value = re.sub(r"([\t ]+)", " ", value)
                                +            value = re.sub(r"(\s*\n\s*)", "\n", value)
                                +
                                +        if value:
                                +            writer.write_line('_buffer.append(%r)' % value)
                                +
                                +
                                +class ParseError(Exception):
                                +    """Raised for template syntax errors."""
                                +    pass
                                +
                                +
                                +class _CodeWriter(object):
                                +    def __init__(self, file, named_blocks, loader, current_template,
                                +                 compress_whitespace):
                                +        self.file = file
                                +        self.named_blocks = named_blocks
                                +        self.loader = loader
                                +        self.current_template = current_template
                                +        self.compress_whitespace = compress_whitespace
                                +        self.apply_counter = 0
                                +        self._indent = 0
                                +
                                +    def indent(self):
                                +        return self
                                +
                                +    def indent_size(self):
                                +        return self._indent
                                +
                                +    def __enter__(self):
                                +        self._indent += 1
                                +        return self
                                +
                                +    def __exit__(self, *args):
                                +        assert self._indent > 0
                                +        self._indent -= 1
                                +
                                +    def write_line(self, line, indent=None):
                                +        if indent == None:
                                +            indent = self._indent
                                +        for i in xrange(indent):
                                +            self.file.write("    ")
                                +        print >> self.file, line
                                +
                                +
                                +class _TemplateReader(object):
                                +    def __init__(self, name, text):
                                +        self.name = name
                                +        self.text = text
                                +        self.line = 0
                                +        self.pos = 0
                                +
                                +    def find(self, needle, start=0, end=None):
                                +        assert start >= 0, start
                                +        pos = self.pos
                                +        start += pos
                                +        if end is None:
                                +            index = self.text.find(needle, start)
                                +        else:
                                +            end += pos
                                +            assert end >= start
                                +            index = self.text.find(needle, start, end)
                                +        if index != -1:
                                +            index -= pos
                                +        return index
                                +
                                +    def consume(self, count=None):
                                +        if count is None:
                                +            count = len(self.text) - self.pos
                                +        newpos = self.pos + count
                                +        self.line += self.text.count("\n", self.pos, newpos)
                                +        s = self.text[self.pos:newpos]
                                +        self.pos = newpos
                                +        return s
                                +
                                +    def remaining(self):
                                +        return len(self.text) - self.pos
                                +
                                +    def __len__(self):
                                +        return self.remaining()
                                +
                                +    def __getitem__(self, key):
                                +        if type(key) is slice:
                                +            size = len(self)
+            start, stop, step = key.indices(size)
                                +            if start is None: start = self.pos
                                +            else: start += self.pos
                                +            if stop is not None: stop += self.pos
                                +            return self.text[slice(start, stop, step)]
                                +        elif key < 0:
                                +            return self.text[key]
                                +        else:
                                +            return self.text[self.pos + key]
                                +
                                +    def __str__(self):
                                +        return self.text[self.pos:]
                                +
                                +
                                +def _format_code(code):
                                +    lines = code.splitlines()
                                +    format = "%%%dd  %%s\n" % len(repr(len(lines) + 1))
                                +    return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
                                +
                                +
                                +def _parse(reader, in_block=None):
                                +    body = _ChunkList([])
                                +    while True:
                                +        # Find next template directive
                                +        curly = 0
                                +        while True:
                                +            curly = reader.find("{", curly)
                                +            if curly == -1 or curly + 1 == reader.remaining():
                                +                # EOF
                                +                if in_block:
                                +                    raise ParseError("Missing {%% end %%} block for %s" %
                                +                                     in_block)
                                +                body.chunks.append(_Text(reader.consume()))
                                +                return body
                                +            # If the first curly brace is not the start of a special token,
                                +            # start searching from the character after it
                                +            if reader[curly + 1] not in ("{", "%"):
                                +                curly += 1
                                +                continue
                                +            # When there are more than 2 curlies in a row, use the
                                +            # innermost ones.  This is useful when generating languages
                                +            # like latex where curlies are also meaningful
                                +            if (curly + 2 < reader.remaining() and
                                +                reader[curly + 1] == '{' and reader[curly + 2] == '{'):
                                +                curly += 1
                                +                continue
                                +            break
                                +
                                +        # Append any text before the special token
                                +        if curly > 0:
                                +            body.chunks.append(_Text(reader.consume(curly)))
                                +
                                +        start_brace = reader.consume(2)
                                +        line = reader.line
                                +
                                +        # Expression
                                +        if start_brace == "{{":
                                +            end = reader.find("}}")
                                +            if end == -1 or reader.find("\n", 0, end) != -1:
                                +                raise ParseError("Missing end expression }} on line %d" % line)
                                +            contents = reader.consume(end).strip()
                                +            reader.consume(2)
                                +            if not contents:
                                +                raise ParseError("Empty expression on line %d" % line)
                                +            body.chunks.append(_Expression(contents))
                                +            continue
                                +
                                +        # Block
                                +        assert start_brace == "{%", start_brace
                                +        end = reader.find("%}")
                                +        if end == -1 or reader.find("\n", 0, end) != -1:
                                +            raise ParseError("Missing end block %%} on line %d" % line)
                                +        contents = reader.consume(end).strip()
                                +        reader.consume(2)
                                +        if not contents:
                                +            raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
                                +
                                +        operator, space, suffix = contents.partition(" ")
                                +        suffix = suffix.strip()
                                +
                                +        # Intermediate ("else", "elif", etc) blocks
                                +        intermediate_blocks = {
                                +            "else": set(["if", "for", "while"]),
                                +            "elif": set(["if"]),
                                +            "except": set(["try"]),
                                +            "finally": set(["try"]),
                                +        }
                                +        allowed_parents = intermediate_blocks.get(operator)
                                +        if allowed_parents is not None:
                                +            if not in_block:
                                +                raise ParseError("%s outside %s block" %
                                +                            (operator, allowed_parents))
                                +            if in_block not in allowed_parents:
                                +                raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
                                +            body.chunks.append(_IntermediateControlBlock(contents))
                                +            continue
                                +
                                +        # End tag
                                +        elif operator == "end":
                                +            if not in_block:
                                +                raise ParseError("Extra {%% end %%} block on line %d" % line)
                                +            return body
                                +
                                +        elif operator in ("extends", "include", "set", "import", "comment"):
                                +            if operator == "comment":
                                +                continue
                                +            if operator == "extends":
                                +                suffix = suffix.strip('"').strip("'")
                                +                if not suffix:
                                +                    raise ParseError("extends missing file path on line %d" % line)
                                +                block = _ExtendsBlock(suffix)
                                +            elif operator == "import":
                                +                if not suffix:
                                +                    raise ParseError("import missing statement on line %d" % line)
                                +                block = _Statement(contents)
                                +            elif operator == "include":
                                +                suffix = suffix.strip('"').strip("'")
                                +                if not suffix:
                                +                    raise ParseError("include missing file path on line %d" % line)
                                +                block = _IncludeBlock(suffix, reader)
                                +            elif operator == "set":
                                +                if not suffix:
                                +                    raise ParseError("set missing statement on line %d" % line)
                                +                block = _Statement(suffix)
                                +            body.chunks.append(block)
                                +            continue
                                +
                                +        elif operator in ("apply", "block", "try", "if", "for", "while"):
                                +            # parse inner body recursively
                                +            block_body = _parse(reader, operator)
                                +            if operator == "apply":
                                +                if not suffix:
                                +                    raise ParseError("apply missing method name on line %d" % line)
                                +                block = _ApplyBlock(suffix, block_body)
                                +            elif operator == "block":
                                +                if not suffix:
                                +                    raise ParseError("block missing name on line %d" % line)
                                +                block = _NamedBlock(suffix, block_body)
                                +            else:
                                +                block = _ControlBlock(contents, block_body)
                                +            body.chunks.append(block)
                                +            continue
                                +
                                +        else:
                                +            raise ParseError("unknown operator: %r" % operator)
                                diff --git a/vendor/tornado/tornado/test/README b/vendor/tornado/tornado/test/README
                                new file mode 100644
                                index 000000000000..2d6195d807e0
                                --- /dev/null
                                +++ b/vendor/tornado/tornado/test/README
                                @@ -0,0 +1,4 @@
                                +Test coverage is almost non-existent, but it's a start.  Be sure to
+set PYTHONPATH appropriately (generally to the root directory of your
                                +tornado checkout) when running tests to make sure you're getting the
                                +version of the tornado package that you expect.
                                \ No newline at end of file
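
One way to satisfy the PYTHONPATH note above without editing the shell environment
is to put the checkout root on sys.path before importing; a minimal sketch, with a
hypothetical checkout path:

    import sys
    sys.path.insert(0, "/path/to/tornado-checkout")  # hypothetical checkout root

    from tornado import ioloop
    print ioloop.__file__  # should point into the checkout, not a system install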
                                diff --git a/vendor/tornado/tornado/test/test_ioloop.py b/vendor/tornado/tornado/test/test_ioloop.py
                                new file mode 100755
                                index 000000000000..2541fa87e157
                                --- /dev/null
                                +++ b/vendor/tornado/tornado/test/test_ioloop.py
                                @@ -0,0 +1,38 @@
                                +#!/usr/bin/env python
                                +
                                +import unittest
                                +import time
                                +
                                +from tornado import ioloop
                                +
                                +
                                +class TestIOLoop(unittest.TestCase):
                                +    def setUp(self):
                                +        self.loop = ioloop.IOLoop()
                                +
                                +    def tearDown(self):
                                +        pass
                                +
                                +    def _callback(self):
                                +        self.called = True
                                +        self.loop.stop()
                                +
                                +    def _schedule_callback(self):
                                +        self.loop.add_callback(self._callback)
                                +        # Scroll away the time so we can check if we woke up immediately
                                +        self._start_time = time.time()
                                +        self.called = False
                                +
                                +    def test_add_callback(self):
                                +        self.loop.add_timeout(time.time(), self._schedule_callback)
                                +        self.loop.start() # Set some long poll timeout so we can check wakeup
                                +        self.assertAlmostEqual(time.time(), self._start_time, places=2)
                                +        self.assertTrue(self.called)
                                +
                                +
                                +if __name__ == "__main__":
                                +    import logging
                                +
                                +    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(msecs)03d %(levelname)-8s %(name)-8s %(message)s', datefmt='%H:%M:%S')
                                +
                                +    unittest.main()
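
Outside of a test harness, the pattern the test exercises (schedule work with
add_timeout, stop the loop from the callback so start() returns) looks roughly like
this; a minimal sketch assuming the tornado package is importable:

    import time
    from tornado import ioloop

    loop = ioloop.IOLoop.instance()

    def on_timer():
        print "timer fired"
        loop.stop()  # let start() return

    loop.add_timeout(time.time() + 1, on_timer)  # run about one second from now
    loop.start()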
                                diff --git a/vendor/tornado/tornado/web.py b/vendor/tornado/tornado/web.py
                                new file mode 100644
                                index 000000000000..7559fae8a576
                                --- /dev/null
                                +++ b/vendor/tornado/tornado/web.py
                                @@ -0,0 +1,1445 @@
                                +#!/usr/bin/env python
                                +#
                                +# Copyright 2009 Facebook
                                +#
                                +# Licensed under the Apache License, Version 2.0 (the "License"); you may
                                +# not use this file except in compliance with the License. You may obtain
                                +# a copy of the License at
                                +#
                                +#     http://www.apache.org/licenses/LICENSE-2.0
                                +#
                                +# Unless required by applicable law or agreed to in writing, software
                                +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
                                +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
                                +# License for the specific language governing permissions and limitations
                                +# under the License.
                                +
                                +"""The Tornado web framework.
                                +
                                +The Tornado web framework looks a bit like web.py (http://webpy.org/) or
                                +Google's webapp (http://code.google.com/appengine/docs/python/tools/webapp/),
                                +but with additional tools and optimizations to take advantage of the
                                +Tornado non-blocking web server and tools.
                                +
                                +Here is the canonical "Hello, world" example app:
                                +
                                +    import tornado.httpserver
                                +    import tornado.ioloop
                                +    import tornado.web
                                +
                                +    class MainHandler(tornado.web.RequestHandler):
                                +        def get(self):
                                +            self.write("Hello, world")
                                +
                                +    if __name__ == "__main__":
                                +        application = tornado.web.Application([
                                +            (r"/", MainHandler),
                                +        ])
                                +        http_server = tornado.httpserver.HTTPServer(application)
                                +        http_server.listen(8888)
                                +        tornado.ioloop.IOLoop.instance().start()
                                +
                                +See the Tornado walkthrough on GitHub for more details and a good
                                +getting started guide.
                                +"""
                                +
                                +import base64
                                +import binascii
                                +import calendar
                                +import Cookie
                                +import cStringIO
                                +import datetime
                                +import email.utils
                                +import escape
                                +import functools
                                +import gzip
                                +import hashlib
                                +import hmac
                                +import httplib
                                +import locale
                                +import logging
                                +import mimetypes
                                +import os.path
                                +import re
                                +import stat
                                +import sys
                                +import template
                                +import time
                                +import types
                                +import urllib
                                +import urlparse
                                +import uuid
                                +
                                +_log = logging.getLogger('tornado.web')
                                +
                                +class RequestHandler(object):
                                +    """Subclass this class and define get() or post() to make a handler.
                                +
                                +    If you want to support more methods than the standard GET/HEAD/POST, you
                                +    should override the class variable SUPPORTED_METHODS in your
                                +    RequestHandler class.
                                +    """
                                +    SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PUT")
                                +
                                +    def __init__(self, application, request, transforms=None):
                                +        self.application = application
                                +        self.request = request
                                +        self._headers_written = False
                                +        self._finished = False
                                +        self._auto_finish = True
                                +        self._transforms = transforms or []
                                +        self.ui = _O((n, self._ui_method(m)) for n, m in
                                +                     application.ui_methods.iteritems())
                                +        self.ui["modules"] = _O((n, self._ui_module(n, m)) for n, m in
                                +                                application.ui_modules.iteritems())
                                +        self.clear()
                                +        # Check since connection is not available in WSGI
                                +        if hasattr(self.request, "connection"):
                                +            self.request.connection.stream.set_close_callback(
                                +                self.on_connection_close)
                                +
                                +    @property
                                +    def settings(self):
                                +        return self.application.settings
                                +
                                +    def head(self, *args, **kwargs):
                                +        raise HTTPError(405)
                                +
                                +    def get(self, *args, **kwargs):
                                +        raise HTTPError(405)
                                +
                                +    def post(self, *args, **kwargs):
                                +        raise HTTPError(405)
                                +
                                +    def delete(self, *args, **kwargs):
                                +        raise HTTPError(405)
                                +
                                +    def put(self, *args, **kwargs):
                                +        raise HTTPError(405)
                                +
                                +    def prepare(self):
                                +        """Called before the actual handler method.
                                +
                                +        Useful to override in a handler if you want a common bottleneck for
                                +        all of your requests.
                                +        """
                                +        pass
                                +
                                +    def on_connection_close(self):
                                +        """Called in async handlers if the client closed the connection.
                                +
                                +        You may override this to clean up resources associated with
                                +        long-lived connections.
                                +
                                +        Note that the select()-based implementation of IOLoop does not detect
                                +        closed connections and so this method will not be called until
                                +        you try (and fail) to produce some output.  The epoll- and kqueue-
                                +        based implementations should detect closed connections even while
                                +        the request is idle.
                                +        """
                                +        pass
                                +
                                +    def clear(self):
                                +        """Resets all headers and content for this response."""
                                +        self._headers = {
                                +            "Server": "TornadoServer/0.1",
                                +            "Content-Type": "text/html; charset=UTF-8",
                                +        }
                                +        if not self.request.supports_http_1_1():
                                +            if self.request.headers.get("Connection") == "Keep-Alive":
                                +                self.set_header("Connection", "Keep-Alive")
                                +        self._write_buffer = []
                                +        self._status_code = 200
                                +
                                +    def set_status(self, status_code):
                                +        """Sets the status code for our response."""
                                +        assert status_code in httplib.responses
                                +        self._status_code = status_code
                                +
                                +    def set_header(self, name, value):
                                +        """Sets the given response header name and value.
                                +
                                +        If a datetime is given, we automatically format it according to the
                                +        HTTP specification. If the value is not a string, we convert it to
                                +        a string. All header values are then encoded as UTF-8.
                                +        """
                                +        if isinstance(value, datetime.datetime):
                                +            t = calendar.timegm(value.utctimetuple())
                                +            value = email.utils.formatdate(t, localtime=False, usegmt=True)
                                +        elif isinstance(value, int) or isinstance(value, long):
                                +            value = str(value)
                                +        else:
                                +            value = _utf8(value)
                                +            # If \n is allowed into the header, it is possible to inject
                                +            # additional headers or split the request. Also cap length to
                                +            # prevent obviously erroneous values.
                                +            safe_value = re.sub(r"[\x00-\x1f]", " ", value)[:4000]
                                +            if safe_value != value:
+                raise ValueError("Unsafe header value %r" % value)
                                +        self._headers[name] = value
                                +
                                +    _ARG_DEFAULT = []
                                +    def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
                                +        """Returns the value of the argument with the given name.
                                +
                                +        If default is not provided, the argument is considered to be
                                +        required, and we throw an HTTP 404 exception if it is missing.
                                +
                                +        The returned value is always unicode.
                                +        """
                                +        values = self.request.arguments.get(name, None)
                                +        if values is None:
                                +            if default is self._ARG_DEFAULT:
                                +                raise HTTPError(404, "Missing argument %s" % name)
                                +            return default
                                +        # Get rid of any weird control chars
                                +        value = re.sub(r"[\x00-\x08\x0e-\x1f]", " ", values[-1])
                                +        value = _unicode(value)
                                +        if strip: value = value.strip()
                                +        return value
                                +
                                +    @property
                                +    def cookies(self):
                                +        """A dictionary of Cookie.Morsel objects."""
                                +        if not hasattr(self, "_cookies"):
                                +            self._cookies = Cookie.BaseCookie()
                                +            if "Cookie" in self.request.headers:
                                +                try:
                                +                    self._cookies.load(self.request.headers["Cookie"])
                                +                except:
                                +                    self.clear_all_cookies()
                                +        return self._cookies
                                +
                                +    def get_cookie(self, name, default=None):
                                +        """Gets the value of the cookie with the given name, else default."""
                                +        if name in self.cookies:
                                +            return self.cookies[name].value
                                +        return default
                                +
                                +    def set_cookie(self, name, value, domain=None, expires=None, path="/",
                                +                   expires_days=None):
                                +        """Sets the given cookie name/value with the given options."""
                                +        name = _utf8(name)
                                +        value = _utf8(value)
                                +        if re.search(r"[\x00-\x20]", name + value):
                                +            # Don't let us accidentally inject bad stuff
                                +            raise ValueError("Invalid cookie %r: %r" % (name, value))
                                +        if not hasattr(self, "_new_cookies"):
                                +            self._new_cookies = []
                                +        new_cookie = Cookie.BaseCookie()
                                +        self._new_cookies.append(new_cookie)
                                +        new_cookie[name] = value
                                +        if domain:
                                +            new_cookie[name]["domain"] = domain
                                +        if expires_days is not None and not expires:
                                +            expires = datetime.datetime.utcnow() + datetime.timedelta(
                                +                days=expires_days)
                                +        if expires:
                                +            timestamp = calendar.timegm(expires.utctimetuple())
                                +            new_cookie[name]["expires"] = email.utils.formatdate(
                                +                timestamp, localtime=False, usegmt=True)
                                +        if path:
                                +            new_cookie[name]["path"] = path
                                +
                                +    def clear_cookie(self, name, path="/", domain=None):
                                +        """Deletes the cookie with the given name."""
                                +        expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
                                +        self.set_cookie(name, value="", path=path, expires=expires,
                                +                        domain=domain)
                                +
                                +    def clear_all_cookies(self):
                                +        """Deletes all the cookies the user sent with this request."""
                                +        for name in self.cookies.iterkeys():
                                +            self.clear_cookie(name)
                                +
                                +    def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
                                +        """Signs and timestamps a cookie so it cannot be forged.
                                +
                                +        You must specify the 'cookie_secret' setting in your Application
                                +        to use this method. It should be a long, random sequence of bytes
                                +        to be used as the HMAC secret for the signature.
                                +
                                +        To read a cookie set with this method, use get_secure_cookie().
                                +        """
                                +        timestamp = str(int(time.time()))
                                +        value = base64.b64encode(value)
                                +        signature = self._cookie_signature(name, value, timestamp)
                                +        value = "|".join([value, timestamp, signature])
                                +        self.set_cookie(name, value, expires_days=expires_days, **kwargs)
                                +
                                +    def get_secure_cookie(self, name, include_name=True, value=None):
                                +        """Returns the given signed cookie if it validates, or None.
                                +
                                +        In older versions of Tornado (0.1 and 0.2), we did not include the
                                +        name of the cookie in the cookie signature. To read these old-style
                                +        cookies, pass include_name=False to this method. Otherwise, all
+        attempts to read old-style cookies will fail (and you may log out
+        all users whose cookies were written with a previous Tornado
+        version).
                                +        """
                                +        if value is None: value = self.get_cookie(name)
                                +        if not value: return None
                                +        parts = value.split("|")
                                +        if len(parts) != 3: return None
                                +        if include_name:
                                +            signature = self._cookie_signature(name, parts[0], parts[1])
                                +        else:
                                +            signature = self._cookie_signature(parts[0], parts[1])
                                +        if not _time_independent_equals(parts[2], signature):
                                +            _log.warning("Invalid cookie signature %r", value)
                                +            return None
                                +        timestamp = int(parts[1])
                                +        if timestamp < time.time() - 31 * 86400:
                                +            _log.warning("Expired cookie %r", value)
                                +            return None
                                +        try:
                                +            return base64.b64decode(parts[0])
                                +        except:
                                +            return None
                                +
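+    # Example (illustrative sketch, assuming a 'cookie_secret' setting is
+    # defined on the Application): set_secure_cookie()/get_secure_cookie()
+    # round-trip a signed value. "LoginHandler" and the "user_id" cookie
+    # name are hypothetical.
+    #
+    #     class LoginHandler(RequestHandler):
+    #         def post(self):
+    #             self.set_secure_cookie("user_id", "42")
+    #
+    #         def get(self):
+    #             user_id = self.get_secure_cookie("user_id")  # None if forged or expired
+    #             self.write(user_id or "not logged in")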
                                +    def _cookie_signature(self, *parts):
                                +        self.require_setting("cookie_secret", "secure cookies")
                                +        hash = hmac.new(self.application.settings["cookie_secret"],
                                +                        digestmod=hashlib.sha1)
                                +        for part in parts: hash.update(part)
                                +        return hash.hexdigest()
                                +
                                +    def redirect(self, url, permanent=False):
                                +        """Sends a redirect to the given (optionally relative) URL."""
                                +        if self._headers_written:
                                +            raise Exception("Cannot redirect after headers have been written")
                                +        self.set_status(301 if permanent else 302)
                                +        # Remove whitespace
                                +        url = re.sub(r"[\x00-\x20]+", "", _utf8(url))
                                +        self.set_header("Location", urlparse.urljoin(self.request.uri, url))
                                +        self.finish()
                                +
                                +    def write(self, chunk):
                                +        """Writes the given chunk to the output buffer.
                                +
                                +        To write the output to the network, use the flush() method below.
                                +
                                +        If the given chunk is a dictionary, we write it as JSON and set
                                +        the Content-Type of the response to be text/javascript.
                                +        """
                                +        assert not self._finished
                                +        if isinstance(chunk, dict):
                                +            chunk = escape.json_encode(chunk)
                                +            self.set_header("Content-Type", "text/javascript; charset=UTF-8")
                                +        chunk = _utf8(chunk)
                                +        self._write_buffer.append(chunk)
                                +
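+    # Example (illustrative only): passing a dict to write() serializes it as
+    # JSON and sets a text/javascript Content-Type, per the docstring above.
+    # "JSONHandler" is a hypothetical name.
+    #
+    #     class JSONHandler(RequestHandler):
+    #         def get(self):
+    #             self.write({"status": "ok", "count": 3})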
                                +    def render(self, template_name, **kwargs):
                                +        """Renders the template with the given arguments as the response."""
                                +        html = self.render_string(template_name, **kwargs)
                                +
                                +        # Insert the additional JS and CSS added by the modules on the page
                                +        js_embed = []
                                +        js_files = []
                                +        css_embed = []
                                +        css_files = []
                                +        html_heads = []
                                +        html_bodies = []
                                +        for module in getattr(self, "_active_modules", {}).itervalues():
                                +            embed_part = module.embedded_javascript()
                                +            if embed_part: js_embed.append(_utf8(embed_part))
                                +            file_part = module.javascript_files()
                                +            if file_part:
                                +                if isinstance(file_part, basestring):
                                +                    js_files.append(file_part)
                                +                else:
                                +                    js_files.extend(file_part)
                                +            embed_part = module.embedded_css()
                                +            if embed_part: css_embed.append(_utf8(embed_part))
                                +            file_part = module.css_files()
                                +            if file_part:
                                +                if isinstance(file_part, basestring):
                                +                    css_files.append(file_part)
                                +                else:
                                +                    css_files.extend(file_part)
                                +            head_part = module.html_head()
                                +            if head_part: html_heads.append(_utf8(head_part))
                                +            body_part = module.html_body()
                                +            if body_part: html_bodies.append(_utf8(body_part))
                                +        if js_files:
                                +            # Maintain order of JavaScript files given by modules
                                +            paths = []
                                +            unique_paths = set()
                                +            for path in js_files:
                                +                if not path.startswith("/") and not path.startswith("http:"):
                                +                    path = self.static_url(path)
                                +                if path not in unique_paths:
                                +                    paths.append(path)
                                +                    unique_paths.add(path)
+            js = ''.join('<script src="' + escape.xhtml_escape(p) +
+                         '" type="text/javascript"></script>'
+                         for p in paths)
+            sloc = html.rindex('</body>')
+            html = html[:sloc] + js + '\n' + html[sloc:]
                                +        if js_embed:
+            js = '<script type="text/javascript">\n//<![CDATA[\n' + \
+                '\n'.join(js_embed) + '\n//]]>\n</script>'
+            sloc = html.rindex('</body>')
+            html = html[:sloc] + js + '\n' + html[sloc:]
                                +        if css_files:
                                +            paths = set()
                                +            for path in css_files:
                                +                if not path.startswith("/") and not path.startswith("http:"):
                                +                    paths.add(self.static_url(path))
                                +                else:
                                +                    paths.add(path)
+            css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
+                          'type="text/css" rel="stylesheet"/>'
+                          for p in paths)
+            hloc = html.index('</head>')
+            html = html[:hloc] + css + '\n' + html[hloc:]
                                +        if css_embed:
+            css = '<style type="text/css">\n' + '\n'.join(css_embed) + \
+                '\n</style>'
+            hloc = html.index('</head>')
+            html = html[:hloc] + css + '\n' + html[hloc:]
                                +        if html_heads:
+            hloc = html.index('</head>')
                                +            html = html[:hloc] + ''.join(html_heads) + '\n' + html[hloc:]
                                +        if html_bodies:
+            hloc = html.index('</body>')
                                +            html = html[:hloc] + ''.join(html_bodies) + '\n' + html[hloc:]
                                +        self.finish(html)
                                +
                                +    def render_string(self, template_name, **kwargs):
                                +        """Generate the given template with the given arguments.
                                +
                                +        We return the generated string. To generate and write a template
                                +        as a response, use render() above.
                                +        """
                                +        # If no template_path is specified, use the path of the calling file
                                +        template_path = self.application.settings.get("template_path")
                                +        if not template_path:
                                +            frame = sys._getframe(0)
                                +            web_file = frame.f_code.co_filename
                                +            while frame.f_code.co_filename == web_file:
                                +                frame = frame.f_back
                                +            template_path = os.path.dirname(frame.f_code.co_filename)
                                +        if not getattr(RequestHandler, "_templates", None):
                                +            RequestHandler._templates = {}
                                +        if template_path not in RequestHandler._templates:
                                +            loader = self.application.settings.get("template_loader") or\
                                +              template.Loader(template_path)
                                +            RequestHandler._templates[template_path] = loader
                                +        t = RequestHandler._templates[template_path].load(template_name)
                                +        args = dict(
                                +            handler=self,
                                +            request=self.request,
                                +            current_user=self.current_user,
                                +            locale=self.locale,
                                +            _=self.locale.translate,
                                +            static_url=self.static_url,
                                +            xsrf_form_html=self.xsrf_form_html,
                                +            reverse_url=self.application.reverse_url
                                +        )
                                +        args.update(self.ui)
                                +        args.update(kwargs)
                                +        return t.generate(**args)
                                +
                                +    def flush(self, include_footers=False):
                                +        """Flushes the current output buffer to the nextwork."""
                                +        if self.application._wsgi:
                                +            raise Exception("WSGI applications do not support flush()")
                                +
                                +        chunk = "".join(self._write_buffer)
                                +        self._write_buffer = []
                                +        if not self._headers_written:
                                +            self._headers_written = True
                                +            for transform in self._transforms:
                                +                self._headers, chunk = transform.transform_first_chunk(
                                +                    self._headers, chunk, include_footers)
                                +            headers = self._generate_headers()
                                +        else:
                                +            for transform in self._transforms:
                                +                chunk = transform.transform_chunk(chunk, include_footers)
                                +            headers = ""
                                +
                                +        # Ignore the chunk and only write the headers for HEAD requests
                                +        if self.request.method == "HEAD":
                                +            if headers: self.request.write(headers)
                                +            return
                                +
                                +        if headers or chunk:
                                +            self.request.write(headers + chunk)
                                +
                                +    def finish(self, chunk=None):
                                +        """Finishes this response, ending the HTTP request."""
                                +        assert not self._finished
                                +        if chunk is not None: self.write(chunk)
                                +
                                +        # Automatically support ETags and add the Content-Length header if
                                +        # we have not flushed any content yet.
                                +        if not self._headers_written:
                                +            if (self._status_code == 200 and self.request.method == "GET" and
                                +                "Etag" not in self._headers):
                                +                hasher = hashlib.sha1()
                                +                for part in self._write_buffer:
                                +                    hasher.update(part)
                                +                etag = '"%s"' % hasher.hexdigest()
                                +                inm = self.request.headers.get("If-None-Match")
                                +                if inm and inm.find(etag) != -1:
                                +                    self._write_buffer = []
                                +                    self.set_status(304)
                                +                else:
                                +                    self.set_header("Etag", etag)
                                +            if "Content-Length" not in self._headers:
                                +                content_length = sum(len(part) for part in self._write_buffer)
                                +                self.set_header("Content-Length", content_length)
                                +
                                +        if not self.application._wsgi:
                                +            self.flush(include_footers=True)
                                +            self.request.finish()
                                +            self._log()
                                +        self._finished = True
                                +
                                +    def send_error(self, status_code=500, **kwargs):
                                +        """Sends the given HTTP error code to the browser.
                                +
                                +        We also send the error HTML for the given error code as returned by
                                +        get_error_html. Override that method if you want custom error pages
                                +        for your application.
                                +        """
                                +        if self._headers_written:
                                +            _log.error("Cannot send error response after headers written")
                                +            if not self._finished:
                                +                self.finish()
                                +            return
                                +        self.clear()
                                +        self.set_status(status_code)
                                +        message = self.get_error_html(status_code, **kwargs)
                                +        self.finish(message)
                                +
                                +    def get_error_html(self, status_code, **kwargs):
                                +        """Override to implement custom error pages.
                                +
                                +        If this error was caused by an uncaught exception, the
                                +        exception object can be found in kwargs e.g. kwargs['exception']
                                +        """
                                +        return "%(code)d: %(message)s" \
                                +               "%(code)d: %(message)s" % {
                                +            "code": status_code,
                                +            "message": httplib.responses[status_code],
                                +        }
                                +
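+    # Example (illustrative override; "MyHandler" and the "error.html"
+    # template are hypothetical): a subclass can return its own markup here
+    # instead of the bare page generated above.
+    #
+    #     class MyHandler(RequestHandler):
+    #         def get_error_html(self, status_code, **kwargs):
+    #             return self.render_string("error.html", code=status_code)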
                                +    @property
                                +    def locale(self):
                                +        """The local for the current session.
                                +
                                +        Determined by either get_user_locale, which you can override to
                                +        set the locale based on, e.g., a user preference stored in a
                                +        database, or get_browser_locale, which uses the Accept-Language
                                +        header.
                                +        """
                                +        if not hasattr(self, "_locale"):
                                +            self._locale = self.get_user_locale()
                                +            if not self._locale:
                                +                self._locale = self.get_browser_locale()
                                +                assert self._locale
                                +        return self._locale
                                +
                                +    def get_user_locale(self):
                                +        """Override to determine the locale from the authenticated user.
                                +
                                +        If None is returned, we use the Accept-Language header.
                                +        """
                                +        return None
                                +
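+    # Example (illustrative override; storing a "locale" code on the user
+    # object is hypothetical): return a Locale from the user's preference,
+    # or None to fall back to the Accept-Language header.
+    #
+    #     def get_user_locale(self):
+    #         if self.current_user and self.current_user.get("locale"):
+    #             return locale.get(self.current_user["locale"])
+    #         return None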
                                +    def get_browser_locale(self, default="en_US"):
                                +        """Determines the user's locale from Accept-Language header.
                                +
                                +        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
                                +        """
                                +        if "Accept-Language" in self.request.headers:
                                +            languages = self.request.headers["Accept-Language"].split(",")
                                +            locales = []
                                +            for language in languages:
                                +                parts = language.strip().split(";")
                                +                if len(parts) > 1 and parts[1].startswith("q="):
                                +                    try:
                                +                        score = float(parts[1][2:])
                                +                    except (ValueError, TypeError):
                                +                        score = 0.0
                                +                else:
                                +                    score = 1.0
                                +                locales.append((parts[0], score))
                                +            if locales:
                                +                locales.sort(key=lambda (l, s): s, reverse=True)
                                +                codes = [l[0] for l in locales]
                                +                return locale.get(*codes)
                                +        return locale.get(default)
                                +
                                +    @property
                                +    def current_user(self):
                                +        """The authenticated user for this request.
                                +
+        Determined by get_current_user(), which you can override to
+        set the user based on, e.g., a cookie. If that method is not
+        overridden, this property always returns None.
                                +
                                +        We lazy-load the current user the first time this method is called
                                +        and cache the result after that.
                                +        """
                                +        if not hasattr(self, "_current_user"):
                                +            self._current_user = self.get_current_user()
                                +        return self._current_user
                                +
                                +    def get_current_user(self):
                                +        """Override to determine the current user from, e.g., a cookie."""
                                +        return None
                                +
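+    # Example (illustrative override, mirroring the secure-cookie sketch
+    # above; the "user_id" cookie name is hypothetical):
+    #
+    #     def get_current_user(self):
+    #         return self.get_secure_cookie("user_id")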
                                +    def get_login_url(self):
                                +        """Override to customize the login URL based on the request.
                                +
                                +        By default, we use the 'login_url' application setting.
                                +        """
                                +        self.require_setting("login_url", "@tornado.web.authenticated")
                                +        return self.application.settings["login_url"]
                                +
                                +    @property
                                +    def xsrf_token(self):
                                +        """The XSRF-prevention token for the current user/session.
                                +
                                +        To prevent cross-site request forgery, we set an '_xsrf' cookie
                                +        and include the same '_xsrf' value as an argument with all POST
                                +        requests. If the two do not match, we reject the form submission
                                +        as a potential forgery.
                                +
                                +        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
                                +        """
                                +        if not hasattr(self, "_xsrf_token"):
                                +            token = self.get_cookie("_xsrf")
                                +            if not token:
                                +                token = binascii.b2a_hex(uuid.uuid4().bytes)
                                +                expires_days = 30 if self.current_user else None
                                +                self.set_cookie("_xsrf", token, expires_days=expires_days)
                                +            self._xsrf_token = token
                                +        return self._xsrf_token
                                +
                                +    def check_xsrf_cookie(self):
                                +        """Verifies that the '_xsrf' cookie matches the '_xsrf' argument.
                                +
                                +        To prevent cross-site request forgery, we set an '_xsrf' cookie
                                +        and include the same '_xsrf' value as an argument with all POST
                                +        requests. If the two do not match, we reject the form submission
                                +        as a potential forgery.
                                +
                                +        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
                                +        """
                                +        if self.request.headers.get("X-Requested-With") == "XMLHttpRequest":
                                +            return
                                +        token = self.get_argument("_xsrf", None)
                                +        if not token:
                                +            raise HTTPError(403, "'_xsrf' argument missing from POST")
                                +        if self.xsrf_token != token:
                                +            raise HTTPError(403, "XSRF cookie does not match POST argument")
                                +
                                +    def xsrf_form_html(self):
                                +        """An HTML  element to be included with all POST forms.
                                +
                                +        It defines the _xsrf input value, which we check on all POST
                                +        requests to prevent cross-site request forgery. If you have set
                                +        the 'xsrf_cookies' application setting, you must include this
                                +        HTML within all of your HTML forms.
                                +
                                +        See check_xsrf_cookie() above for more information.
                                +        """
+        return '<input type="hidden" name="_xsrf" value="' + \
+            escape.xhtml_escape(self.xsrf_token) + '"/>'
                                +
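+    # Example (illustrative sketch, assuming the 'xsrf_cookies' application
+    # setting is enabled): a template can embed the hidden field via the
+    # xsrf_form_html helper exposed to templates by render_string() above.
+    #
+    #     <form action="/new_message" method="post">
+    #         {{ xsrf_form_html() }}
+    #         <input type="text" name="message"/>
+    #         <input type="submit" value="Post"/>
+    #     </form>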
                                +    def static_url(self, path):
                                +        """Returns a static URL for the given relative static file path.
                                +
                                +        This method requires you set the 'static_path' setting in your
                                +        application (which specifies the root directory of your static
                                +        files).
                                +
+        We append ?v=<signature> to the returned URL, which makes our
                                +        static file handler set an infinite expiration header on the
                                +        returned content. The signature is based on the content of the
                                +        file.
                                +
+        If this handler has an "include_host" attribute, we include the
                                +        full host for every static URL, including the "http://". Set
                                +        this attribute for handlers whose output needs non-relative static
                                +        path names.
                                +        """
                                +        self.require_setting("static_path", "static_url")
                                +        if not hasattr(RequestHandler, "_static_hashes"):
                                +            RequestHandler._static_hashes = {}
                                +        hashes = RequestHandler._static_hashes
                                +        if path not in hashes:
                                +            try:
                                +                f = open(os.path.join(
                                +                    self.application.settings["static_path"], path))
                                +                hashes[path] = hashlib.md5(f.read()).hexdigest()
                                +                f.close()
                                +            except:
                                +                _log.error("Could not open static file %r", path)
                                +                hashes[path] = None
                                +        base = self.request.protocol + "://" + self.request.host \
                                +            if getattr(self, "include_host", False) else ""
                                +        static_url_prefix = self.settings.get('static_url_prefix', '/static/')
                                +        if hashes.get(path):
                                +            return base + static_url_prefix + path + "?v=" + hashes[path][:5]
                                +        else:
                                +            return base + static_url_prefix + path
                                +
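+    # Example (illustrative sketch, assuming the Application defines
+    # 'static_path' and that "images/logo.png" exists under it): static_url()
+    # returns something like "/static/images/logo.png?v=abcde", where the
+    # v= value is the first five characters of the file's content hash.
+    #
+    #     class HomeHandler(RequestHandler):
+    #         def get(self):
+    #             self.write('<img src="' + self.static_url("images/logo.png") + '"/>')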
                                +    def async_callback(self, callback, *args, **kwargs):
                                +        """Wrap callbacks with this if they are used on asynchronous requests.
                                +
                                +        Catches exceptions and properly finishes the request.
                                +        """
                                +        if callback is None:
                                +            return None
                                +        if args or kwargs:
                                +            callback = functools.partial(callback, *args, **kwargs)
                                +        def wrapper(*args, **kwargs):
                                +            try:
                                +                return callback(*args, **kwargs)
                                +            except Exception, e:
                                +                if self._headers_written:
                                +                    _log.error("Exception after headers written",
                                +                                  exc_info=True)
                                +                else:
                                +                    self._handle_request_exception(e)
                                +        return wrapper
                                +
                                +    def require_setting(self, name, feature="this feature"):
                                +        """Raises an exception if the given app setting is not defined."""
                                +        if not self.application.settings.get(name):
                                +            raise Exception("You must define the '%s' setting in your "
                                +                            "application to use %s" % (name, feature))
                                +
                                +    def reverse_url(self, name, *args):
                                +        return self.application.reverse_url(name, *args)
                                +
                                +    def _execute(self, transforms, *args, **kwargs):
                                +        """Executes this request with the given output transforms."""
                                +        self._transforms = transforms
                                +        try:
                                +            if self.request.method not in self.SUPPORTED_METHODS:
                                +                raise HTTPError(405)
                                +            # If XSRF cookies are turned on, reject form submissions without
                                +            # the proper cookie
                                +            if self.request.method == "POST" and \
                                +               self.application.settings.get("xsrf_cookies"):
                                +                self.check_xsrf_cookie()
                                +            self.prepare()
                                +            if not self._finished:
                                +                getattr(self, self.request.method.lower())(*args, **kwargs)
                                +                if self._auto_finish and not self._finished:
                                +                    self.finish()
                                +        except Exception, e:
                                +            self._handle_request_exception(e)
                                +
                                +    def _generate_headers(self):
                                +        lines = [self.request.version + " " + str(self._status_code) + " " +
                                +                 httplib.responses[self._status_code]]
                                +        lines.extend(["%s: %s" % (n, v) for n, v in self._headers.iteritems()])
                                +        for cookie_dict in getattr(self, "_new_cookies", []):
                                +            for cookie in cookie_dict.values():
                                +                lines.append("Set-Cookie: " + cookie.OutputString(None))
                                +        return "\r\n".join(lines) + "\r\n\r\n"
                                +
                                +    def _log(self):
                                +        if self._status_code < 400:
                                +            log_method = _log.info
                                +        elif self._status_code < 500:
                                +            log_method = _log.warning
                                +        else:
                                +            log_method = _log.error
                                +        request_time = 1000.0 * self.request.request_time()
                                +        log_method("%d %s %.2fms", self._status_code,
                                +                   self._request_summary(), request_time)
                                +
                                +    def _request_summary(self):
                                +        return self.request.method + " " + self.request.uri + " (" + \
                                +            self.request.remote_ip + ")"
                                +
                                +    def _handle_request_exception(self, e):
                                +        if isinstance(e, HTTPError):
                                +            if e.log_message:
                                +                format = "%d %s: " + e.log_message
                                +                args = [e.status_code, self._request_summary()] + list(e.args)
                                +                _log.warning(format, *args)
                                +            if e.status_code not in httplib.responses:
                                +                _log.error("Bad HTTP status code: %d", e.status_code)
                                +                self.send_error(500, exception=e)
                                +            else:
                                +                self.send_error(e.status_code, exception=e)
                                +        else:
                                +            _log.error("Uncaught exception %s\n%r", self._request_summary(),
                                +                          self.request, exc_info=e)
                                +            self.send_error(500, exception=e)
                                +
                                +    def _ui_module(self, name, module):
                                +        def render(*args, **kwargs):
                                +            if not hasattr(self, "_active_modules"):
                                +                self._active_modules = {}
                                +            if name not in self._active_modules:
                                +                self._active_modules[name] = module(self)
                                +            rendered = self._active_modules[name].render(*args, **kwargs)
                                +            return rendered
                                +        return render
                                +
                                +    def _ui_method(self, method):
                                +        return lambda *args, **kwargs: method(self, *args, **kwargs)
                                +
                                +
                                +def asynchronous(method):
                                +    """Wrap request handler methods with this if they are asynchronous.
                                +
                                +    If this decorator is given, the response is not finished when the
                                +    method returns. It is up to the request handler to call self.finish()
                                +    to finish the HTTP request. Without this decorator, the request is
                                +    automatically finished when the get() or post() method returns.
                                +
                                +       class MyRequestHandler(web.RequestHandler):
                                +           @web.asynchronous
                                +           def get(self):
                                +              http = httpclient.AsyncHTTPClient()
                                +              http.fetch("http://friendfeed.com/", self._on_download)
                                +
                                +           def _on_download(self, response):
                                +              self.write("Downloaded!")
                                +              self.finish()
                                +
                                +    """
                                +    @functools.wraps(method)
                                +    def wrapper(self, *args, **kwargs):
                                +        if self.application._wsgi:
                                +            raise Exception("@asynchronous is not supported for WSGI apps")
                                +        self._auto_finish = False
                                +        return method(self, *args, **kwargs)
                                +    return wrapper
                                +
                                +
                                +def removeslash(method):
                                +    """Use this decorator to remove trailing slashes from the request path.
                                +
                                +    For example, a request to '/foo/' would redirect to '/foo' with this
                                +    decorator. Your request handler mapping should use a regular expression
                                +    like r'/foo/*' in conjunction with using the decorator.
                                +    """
                                +    @functools.wraps(method)
                                +    def wrapper(self, *args, **kwargs):
                                +        if self.request.path.endswith("/"):
                                +            if self.request.method == "GET":
                                +                uri = self.request.path.rstrip("/")
                                +                if self.request.query: uri += "?" + self.request.query
                                +                self.redirect(uri)
                                +                return
                                +            raise HTTPError(404)
                                +        return method(self, *args, **kwargs)
                                +    return wrapper
                                +
                                +
                                +def addslash(method):
                                +    """Use this decorator to add a missing trailing slash to the request path.
                                +
                                +    For example, a request to '/foo' would redirect to '/foo/' with this
                                +    decorator. Your request handler mapping should use a regular expression
                                +    like r'/foo/?' in conjunction with using the decorator.
                                +    """
                                +    @functools.wraps(method)
                                +    def wrapper(self, *args, **kwargs):
                                +        if not self.request.path.endswith("/"):
                                +            if self.request.method == "GET":
                                +                uri = self.request.path + "/"
                                +                if self.request.query: uri += "?" + self.request.query
                                +                self.redirect(uri)
                                +                return
                                +            raise HTTPError(404)
                                +        return method(self, *args, **kwargs)
                                +    return wrapper
                                +
                                +
                                +class Application(object):
                                +    """A collection of request handlers that make up a web application.
                                +
                                +    Instances of this class are callable and can be passed directly to
                                +    HTTPServer to serve the application:
                                +
                                +        application = web.Application([
                                +            (r"/", MainPageHandler),
                                +        ])
                                +        http_server = httpserver.HTTPServer(application)
                                +        http_server.listen(8080)
                                +        ioloop.IOLoop.instance().start()
                                +
                                +    The constructor for this class takes in a list of URLSpec objects
                                +    or (regexp, request_class) tuples. When we receive requests, we
+    iterate over the list in order and create an instance of the
+    first request class whose regexp matches the request path.
                                +
                                +    Each tuple can contain an optional third element, which should be a
                                +    dictionary if it is present. That dictionary is passed as keyword
+    arguments to the constructor of the handler. This pattern is used
                                +    for the StaticFileHandler below:
                                +
                                +        application = web.Application([
                                +            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
                                +        ])
                                +
                                +    We support virtual hosts with the add_handlers method, which takes in
                                +    a host regular expression as the first argument:
                                +
                                +        application.add_handlers(r"www\.myhost\.com", [
                                +            (r"/article/([0-9]+)", ArticleHandler),
                                +        ])
                                +
                                +    You can serve static files by sending the static_path setting as a
                                +    keyword argument. We will serve those files from the /static/ URI
                                +    (this is configurable with the static_url_prefix setting),
                                +    and we will serve /favicon.ico and /robots.txt from the same directory.
                                +    """
                                +    def __init__(self, handlers=None, default_host="", transforms=None,
                                +                 wsgi=False, **settings):
                                +        if transforms is None:
                                +            self.transforms = []
                                +            if settings.get("gzip"):
                                +                self.transforms.append(GZipContentEncoding)
                                +            self.transforms.append(ChunkedTransferEncoding)
                                +        else:
                                +            self.transforms = transforms
                                +        self.handlers = []
                                +        self.named_handlers = {}
                                +        self.default_host = default_host
                                +        self.settings = settings
                                +        self.ui_modules = {}
                                +        self.ui_methods = {}
                                +        self._wsgi = wsgi
                                +        self._load_ui_modules(settings.get("ui_modules", {}))
                                +        self._load_ui_methods(settings.get("ui_methods", {}))
                                +        if self.settings.get("static_path"):
                                +            path = self.settings["static_path"]
                                +            handlers = list(handlers or [])
                                +            static_url_prefix = settings.get("static_url_prefix",
                                +                                             "/static/")
                                +            handlers = [
                                +                (re.escape(static_url_prefix) + r"(.*)", StaticFileHandler,
                                +                 dict(path=path)),
                                +                (r"/(favicon\.ico)", StaticFileHandler, dict(path=path)),
                                +                (r"/(robots\.txt)", StaticFileHandler, dict(path=path)),
                                +            ] + handlers
                                +        if handlers: self.add_handlers(".*$", handlers)
                                +
                                +        # Automatically reload modified modules
                                +        if self.settings.get("debug") and not wsgi:
                                +            import autoreload
                                +            autoreload.start()
                                +
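+    # Example (illustrative sketch; the handler class and paths are
+    # hypothetical): settings such as static_path, xsrf_cookies, cookie_secret
+    # and debug are passed as keyword arguments and end up in self.settings.
+    #
+    #     application = Application(
+    #         [(r"/", MainPageHandler)],
+    #         static_path="/var/www/static",
+    #         xsrf_cookies=True,
+    #         cookie_secret="<a long random string>",
+    #         debug=True,
+    #     )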
                                +    def add_handlers(self, host_pattern, host_handlers):
                                +        """Appends the given handlers to our handler list."""
                                +        if not host_pattern.endswith("$"):
                                +            host_pattern += "$"
                                +        handlers = []
                                +        # The handlers with the wildcard host_pattern are a special
                                +        # case - they're added in the constructor but should have lower
                                +        # precedence than the more-precise handlers added later.
                                +        # If a wildcard handler group exists, it should always be last
                                +        # in the list, so insert new groups just before it.
                                +        if self.handlers and self.handlers[-1][0].pattern == '.*$':
                                +            self.handlers.insert(-1, (re.compile(host_pattern), handlers))
                                +        else:
                                +            self.handlers.append((re.compile(host_pattern), handlers))
                                +
                                +        for spec in host_handlers:
                                +            if type(spec) is type(()):
                                +                assert len(spec) in (2, 3)
                                +                pattern = spec[0]
                                +                handler = spec[1]
                                +                if len(spec) == 3:
                                +                    kwargs = spec[2]
                                +                else:
                                +                    kwargs = {}
                                +                spec = URLSpec(pattern, handler, kwargs)
                                +            handlers.append(spec)
                                +            if spec.name:
                                +                if spec.name in self.named_handlers:
                                +                    _log.warning(
                                +                        "Multiple handlers named %s; replacing previous value",
                                +                        spec.name)
                                +                self.named_handlers[spec.name] = spec
                                +
                                +    def add_transform(self, transform_class):
                                +        """Adds the given OutputTransform to our transform list."""
                                +        self.transforms.append(transform_class)
                                +
                                +    def _get_host_handlers(self, request):
                                +        host = request.host.lower().split(':')[0]
                                +        for pattern, handlers in self.handlers:
                                +            if pattern.match(host):
                                +                return handlers
                                +        # Look for default host if not behind load balancer (for debugging)
                                +        if "X-Real-Ip" not in request.headers:
                                +            for pattern, handlers in self.handlers:
                                +                if pattern.match(self.default_host):
                                +                    return handlers
                                +        return None
                                +
                                +    def _load_ui_methods(self, methods):
                                +        if type(methods) is types.ModuleType:
                                +            self._load_ui_methods(dict((n, getattr(methods, n))
                                +                                       for n in dir(methods)))
                                +        elif isinstance(methods, list):
+            for m in methods: self._load_ui_methods(m)
                                +        else:
                                +            for name, fn in methods.iteritems():
                                +                if not name.startswith("_") and hasattr(fn, "__call__") \
                                +                   and name[0].lower() == name[0]:
                                +                    self.ui_methods[name] = fn
                                +
                                +    def _load_ui_modules(self, modules):
                                +        if type(modules) is types.ModuleType:
                                +            self._load_ui_modules(dict((n, getattr(modules, n))
                                +                                       for n in dir(modules)))
                                +        elif isinstance(modules, list):
+            for m in modules: self._load_ui_modules(m)
                                +        else:
                                +            assert isinstance(modules, dict)
                                +            for name, cls in modules.iteritems():
                                +                try:
                                +                    if issubclass(cls, UIModule):
                                +                        self.ui_modules[name] = cls
                                +                except TypeError:
                                +                    pass
                                +
                                +    def __call__(self, request):
                                +        """Called by HTTPServer to execute the request."""
                                +        transforms = [t(request) for t in self.transforms]
                                +        handler = None
                                +        args = []
                                +        kwargs = {}
                                +        handlers = self._get_host_handlers(request)
                                +        if not handlers:
                                +            handler = RedirectHandler(
                                +                request, "http://" + self.default_host + "/")
                                +        else:
                                +            for spec in handlers:
                                +                match = spec.regex.match(request.path)
                                +                if match:
                                +                    handler = spec.handler_class(self, request, **spec.kwargs)
                                +                    # Pass matched groups to the handler.  Since
                                +                    # match.groups() includes both named and unnamed groups,
                                +                    # we want to use either groups or groupdict but not both.
                                +                    kwargs = match.groupdict()
                                +                    if kwargs:
                                +                        args = []
                                +                    else:
                                +                        args = match.groups()
                                +                    break
                                +            if not handler:
                                +                handler = ErrorHandler(self, request, 404)
                                +
                                +        # In debug mode, re-compile templates and reload static files on every
                                +        # request so you don't need to restart to see changes
                                +        if self.settings.get("debug"):
                                +            if getattr(RequestHandler, "_templates", None):
                                 +                for loader in RequestHandler._templates.values():
                                 +                    loader.reset()
                                +            RequestHandler._static_hashes = {}
                                +
                                +        handler._execute(transforms, *args, **kwargs)
                                +        return handler
                                +
                                +    def reverse_url(self, name, *args):
                                +        """Returns a URL path for handler named `name`
                                +
                                +        The handler must be added to the application as a named URLSpec
                                +        """
                                +        if name in self.named_handlers:
                                +            return self.named_handlers[name].reverse(*args)
                                +        raise KeyError("%s not found in named urls" % name)
                                +
                                +
                                +class HTTPError(Exception):
                                +    """An exception that will turn into an HTTP error response."""
                                +    def __init__(self, status_code, log_message=None, *args):
                                +        self.status_code = status_code
                                +        self.log_message = log_message
                                +        self.args = args
                                +
                                +    def __str__(self):
                                +        message = "HTTP %d: %s" % (
                                +            self.status_code, httplib.responses[self.status_code])
                                +        if self.log_message:
                                +            return message + " (" + (self.log_message % self.args) + ")"
                                +        else:
                                +            return message
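                                 +
                                 +
                                 +# A minimal illustrative sketch, not part of the original Tornado source:
                                 +# raising HTTPError from a handler turns into the corresponding error
                                 +# response, and the optional log_message/args pair is %-formatted into the
                                 +# server log entry.  The handler name below is hypothetical.
                                 +class _GoneHandler(RequestHandler):
                                 +    """Hypothetical handler that answers every request with 410 Gone."""
                                 +    def get(self):
                                 +        raise HTTPError(410, "resource %s is gone", self.request.path)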
                                +
                                +
                                +class ErrorHandler(RequestHandler):
                                +    """Generates an error response with status_code for all requests."""
                                +    def __init__(self, application, request, status_code):
                                +        RequestHandler.__init__(self, application, request)
                                +        self.set_status(status_code)
                                +
                                +    def prepare(self):
                                +        raise HTTPError(self._status_code)
                                +
                                +
                                +class RedirectHandler(RequestHandler):
                                +    """Redirects the client to the given URL for all GET requests.
                                +
                                +    You should provide the keyword argument "url" to the handler, e.g.:
                                +
                                +        application = web.Application([
                                +            (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
                                +        ])
                                +    """
                                +    def __init__(self, application, request, url, permanent=True):
                                +        RequestHandler.__init__(self, application, request)
                                +        self._url = url
                                +        self._permanent = permanent
                                +
                                +    def get(self):
                                +        self.redirect(self._url, permanent=self._permanent)
                                +
                                +
                                +class StaticFileHandler(RequestHandler):
                                +    """A simple handler that can serve static content from a directory.
                                +
                                +    To map a path to this handler for a static data directory /var/www,
                                +    you would add a line to your application like:
                                +
                                +        application = web.Application([
                                +            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
                                +        ])
                                +
                                +    The local root directory of the content should be passed as the "path"
                                +    argument to the handler.
                                +
                                 +    To support aggressive browser caching, if the argument "v" is given
                                 +    with the path, we set a far-future (10-year) HTTP expiration header.
                                 +    So, if you want browsers to cache a file indefinitely, send them to,
                                 +    e.g., /static/images/myimage.png?v=xxx.
                                +    """
                                +    def __init__(self, application, request, path):
                                +        RequestHandler.__init__(self, application, request)
                                +        self.root = os.path.abspath(path) + os.path.sep
                                +
                                +    def head(self, path):
                                +        self.get(path, include_body=False)
                                +
                                +    def get(self, path, include_body=True):
                                +        abspath = os.path.abspath(os.path.join(self.root, path))
                                +        if not abspath.startswith(self.root):
                                +            raise HTTPError(403, "%s is not in root static directory", path)
                                +        if not os.path.exists(abspath):
                                +            raise HTTPError(404)
                                +        if not os.path.isfile(abspath):
                                +            raise HTTPError(403, "%s is not a file", path)
                                +
                                +        stat_result = os.stat(abspath)
                                +        modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
                                +
                                +        self.set_header("Last-Modified", modified)
                                +        if "v" in self.request.arguments:
                                +            self.set_header("Expires", datetime.datetime.utcnow() + \
                                +                                       datetime.timedelta(days=365*10))
                                +            self.set_header("Cache-Control", "max-age=" + str(86400*365*10))
                                +        else:
                                +            self.set_header("Cache-Control", "public")
                                +        mime_type, encoding = mimetypes.guess_type(abspath)
                                +        if mime_type:
                                +            self.set_header("Content-Type", mime_type)
                                +
                                +        # Check the If-Modified-Since, and don't send the result if the
                                +        # content has not been modified
                                +        ims_value = self.request.headers.get("If-Modified-Since")
                                +        if ims_value is not None:
                                +            date_tuple = email.utils.parsedate(ims_value)
                                +            if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
                                +            if if_since >= modified:
                                +                self.set_status(304)
                                +                return
                                +
                                +        if not include_body:
                                +            return
                                +        self.set_header("Content-Length", stat_result[stat.ST_SIZE])
                                +        file = open(abspath, "rb")
                                +        try:
                                +            self.write(file.read())
                                +        finally:
                                +            file.close()
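                                 +
                                 +
                                 +# A minimal illustrative sketch, not part of the original Tornado source:
                                 +# mapping a directory of static files as described in the docstring above.
                                 +# The directory "/var/www" and version string "20100527" are placeholders.
                                 +def _example_static_app():
                                 +    """Hypothetical application serving /static/* from /var/www."""
                                 +    return Application([
                                 +        (r"/static/(.*)", StaticFileHandler, {"path": "/var/www"}),
                                 +    ])
                                 +# A request for /static/logo.png?v=20100527 receives the ten-year Expires
                                 +# header set above, so changing the "v" value on deploy busts browser caches.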
                                +
                                +
                                +class FallbackHandler(RequestHandler):
                                +    """A RequestHandler that wraps another HTTP server callback.
                                +
                                +    The fallback is a callable object that accepts an HTTPRequest,
                                 +    such as an Application or tornado.wsgi.WSGIContainer.  This is most
                                 +    useful when you want to serve both Tornado RequestHandlers and a WSGI
                                 +    application from the same server.
                                +    Typical usage:
                                +        wsgi_app = tornado.wsgi.WSGIContainer(
                                +            django.core.handlers.wsgi.WSGIHandler())
                                +        application = tornado.web.Application([
                                +            (r"/foo", FooHandler),
                                +            (r".*", FallbackHandler, dict(fallback=wsgi_app),
                                +        ])
                                +    """
                                +    def __init__(self, app, request, fallback):
                                +        RequestHandler.__init__(self, app, request)
                                +        self.fallback = fallback
                                +
                                +    def prepare(self):
                                +        self.fallback(self.request)
                                +        self._finished = True
                                +
                                +
                                +class OutputTransform(object):
                                +    """A transform modifies the result of an HTTP request (e.g., GZip encoding)
                                +
                                +    A new transform instance is created for every request. See the
                                +    ChunkedTransferEncoding example below if you want to implement a
                                +    new Transform.
                                +    """
                                +    def __init__(self, request):
                                +        pass
                                +
                                +    def transform_first_chunk(self, headers, chunk, finishing):
                                +        return headers, chunk
                                +
                                +    def transform_chunk(self, chunk, finishing):
                                +        return chunk
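                                 +
                                 +
                                 +# A minimal illustrative sketch, not part of the original Tornado source:
                                 +# a custom OutputTransform that stamps one extra header on the first chunk
                                 +# and leaves body chunks untouched.  The class and header names are made up.
                                 +class _ServerTimeTransform(OutputTransform):
                                 +    """Adds an X-Example-Time header recording when the response started."""
                                 +    def __init__(self, request):
                                 +        self._start = time.time()
                                 +
                                 +    def transform_first_chunk(self, headers, chunk, finishing):
                                 +        headers["X-Example-Time"] = str(self._start)
                                 +        return headers, chunk
                                 +# A transform like this is enabled per application with
                                 +# application.add_transform(_ServerTimeTransform).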
                                +
                                +
                                +class GZipContentEncoding(OutputTransform):
                                +    """Applies the gzip content encoding to the response.
                                +
                                +    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
                                +    """
                                +    CONTENT_TYPES = set([
                                +        "text/plain", "text/html", "text/css", "text/xml",
                                +        "application/x-javascript", "application/xml", "application/atom+xml",
                                +        "text/javascript", "application/json", "application/xhtml+xml"])
                                +    MIN_LENGTH = 5
                                +
                                +    def __init__(self, request):
                                +        self._gzipping = request.supports_http_1_1() and \
                                +            "gzip" in request.headers.get("Accept-Encoding", "")
                                +
                                +    def transform_first_chunk(self, headers, chunk, finishing):
                                +        if self._gzipping:
                                +            ctype = headers.get("Content-Type", "").split(";")[0]
                                +            self._gzipping = (ctype in self.CONTENT_TYPES) and \
                                +                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
                                +                (finishing or "Content-Length" not in headers) and \
                                +                ("Content-Encoding" not in headers)
                                +        if self._gzipping:
                                +            headers["Content-Encoding"] = "gzip"
                                +            self._gzip_value = cStringIO.StringIO()
                                +            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
                                +            self._gzip_pos = 0
                                +            chunk = self.transform_chunk(chunk, finishing)
                                +            if "Content-Length" in headers:
                                +                headers["Content-Length"] = str(len(chunk))
                                +        return headers, chunk
                                +
                                +    def transform_chunk(self, chunk, finishing):
                                +        if self._gzipping:
                                +            self._gzip_file.write(chunk)
                                +            if finishing:
                                +                self._gzip_file.close()
                                +            else:
                                +                self._gzip_file.flush()
                                +            chunk = self._gzip_value.getvalue()
                                +            if self._gzip_pos > 0:
                                +                chunk = chunk[self._gzip_pos:]
                                +            self._gzip_pos += len(chunk)
                                +        return chunk
                                +
                                +
                                +class ChunkedTransferEncoding(OutputTransform):
                                +    """Applies the chunked transfer encoding to the response.
                                +
                                +    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
                                +    """
                                +    def __init__(self, request):
                                +        self._chunking = request.supports_http_1_1()
                                +
                                +    def transform_first_chunk(self, headers, chunk, finishing):
                                +        if self._chunking:
                                +            # No need to chunk the output if a Content-Length is specified
                                +            if "Content-Length" in headers or "Transfer-Encoding" in headers:
                                +                self._chunking = False
                                +            else:
                                +                headers["Transfer-Encoding"] = "chunked"
                                +                chunk = self.transform_chunk(chunk, finishing)
                                +        return headers, chunk
                                +
                                +    def transform_chunk(self, block, finishing):
                                +        if self._chunking:
                                +            # Don't write out empty chunks because that means END-OF-STREAM
                                +            # with chunked encoding
                                +            if block:
                                +                block = ("%x" % len(block)) + "\r\n" + block + "\r\n"
                                +            if finishing:
                                +                block += "0\r\n\r\n"
                                +        return block
                                +
                                +
                                +def authenticated(method):
                                +    """Decorate methods with this to require that the user be logged in."""
                                +    @functools.wraps(method)
                                +    def wrapper(self, *args, **kwargs):
                                +        if not self.current_user:
                                +            if self.request.method == "GET":
                                +                url = self.get_login_url()
                                +                if "?" not in url:
                                +                    url += "?" + urllib.urlencode(dict(next=self.request.uri))
                                +                self.redirect(url)
                                +                return
                                +            raise HTTPError(403)
                                +        return method(self, *args, **kwargs)
                                +    return wrapper
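                                 +
                                 +
                                 +# A minimal illustrative sketch, not part of the original Tornado source:
                                 +# methods wrapped with @authenticated redirect anonymous GET requests to the
                                 +# login url and reject other anonymous requests with 403.  The handler name
                                 +# is hypothetical.
                                 +class _AccountHandler(RequestHandler):
                                 +    """Hypothetical handler whose page requires a logged-in user."""
                                 +    @authenticated
                                 +    def get(self):
                                 +        self.write("This page is only visible to logged-in users.")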
                                +
                                +
                                +class UIModule(object):
                                +    """A UI re-usable, modular unit on a page.
                                +
                                +    UI modules often execute additional queries, and they can include
                                +    additional CSS and JavaScript that will be included in the output
                                +    page, which is automatically inserted on page render.
                                +    """
                                +    def __init__(self, handler):
                                +        self.handler = handler
                                +        self.request = handler.request
                                +        self.ui = handler.ui
                                +        self.current_user = handler.current_user
                                +        self.locale = handler.locale
                                +
                                +    def render(self, *args, **kwargs):
                                +        raise NotImplementedError()
                                +
                                +    def embedded_javascript(self):
                                +        """Returns a JavaScript string that will be embedded in the page."""
                                +        return None
                                +
                                +    def javascript_files(self):
                                +        """Returns a list of JavaScript files required by this module."""
                                +        return None
                                +
                                +    def embedded_css(self):
                                +        """Returns a CSS string that will be embedded in the page."""
                                +        return None
                                +
                                +    def css_files(self):
                                +        """Returns a list of JavaScript files required by this module."""
                                +        return None
                                +
                                +    def html_head(self):
                                +        """Returns a CSS string that will be put in the  element"""
                                +        return None
                                +
                                +    def html_body(self):
                                +        """Returns an HTML string that will be put in the  element"""
                                +        return None
                                +
                                +    def render_string(self, path, **kwargs):
                                +        return self.handler.render_string(path, **kwargs)
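                                 +
                                 +
                                 +# A minimal illustrative sketch, not part of the original Tornado source:
                                 +# a UIModule that renders inline HTML.  The module and argument names are
                                 +# made up.  With ui_modules={"Greeting": _GreetingModule} in the application
                                 +# settings, a template would typically invoke it as
                                 +# {{ modules.Greeting(name) }}.
                                 +class _GreetingModule(UIModule):
                                 +    """Hypothetical module rendering a one-line greeting."""
                                 +    def render(self, name):
                                 +        # Real modules should escape user data before interpolating it.
                                 +        return "<p>Hello, %s!</p>" % name
                                 +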
                                +
                                +class URLSpec(object):
                                +    """Specifies mappings between URLs and handlers."""
                                +    def __init__(self, pattern, handler_class, kwargs={}, name=None):
                                +        """Creates a URLSpec.
                                +
                                +        Parameters:
                                +        pattern: Regular expression to be matched.  Any groups in the regex
                                +            will be passed in to the handler's get/post/etc methods as
                                +            arguments.
                                +        handler_class: RequestHandler subclass to be invoked.
                                +        kwargs (optional): A dictionary of additional arguments to be passed
                                +            to the handler's constructor.
                                +        name (optional): A name for this handler.  Used by
                                +            Application.reverse_url.
                                +        """
                                +        if not pattern.endswith('$'):
                                +            pattern += '$'
                                +        self.regex = re.compile(pattern)
                                +        self.handler_class = handler_class
                                +        self.kwargs = kwargs
                                +        self.name = name
                                +        self._path, self._group_count = self._find_groups()
                                +
                                +    def _find_groups(self):
                                +        """Returns a tuple (reverse string, group count) for a url.
                                +
                                +        For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
                                +        would return ('/%s/%s/', 2).
                                +        """
                                +        pattern = self.regex.pattern
                                +        if pattern.startswith('^'):
                                +            pattern = pattern[1:]
                                +        if pattern.endswith('$'):
                                +            pattern = pattern[:-1]
                                +
                                +        if self.regex.groups != pattern.count('('):
                                +            # The pattern is too complicated for our simplistic matching,
                                +            # so we can't support reversing it.
                                +            return (None, None)
                                +
                                +        pieces = []
                                +        for fragment in pattern.split('('):
                                +            if ')' in fragment:
                                +                paren_loc = fragment.index(')')
                                +                if paren_loc >= 0:
                                +                    pieces.append('%s' + fragment[paren_loc + 1:])
                                +            else:
                                +                pieces.append(fragment)
                                +
                                +        return (''.join(pieces), self.regex.groups)
                                +
                                +    def reverse(self, *args):
                                +        assert self._path is not None, \
                                +            "Cannot reverse url regex " + self.regex.pattern
                                +        assert len(args) == self._group_count, "required number of arguments "\
                                +            "not found"
                                +        if not len(args):
                                +            return self._path
                                +        return self._path % tuple([str(a) for a in args])
                                +
                                +url = URLSpec
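                                 +
                                 +
                                 +# A minimal illustrative sketch, not part of the original Tornado source:
                                 +# giving a URLSpec a name so Application.reverse_url can rebuild its path.
                                 +# The pattern, name, and factory function below are hypothetical; with this
                                 +# wiring, application.reverse_url("book", 42) returns "/book/42/".
                                 +def _example_named_routes(handler_class):
                                 +    """Hypothetical application with a single named route."""
                                 +    return Application([
                                 +        url(r"/book/([0-9]+)/", handler_class, name="book"),
                                 +    ])
                                 +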
                                +
                                +def _utf8(s):
                                +    if isinstance(s, unicode):
                                +        return s.encode("utf-8")
                                +    assert isinstance(s, str)
                                +    return s
                                +
                                +
                                +def _unicode(s):
                                +    if isinstance(s, str):
                                +        try:
                                +            return s.decode("utf-8")
                                +        except UnicodeDecodeError:
                                +            raise HTTPError(400, "Non-utf8 argument")
                                +    assert isinstance(s, unicode)
                                +    return s
                                +
                                +
                                +def _time_independent_equals(a, b):
                                +    if len(a) != len(b):
                                +        return False
                                +    result = 0
                                +    for x, y in zip(a, b):
                                +        result |= ord(x) ^ ord(y)
                                +    return result == 0
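                                 +
                                 +
                                 +# A minimal illustrative sketch, not part of the original Tornado source:
                                 +# comparing secrets byte-by-byte with early exit would leak timing
                                 +# information, so signature checks should go through the helper above.
                                 +def _example_check_signature(expected, provided):
                                 +    """Hypothetical constant-time check of a client-supplied signature."""
                                 +    return _time_independent_equals(_utf8(expected), _utf8(provided))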
                                +
                                +
                                +class _O(dict):
                                +    """Makes a dictionary behave like an object."""
                                +    def __getattr__(self, name):
                                +        try:
                                +            return self[name]
                                +        except KeyError:
                                +            raise AttributeError(name)
                                +
                                +    def __setattr__(self, name, value):
                                +        self[name] = value
                                diff --git a/vendor/tornado/tornado/websocket.py b/vendor/tornado/tornado/websocket.py
                                new file mode 100644
                                index 000000000000..38a58012cc1d
                                --- /dev/null
                                +++ b/vendor/tornado/tornado/websocket.py
                                @@ -0,0 +1,138 @@
                                +#!/usr/bin/env python
                                +#
                                +# Copyright 2009 Facebook
                                +#
                                +# Licensed under the Apache License, Version 2.0 (the "License"); you may
                                +# not use this file except in compliance with the License. You may obtain
                                +# a copy of the License at
                                +#
                                +#     http://www.apache.org/licenses/LICENSE-2.0
                                +#
                                +# Unless required by applicable law or agreed to in writing, software
                                +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
                                +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
                                +# License for the specific language governing permissions and limitations
                                +# under the License.
                                +
                                +import functools
                                +import logging
                                +import tornado.escape
                                +import tornado.web
                                +
                                +_log = logging.getLogger('tornado.websocket')
                                +
                                +class WebSocketHandler(tornado.web.RequestHandler):
                                +    """A request handler for HTML 5 Web Sockets.
                                +
                                +    See http://www.w3.org/TR/2009/WD-websockets-20091222/ for details on the
                                +    JavaScript interface. We implement the protocol as specified at
                                +    http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-55.
                                +
                                 +    Here is an example Web Socket handler that echoes all received messages
                                +    back to the client:
                                +
                                +      class EchoWebSocket(websocket.WebSocketHandler):
                                +          def open(self):
                                +              self.receive_message(self.on_message)
                                +
                                +          def on_message(self, message):
                                +             self.write_message(u"You said: " + message)
                                +
                                +    Web Sockets are not standard HTTP connections. The "handshake" is HTTP,
                                +    but after the handshake, the protocol is message-based. Consequently,
                                +    most of the Tornado HTTP facilities are not available in handlers of this
                                 +    type. The only communication methods available to you are write_message()
                                 +    and receive_message(). Likewise, your request handler class should
                                 +    implement the open() method rather than get() or post().
                                +
                                +    If you map the handler above to "/websocket" in your application, you can
                                +    invoke it in JavaScript with:
                                +
                                +      var ws = new WebSocket("ws://localhost:8888/websocket");
                                +      ws.onopen = function() {
                                +         ws.send("Hello, world");
                                +      };
                                +      ws.onmessage = function (evt) {
                                +         alert(evt.data);
                                +      };
                                +
                                +    This script pops up an alert box that says "You said: Hello, world".
                                +    """
                                +    def __init__(self, application, request):
                                +        tornado.web.RequestHandler.__init__(self, application, request)
                                +        self.stream = request.connection.stream
                                +
                                +    def _execute(self, transforms, *args, **kwargs):
                                +        if self.request.headers.get("Upgrade") != "WebSocket" or \
                                +           self.request.headers.get("Connection") != "Upgrade" or \
                                +           not self.request.headers.get("Origin"):
                                +            message = "Expected WebSocket headers"
                                +            self.stream.write(
                                +                "HTTP/1.1 403 Forbidden\r\nContent-Length: " +
                                +                str(len(message)) + "\r\n\r\n" + message)
                                +            return
                                +        self.stream.write(
                                +            "HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
                                +            "Upgrade: WebSocket\r\n"
                                +            "Connection: Upgrade\r\n"
                                +            "Server: TornadoServer/0.1\r\n"
                                +            "WebSocket-Origin: " + self.request.headers["Origin"] + "\r\n"
                                +            "WebSocket-Location: ws://" + self.request.host +
                                +            self.request.path + "\r\n\r\n")
                                +        self.async_callback(self.open)(*args, **kwargs)
                                +
                                +    def write_message(self, message):
                                +        """Sends the given message to the client of this Web Socket."""
                                +        if isinstance(message, dict):
                                +            message = tornado.escape.json_encode(message)
                                +        if isinstance(message, unicode):
                                +            message = message.encode("utf-8")
                                +        assert isinstance(message, str)
                                +        self.stream.write("\x00" + message + "\xff")
                                +
                                +    def receive_message(self, callback):
                                +        """Calls callback when the browser calls send() on this Web Socket."""
                                +        callback = self.async_callback(callback)
                                +        self.stream.read_bytes(
                                +            1, functools.partial(self._on_frame_type, callback))
                                +
                                +    def close(self):
                                +        """Closes this Web Socket.
                                +
                                +        The browser will receive the onclose event for the open web socket
                                +        when this method is called.
                                +        """
                                +        self.stream.close()
                                +
                                +    def async_callback(self, callback, *args, **kwargs):
                                +        """Wrap callbacks with this if they are used on asynchronous requests.
                                +
                                +        Catches exceptions properly and closes this Web Socket if an exception
                                +        is uncaught.
                                +        """
                                +        if args or kwargs:
                                +            callback = functools.partial(callback, *args, **kwargs)
                                +        def wrapper(*args, **kwargs):
                                +            try:
                                +                return callback(*args, **kwargs)
                                 +            except Exception:
                                 +                _log.error("Uncaught exception in %s",
                                 +                           self.request.path, exc_info=True)
                                 +                self.stream.close()
                                +        return wrapper
                                +
                                +    def _on_frame_type(self, callback, byte):
                                +        if ord(byte) & 0x80 == 0x80:
                                +            raise Exception("Length-encoded format not yet supported")
                                +        self.stream.read_until(
                                +            "\xff", functools.partial(self._on_end_delimiter, callback))
                                +
                                +    def _on_end_delimiter(self, callback, frame):
                                +        callback(frame[:-1].decode("utf-8", "replace"))
                                +
                                +    def _not_supported(self, *args, **kwargs):
                                +        raise Exception("Method not supported for Web Sockets")
                                +
                                +for method in ["write", "redirect", "set_header", "send_error", "set_cookie",
                                +               "set_status", "flush", "finish"]:
                                +    setattr(WebSocketHandler, method, WebSocketHandler._not_supported)
                                diff --git a/vendor/tornado/tornado/win32_support.py b/vendor/tornado/tornado/win32_support.py
                                new file mode 100644
                                index 000000000000..f3efa8e8929c
                                --- /dev/null
                                +++ b/vendor/tornado/tornado/win32_support.py
                                @@ -0,0 +1,123 @@
                                +# NOTE: win32 support is currently experimental, and not recommended
                                +# for production use.
                                +
                                +import ctypes
                                +import ctypes.wintypes
                                +import os
                                +import socket
                                +import errno
                                +
                                +
                                +# See: http://msdn.microsoft.com/en-us/library/ms738573(VS.85).aspx
                                +ioctlsocket = ctypes.windll.ws2_32.ioctlsocket
                                +ioctlsocket.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.LONG, ctypes.wintypes.ULONG)
                                +ioctlsocket.restype = ctypes.c_int
                                +
                                +# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
                                +SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
                                +SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
                                +SetHandleInformation.restype = ctypes.wintypes.BOOL
                                +
                                +HANDLE_FLAG_INHERIT = 0x00000001
                                +
                                +
                                +F_GETFD = 1
                                +F_SETFD = 2
                                +F_GETFL = 3
                                +F_SETFL = 4
                                +
                                +FD_CLOEXEC = 1
                                +
                                +os.O_NONBLOCK = 2048
                                +
                                +FIONBIO = 126
                                +
                                +
                                +def fcntl(fd, op, arg=0):
                                +    if op == F_GETFD or op == F_GETFL:
                                +        return 0
                                +    elif op == F_SETFD:
                                +        # Check that the flag is CLOEXEC and translate
                                +        if arg == FD_CLOEXEC:
                                +            success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, arg)
                                +            if not success:
                                 +                raise ctypes.WinError()
                                +        else:
                                +            raise ValueError("Unsupported arg")
                                +    #elif op == F_SETFL:
                                +        ## Check that the flag is NONBLOCK and translate
                                +        #if arg == os.O_NONBLOCK:
                                +            ##pass
                                +            #result = ioctlsocket(fd, FIONBIO, 1)
                                +            #if result != 0:
                                +                #raise ctypes.GetLastError()
                                +        #else:
                                +            #raise ValueError("Unsupported arg")
                                +    else:
                                +        raise ValueError("Unsupported op")
                                +
                                +
                                +class Pipe(object):
                                +    """Create an OS independent asynchronous pipe"""
                                +    def __init__(self):
                                +        # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
                                +
                                +        self.writer = socket.socket()
                                +        # Disable buffering -- pulling the trigger sends 1 byte,
                                +        # and we want that sent immediately, to wake up ASAP.
                                +        self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                                +
                                +        count = 0
                                +        while 1:
                                +            count += 1
                                +            # Bind to a local port; for efficiency, let the OS pick
                                +            # a free port for us.
                                +            # Unfortunately, stress tests showed that we may not
                                +            # be able to connect to that port ("Address already in
                                +            # use") despite that the OS picked it.  This appears
                                +            # to be a race bug in the Windows socket implementation.
                                +            # So we loop until a connect() succeeds (almost always
                                +            # on the first try).  See the long thread at
                                +            # http://mail.zope.org/pipermail/zope/2005-July/160433.html
                                +            # for hideous details.
                                +            a = socket.socket()
                                +            a.bind(("127.0.0.1", 0))
                                +            connect_address = a.getsockname()  # assigned (host, port) pair
                                +            a.listen(1)
                                +            try:
                                +                self.writer.connect(connect_address)
                                +                break    # success
                                +            except socket.error, detail:
                                +                if detail[0] != errno.WSAEADDRINUSE:
                                +                    # "Address already in use" is the only error
                                +                    # I've seen on two WinXP Pro SP2 boxes, under
                                +                    # Pythons 2.3.5 and 2.4.1.
                                +                    raise
                                +                # (10048, 'Address already in use')
                                +                # assert count <= 2 # never triggered in Tim's tests
                                +                if count >= 10:  # I've never seen it go above 2
                                +                    a.close()
                                +                    self.writer.close()
                                +                    raise socket.error("Cannot bind trigger!")
                                +                # Close `a` and try again.  Note:  I originally put a short
                                +                # sleep() here, but it didn't appear to help or hurt.
                                +                a.close()
                                +
                                +        self.reader, addr = a.accept()
                                +        self.reader.setblocking(0)
                                +        self.writer.setblocking(0)
                                +        a.close()
                                +        self.reader_fd = self.reader.fileno()
                                +
                                +    def read(self):
                                +        """Emulate a file descriptors read method"""
                                +        try:
                                +            return self.reader.recv(1)
                                +        except socket.error, ex:
                                +            if ex.args[0] == errno.EWOULDBLOCK:
                                +                raise IOError
                                +            raise
                                +
                                +    def write(self, data):
                                +        """Emulate a file descriptors write method"""
                                +        return self.writer.send(data)
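                                 +
                                 +
                                 +# A minimal illustrative sketch, not part of the original Tornado source:
                                 +# the Pipe above emulates the Unix "self-pipe trick".  A select() based loop
                                 +# watches pipe.reader_fd, and any other thread can wake it by writing a byte.
                                 +# The function name and timeout are hypothetical.
                                 +def _example_wait_for_wakeup(pipe, timeout=1.0):
                                 +    """Block until another thread writes to the pipe, or the timeout expires."""
                                 +    import select
                                 +    readable, _, _ = select.select([pipe.reader_fd], [], [], timeout)
                                 +    if readable:
                                 +        pipe.read()  # drain the wake-up byte
                                 +        return True
                                 +    return False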
                                diff --git a/vendor/tornado/tornado/wsgi.py b/vendor/tornado/tornado/wsgi.py
                                new file mode 100644
                                index 000000000000..69fa0988ebb7
                                --- /dev/null
                                +++ b/vendor/tornado/tornado/wsgi.py
                                @@ -0,0 +1,311 @@
                                +#!/usr/bin/env python
                                +#
                                +# Copyright 2009 Facebook
                                +#
                                +# Licensed under the Apache License, Version 2.0 (the "License"); you may
                                +# not use this file except in compliance with the License. You may obtain
                                +# a copy of the License at
                                +#
                                +#     http://www.apache.org/licenses/LICENSE-2.0
                                +#
                                +# Unless required by applicable law or agreed to in writing, software
                                +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
                                +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
                                +# License for the specific language governing permissions and limitations
                                +# under the License.
                                +
                                +"""WSGI support for the Tornado web framework.
                                +
                                +We export WSGIApplication, which is very similar to web.Application, except
                                +no asynchronous methods are supported (since WSGI does not support
                                +non-blocking requests properly). If you call self.flush() or other
                                +asynchronous methods in your request handlers running in a WSGIApplication,
                                +we throw an exception.
                                +
                                +Example usage:
                                +
                                +    import tornado.web
                                +    import tornado.wsgi
                                +    import wsgiref.simple_server
                                +
                                +    class MainHandler(tornado.web.RequestHandler):
                                +        def get(self):
                                +            self.write("Hello, world")
                                +
                                +    if __name__ == "__main__":
                                +        application = tornado.wsgi.WSGIApplication([
                                +            (r"/", MainHandler),
                                +        ])
                                +        server = wsgiref.simple_server.make_server('', 8888, application)
                                +        server.serve_forever()
                                +
                                +See the 'appengine' demo for an example of using this module to run
                                +a Tornado app on Google AppEngine.
                                +
                                 +Since no asynchronous methods are available for WSGI applications, neither
                                 +the httpclient nor the auth module is available to them.
                                +
                                +We also export WSGIContainer, which lets you run other WSGI-compatible
                                +frameworks on the Tornado HTTP server and I/O loop. See WSGIContainer for
                                +details and documentation.
                                +"""
                                +
                                +import cgi
                                +import cStringIO
                                +import escape
                                +import httplib
                                +import logging
                                +import sys
                                +import time
                                +import urllib
                                +import web
                                +
                                +_log = logging.getLogger('tornado.wsgi')
                                +
                                +class WSGIApplication(web.Application):
                                +    """A WSGI-equivalent of web.Application.
                                +
                                +    We support the same interface, but handlers running in a WSGIApplication
                                +    do not support flush() or asynchronous methods.
                                +    """
                                +    def __init__(self, handlers=None, default_host="", **settings):
                                +        web.Application.__init__(self, handlers, default_host, transforms=[],
                                +                                 wsgi=True, **settings)
                                +
                                +    def __call__(self, environ, start_response):
                                +        handler = web.Application.__call__(self, HTTPRequest(environ))
                                +        assert handler._finished
                                +        status = str(handler._status_code) + " " + \
                                +            httplib.responses[handler._status_code]
                                +        headers = handler._headers.items()
                                +        for cookie_dict in getattr(handler, "_new_cookies", []):
                                +            for cookie in cookie_dict.values():
                                +                headers.append(("Set-Cookie", cookie.OutputString(None)))
                                +        start_response(status, headers)
                                +        return handler._write_buffer
                                +
                                +
                                +class HTTPRequest(object):
                                +    """Mimics httpserver.HTTPRequest for WSGI applications."""
                                +    def __init__(self, environ):
                                +        """Parses the given WSGI environ to construct the request."""
                                +        self.method = environ["REQUEST_METHOD"]
                                +        self.path = urllib.quote(environ.get("SCRIPT_NAME", ""))
                                +        self.path += urllib.quote(environ.get("PATH_INFO", ""))
                                +        self.uri = self.path
                                +        self.arguments = {}
                                +        self.query = environ.get("QUERY_STRING", "")
                                +        if self.query:
                                +            self.uri += "?" + self.query
                                +            arguments = cgi.parse_qs(self.query)
                                +            for name, values in arguments.iteritems():
                                +                values = [v for v in values if v]
                                +                if values: self.arguments[name] = values
                                +        self.version = "HTTP/1.1"
                                +        self.headers = HTTPHeaders()
                                +        if environ.get("CONTENT_TYPE"):
                                +            self.headers["Content-Type"] = environ["CONTENT_TYPE"]
                                +        if environ.get("CONTENT_LENGTH"):
                                +            self.headers["Content-Length"] = int(environ["CONTENT_LENGTH"])
                                +        for key in environ:
                                +            if key.startswith("HTTP_"):
                                +                self.headers[key[5:].replace("_", "-")] = environ[key]
                                +        if self.headers.get("Content-Length"):
                                +            self.body = environ["wsgi.input"].read()
                                +        else:
                                +            self.body = ""
                                +        self.protocol = environ["wsgi.url_scheme"]
                                +        self.remote_ip = environ.get("REMOTE_ADDR", "")
                                +        if environ.get("HTTP_HOST"):
                                +            self.host = environ["HTTP_HOST"]
                                +        else:
                                +            self.host = environ["SERVER_NAME"]
                                +
                                +        # Parse request body
                                +        self.files = {}
                                +        content_type = self.headers.get("Content-Type", "")
                                +        if content_type.startswith("application/x-www-form-urlencoded"):
                                +            for name, values in cgi.parse_qs(self.body).iteritems():
                                +                self.arguments.setdefault(name, []).extend(values)
                                +        elif content_type.startswith("multipart/form-data"):
                                +            boundary = content_type[30:]
                                +            if boundary: self._parse_mime_body(boundary)
                                +
                                +        self._start_time = time.time()
                                +        self._finish_time = None
                                +
                                +    def supports_http_1_1(self):
                                +        """Returns True if this request supports HTTP/1.1 semantics"""
                                +        return self.version == "HTTP/1.1"
                                +
                                +    def full_url(self):
                                +        """Reconstructs the full URL for this request."""
                                +        return self.protocol + "://" + self.host + self.uri
                                +
                                +    def request_time(self):
                                +        """Returns the amount of time it took for this request to execute."""
                                +        if self._finish_time is None:
                                +            return time.time() - self._start_time
                                +        else:
                                +            return self._finish_time - self._start_time
                                +
                                +    def _parse_mime_body(self, boundary):
                                +        if self.body.endswith("\r\n"):
                                +            footer_length = len(boundary) + 6
                                +        else:
                                +            footer_length = len(boundary) + 4
                                +        parts = self.body[:-footer_length].split("--" + boundary + "\r\n")
                                +        for part in parts:
                                +            if not part: continue
                                +            eoh = part.find("\r\n\r\n")
                                +            if eoh == -1:
                                +                _log.warning("multipart/form-data missing headers")
                                +                continue
                                +            headers = HTTPHeaders.parse(part[:eoh])
                                +            name_header = headers.get("Content-Disposition", "")
                                +            if not name_header.startswith("form-data;") or \
                                +               not part.endswith("\r\n"):
                                +                _log.warning("Invalid multipart/form-data")
                                +                continue
                                +            value = part[eoh + 4:-2]
                                +            name_values = {}
                                +            for name_part in name_header[10:].split(";"):
                                +                name, name_value = name_part.strip().split("=", 1)
                                +                name_values[name] = name_value.strip('"').decode("utf-8")
                                +            if not name_values.get("name"):
                                +                _log.warning("multipart/form-data value missing name")
                                +                continue
                                +            name = name_values["name"]
                                +            if name_values.get("filename"):
                                +                ctype = headers.get("Content-Type", "application/unknown")
                                +                self.files.setdefault(name, []).append(dict(
                                +                    filename=name_values["filename"], body=value,
                                +                    content_type=ctype))
                                +            else:
                                +                self.arguments.setdefault(name, []).append(value)
                                +
                                +
                                +class WSGIContainer(object):
                                +    """Makes a WSGI-compatible function runnable on Tornado's HTTP server.
                                +
                                +    Wrap a WSGI function in a WSGIContainer and pass it to HTTPServer to
                                +    run it. For example:
                                +
                                +        def simple_app(environ, start_response):
                                +            status = "200 OK"
                                +            response_headers = [("Content-type", "text/plain")]
                                +            start_response(status, response_headers)
                                +            return ["Hello world!\n"]
                                +
                                +        container = tornado.wsgi.WSGIContainer(simple_app)
                                +        http_server = tornado.httpserver.HTTPServer(container)
                                +        http_server.listen(8888)
                                +        tornado.ioloop.IOLoop.instance().start()
                                +
                                +    This class is intended to let other frameworks (Django, web.py, etc)
                                +    run on the Tornado HTTP server and I/O loop. It has not yet been
                                +    thoroughly tested in production.
                                +    """
                                +    def __init__(self, wsgi_application):
                                +        self.wsgi_application = wsgi_application
                                +
                                +    def __call__(self, request):
                                +        data = {}
                                +        response = []
                                +        def start_response(status, response_headers, exc_info=None):
                                +            data["status"] = status
                                +            data["headers"] = response_headers
                                +            return response.append
+        app_response = self.wsgi_application(
+            WSGIContainer.environ(request), start_response)
+        response.extend(app_response)
+        body = "".join(response)
+        # Close the application's response iterable (the list itself has no
+        # close method, so check the object the application actually returned).
+        if hasattr(app_response, "close"):
+            app_response.close()
                                +        if not data: raise Exception("WSGI app did not call start_response")
                                +
                                +        status_code = int(data["status"].split()[0])
                                +        headers = data["headers"]
                                +        header_set = set(k.lower() for (k,v) in headers)
                                +        body = escape.utf8(body)
                                +        if "content-length" not in header_set:
                                +            headers.append(("Content-Length", str(len(body))))
                                +        if "content-type" not in header_set:
                                +            headers.append(("Content-Type", "text/html; charset=UTF-8"))
                                +        if "server" not in header_set:
                                +            headers.append(("Server", "TornadoServer/0.1"))
                                +
                                +        parts = ["HTTP/1.1 " + data["status"] + "\r\n"]
                                +        for key, value in headers:
                                +            parts.append(escape.utf8(key) + ": " + escape.utf8(value) + "\r\n")
                                +        parts.append("\r\n")
                                +        parts.append(body)
                                +        request.write("".join(parts))
                                +        request.finish()
                                +        self._log(status_code, request)
                                +
                                +    @staticmethod
                                +    def environ(request):
                                +        hostport = request.host.split(":")
                                +        if len(hostport) == 2:
                                +            host = hostport[0]
                                +            port = int(hostport[1])
                                +        else:
                                +            host = request.host
                                +            port = 443 if request.protocol == "https" else 80
                                +        environ = {
                                +            "REQUEST_METHOD": request.method,
                                +            "SCRIPT_NAME": "",
                                +            "PATH_INFO": request.path,
                                +            "QUERY_STRING": request.query,
                                +            "REMOTE_ADDR": request.remote_ip,
                                +            "SERVER_NAME": host,
                                +            "SERVER_PORT": port,
                                +            "SERVER_PROTOCOL": request.version,
                                +            "wsgi.version": (1, 0),
                                +            "wsgi.url_scheme": request.protocol,
                                +            "wsgi.input": cStringIO.StringIO(request.body),
                                +            "wsgi.errors": sys.stderr,
                                +            "wsgi.multithread": False,
                                +            "wsgi.multiprocess": True,
                                +            "wsgi.run_once": False,
                                +        }
                                +        if "Content-Type" in request.headers:
                                +            environ["CONTENT_TYPE"] = request.headers["Content-Type"]
                                +        if "Content-Length" in request.headers:
                                +            environ["CONTENT_LENGTH"] = request.headers["Content-Length"]
                                +        for key, value in request.headers.iteritems():
                                +            environ["HTTP_" + key.replace("-", "_").upper()] = value
                                +        return environ
                                +
                                +    def _log(self, status_code, request):
                                +        if status_code < 400:
                                +            log_method = _log.info
                                +        elif status_code < 500:
                                +            log_method = _log.warning
                                +        else:
                                +            log_method = _log.error
                                +        request_time = 1000.0 * request.request_time()
                                +        summary = request.method + " " + request.uri + " (" + \
                                +            request.remote_ip + ")"
                                +        log_method("%d %s %.2fms", status_code, summary, request_time)
                                +
                                +
                                +class HTTPHeaders(dict):
                                +    """A dictionary that maintains Http-Header-Case for all keys."""
                                +    def __setitem__(self, name, value):
                                +        dict.__setitem__(self, self._normalize_name(name), value)
                                +
                                +    def __getitem__(self, name):
                                +        return dict.__getitem__(self, self._normalize_name(name))
                                +
                                +    def _normalize_name(self, name):
                                +        return "-".join([w.capitalize() for w in name.split("-")])
                                +
                                +    @classmethod
                                +    def parse(cls, headers_string):
                                +        headers = cls()
                                +        for line in headers_string.splitlines():
                                +            if line:
                                +                name, value = line.split(": ", 1)
                                +                headers[name] = value
                                +        return headers
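+
+# Example of the case normalization HTTPHeaders provides (a sketch; the
+# values shown are what _normalize_name and parse produce):
+#
+#     h = HTTPHeaders()
+#     h["content-type"] = "text/html"
+#     h["Content-Type"]    # -> "text/html"
+#     HTTPHeaders.parse("Content-Length: 42\r\nHost: example.com")
+#     # -> {"Content-Length": "42", "Host": "example.com"}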
                                diff --git a/vendor/tornado/website/app.yaml b/vendor/tornado/website/app.yaml
                                new file mode 100644
                                index 000000000000..8a1ff06648df
                                --- /dev/null
                                +++ b/vendor/tornado/website/app.yaml
                                @@ -0,0 +1,15 @@
                                +application: python-tornado
                                +version: 1
                                +runtime: python
                                +api_version: 1
                                +
                                +handlers:
                                +- url: /static/
                                +  static_dir: static
                                +
                                +- url: /robots\.txt
                                +  static_files: static/robots.txt
                                +  upload: static/robots.txt
                                +
                                +- url: /.*
                                +  script: website.py
                                diff --git a/vendor/tornado/website/index.yaml b/vendor/tornado/website/index.yaml
                                new file mode 100644
                                index 000000000000..e69de29bb2d1
                                diff --git a/vendor/tornado/website/markdown/__init__.py b/vendor/tornado/website/markdown/__init__.py
                                new file mode 100644
                                index 000000000000..0d1c50497920
                                --- /dev/null
                                +++ b/vendor/tornado/website/markdown/__init__.py
                                @@ -0,0 +1,603 @@
                                +"""
                                +Python Markdown
                                +===============
                                +
                                +Python Markdown converts Markdown to HTML and can be used as a library or
                                +called from the command line.
                                +
                                +## Basic usage as a module:
                                +
                                +    import markdown
                                +    md = Markdown()
                                +    html = md.convert(your_text_string)
                                +
                                +## Basic use from the command line:
                                +
                                +    python markdown.py source.txt > destination.html
                                +
                                +Run "python markdown.py --help" to see more options.
                                +
                                +## Extensions
                                +
+See <http://www.freewisdom.org/projects/python-markdown/Extensions> for more
                                +information and instructions on how to extend the functionality of
                                +Python Markdown.  Read that before you try modifying this file.
                                +
                                +## Authors and License
                                +
                                +Started by [Manfred Stienstra](http://www.dwerg.net/).  Continued and
                                +maintained  by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
                                +Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
                                +
                                +Contact: markdown@freewisdom.org
                                +
                                +Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
                                +Copyright 200? Django Software Foundation (OrderedDict implementation)
                                +Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
                                +Copyright 2004 Manfred Stienstra (the original version)
                                +
                                +License: BSD (see docs/LICENSE for details).
                                +"""
                                +
                                +version = "2.0"
                                +version_info = (2,0,0, "Final")
                                +
                                +import re
                                +import codecs
                                +import sys
                                +import warnings
                                +import logging
                                +from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
                                +
                                +
                                +"""
                                +CONSTANTS
                                +=============================================================================
                                +"""
                                +
                                +"""
                                +Constants you might want to modify
                                +-----------------------------------------------------------------------------
                                +"""
                                +
                                +# default logging level for command-line use
                                +COMMAND_LINE_LOGGING_LEVEL = CRITICAL
                                +TAB_LENGTH = 4               # expand tabs to this many spaces
                                +ENABLE_ATTRIBUTES = True     # @id = xyz -> <... id="xyz">
                                +SMART_EMPHASIS = True        # this_or_that does not become thisorthat
                                +DEFAULT_OUTPUT_FORMAT = 'xhtml1'     # xhtml or html4 output
                                +HTML_REMOVED_TEXT = "[HTML_REMOVED]" # text used instead of HTML in safe mode
                                +BLOCK_LEVEL_ELEMENTS = re.compile("p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
                                +                                  "|script|noscript|form|fieldset|iframe|math"
                                +                                  "|ins|del|hr|hr/|style|li|dt|dd|thead|tbody"
                                +                                  "|tr|th|td")
                                +DOC_TAG = "div"     # Element used to wrap document - later removed
                                +
                                +# Placeholders
                                +STX = u'\u0002'  # Use STX ("Start of text") for start-of-placeholder
                                +ETX = u'\u0003'  # Use ETX ("End of text") for end-of-placeholder
                                +INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
                                +INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
                                +AMP_SUBSTITUTE = STX+"amp"+ETX
                                +
                                +
                                +"""
                                +Constants you probably do not need to change
                                +-----------------------------------------------------------------------------
                                +"""
                                +
                                +RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
                                +                     # Hebrew (0590-05FF), Arabic (0600-06FF),
                                +                     # Syriac (0700-074F), Arabic supplement (0750-077F),
                                +                     # Thaana (0780-07BF), Nko (07C0-07FF).
                                +                    (u'\u2D30', u'\u2D7F'), # Tifinagh
                                +                    )
                                +
                                +
                                +"""
                                +AUXILIARY GLOBAL FUNCTIONS
                                +=============================================================================
                                +"""
                                +
                                +
                                +def message(level, text):
                                +    """ A wrapper method for logging debug messages. """
                                +    logger =  logging.getLogger('MARKDOWN')
                                +    if logger.handlers:
                                +        # The logger is configured
                                +        logger.log(level, text)
                                +        if level > WARN:
                                +            sys.exit(0)
                                +    elif level > WARN:
                                +        raise MarkdownException, text
                                +    else:
                                +        warnings.warn(text, MarkdownWarning)
                                +
                                +
                                +def isBlockLevel(tag):
                                +    """Check if the tag is a block level HTML tag."""
                                +    return BLOCK_LEVEL_ELEMENTS.match(tag)
                                +
                                +"""
                                +MISC AUXILIARY CLASSES
                                +=============================================================================
                                +"""
                                +
                                +class AtomicString(unicode):
                                +    """A string which should not be further processed."""
                                +    pass
                                +
                                +
                                +class MarkdownException(Exception):
                                +    """ A Markdown Exception. """
                                +    pass
                                +
                                +
                                +class MarkdownWarning(Warning):
                                +    """ A Markdown Warning. """
                                +    pass
                                +
                                +
                                +"""
                                +OVERALL DESIGN
                                +=============================================================================
                                +
+Markdown processing takes place in five steps:
                                +
                                +1. A bunch of "preprocessors" munge the input text.
                                +2. BlockParser() parses the high-level structural elements of the
                                +   pre-processed text into an ElementTree.
                                +3. A bunch of "treeprocessors" are run against the ElementTree. One such
                                +   treeprocessor runs InlinePatterns against the ElementTree, detecting inline
                                +   markup.
                                +4. Some post-processors are run against the text after the ElementTree has
                                +   been serialized into text.
                                +5. The output is written to a string.
                                +
                                +Those steps are put together by the Markdown() class.
                                +
                                +"""
                                +
                                +import preprocessors
                                +import blockprocessors
                                +import treeprocessors
                                +import inlinepatterns
                                +import postprocessors
                                +import blockparser
                                +import etree_loader
                                +import odict
                                +
                                +# Extensions should use "markdown.etree" instead of "etree" (or do `from
+# markdown import etree`).  Do not import it yourself.
                                +
                                +etree = etree_loader.importETree()
                                +
                                +# Adds the ability to output html4
                                +import html4
                                +
                                +
                                +class Markdown:
                                +    """Convert Markdown to HTML."""
                                +
                                +    def __init__(self,
                                +                 extensions=[],
                                +                 extension_configs={},
                                +                 safe_mode = False, 
                                +                 output_format=DEFAULT_OUTPUT_FORMAT):
                                +        """
                                +        Creates a new Markdown instance.
                                +
                                +        Keyword arguments:
                                +
                                +        * extensions: A list of extensions.
                                +           If they are of type string, the module mdx_name.py will be loaded.
                                +           If they are a subclass of markdown.Extension, they will be used
                                +           as-is.
+        * extension_configs: Configuration settings for extensions.
                                +        * safe_mode: Disallow raw html. One of "remove", "replace" or "escape".
                                +        * output_format: Format of output. Supported formats are:
                                +            * "xhtml1": Outputs XHTML 1.x. Default.
                                +            * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
                                +            * "html4": Outputs HTML 4
                                +            * "html": Outputs latest supported version of HTML (currently HTML 4).
                                +            Note that it is suggested that the more specific formats ("xhtml1" 
                                +            and "html4") be used as "xhtml" or "html" may change in the future
                                +            if it makes sense at that time. 
                                +
                                +        """
                                +        
                                +        self.safeMode = safe_mode
                                +        self.registeredExtensions = []
                                +        self.docType = ""
                                +        self.stripTopLevelTags = True
                                +
                                +        # Preprocessors
                                +        self.preprocessors = odict.OrderedDict()
                                +        self.preprocessors["html_block"] = \
                                +                preprocessors.HtmlBlockPreprocessor(self)
                                +        self.preprocessors["reference"] = \
                                +                preprocessors.ReferencePreprocessor(self)
                                +        # footnote preprocessor will be inserted with "amp_substitute"
                                +
                                +        # Map format keys to serializers
                                +        self.output_formats = {
                                +            'html'  : html4.to_html_string, 
                                +            'html4' : html4.to_html_string,
                                +            'xhtml' : etree.tostring, 
                                +            'xhtml1': etree.tostring,
                                +        }
                                +
                                +        self.references = {}
                                +        self.htmlStash = preprocessors.HtmlStash()
                                +        self.registerExtensions(extensions = extensions,
                                +                                configs = extension_configs)
                                +        self.set_output_format(output_format)
                                +        self.reset()
                                +
                                +    def registerExtensions(self, extensions, configs):
                                +        """
                                +        Register extensions with this instance of Markdown.
                                +
+        Keyword arguments:
                                +
                                +        * extensions: A list of extensions, which can either
                                +           be strings or objects.  See the docstring on Markdown.
                                +        * configs: A dictionary mapping module names to config options.
                                +
                                +        """
                                +        for ext in extensions:
                                +            if isinstance(ext, basestring):
                                +                ext = load_extension(ext, configs.get(ext, []))
                                +            try:
                                +                ext.extendMarkdown(self, globals())
                                +            except AttributeError:
+                message(ERROR, "Incorrect type! Extension '%s' is "
+                               "neither a string nor an Extension." %(repr(ext)))
                                +            
                                +
                                +    def registerExtension(self, extension):
                                +        """ This gets called by the extension """
                                +        self.registeredExtensions.append(extension)
                                +
                                +    def reset(self):
                                +        """
                                +        Resets all state variables so that we can start with a new text.
                                +        """
                                +        self.htmlStash.reset()
                                +        self.references.clear()
                                +
                                +        for extension in self.registeredExtensions:
                                +            extension.reset()
                                +
                                +    def set_output_format(self, format):
                                +        """ Set the output format for the class instance. """
                                +        try:
                                +            self.serializer = self.output_formats[format.lower()]
                                +        except KeyError:
                                +            message(CRITICAL, 'Invalid Output Format: "%s". Use one of %s.' \
                                +                               % (format, self.output_formats.keys()))
                                +
                                +    def convert(self, source):
                                +        """
                                +        Convert markdown to serialized XHTML or HTML.
                                +
                                +        Keyword arguments:
                                +
                                +        * source: Source text as a Unicode string.
                                +
                                +        """
                                +
                                +        # Fixup the source text
                                +        if not source.strip():
                                +            return u""  # a blank unicode string
                                +        try:
                                +            source = unicode(source)
                                +        except UnicodeDecodeError:
                                +            message(CRITICAL, 'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')
                                +            return u""
                                +
                                +        source = source.replace(STX, "").replace(ETX, "")
                                +        source = source.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
                                +        source = re.sub(r'\n\s+\n', '\n\n', source)
                                +        source = source.expandtabs(TAB_LENGTH)
                                +
                                +        # Split into lines and run the line preprocessors.
                                +        self.lines = source.split("\n")
                                +        for prep in self.preprocessors.values():
                                +            self.lines = prep.run(self.lines)
                                +
                                +        # Parse the high-level elements.
                                +        root = self.parser.parseDocument(self.lines).getroot()
                                +
                                +        # Run the tree-processors
                                +        for treeprocessor in self.treeprocessors.values():
                                +            newRoot = treeprocessor.run(root)
                                +            if newRoot:
                                +                root = newRoot
                                +
                                +        # Serialize _properly_.  Strip top-level tags.
                                +        output, length = codecs.utf_8_decode(self.serializer(root, encoding="utf8"))
                                +        if self.stripTopLevelTags:
                                +            start = output.index('<%s>'%DOC_TAG)+len(DOC_TAG)+2
+            end = output.rindex('</%s>'%DOC_TAG)
                                +            output = output[start:end].strip()
                                +
                                +        # Run the text post-processors
                                +        for pp in self.postprocessors.values():
                                +            output = pp.run(output)
                                +
                                +        return output.strip()
                                +
                                +    def convertFile(self, input=None, output=None, encoding=None):
                                +        """Converts a markdown file and returns the HTML as a unicode string.
                                +
                                +        Decodes the file using the provided encoding (defaults to utf-8),
                                +        passes the file content to markdown, and outputs the html to either
                                +        the provided stream or the file with provided name, using the same
                                +        encoding as the source file.
                                +
                                +        **Note:** This is the only place that decoding and encoding of unicode
                                +        takes place in Python-Markdown.  (All other code is unicode-in /
                                +        unicode-out.)
                                +
                                +        Keyword arguments:
                                +
                                +        * input: Name of source text file.
                                +        * output: Name of output file. Writes to stdout if `None`.
                                +        * encoding: Encoding of input and output files. Defaults to utf-8.
                                +
                                +        """
                                +
                                +        encoding = encoding or "utf-8"
                                +
                                +        # Read the source
                                +        input_file = codecs.open(input, mode="r", encoding=encoding)
                                +        text = input_file.read()
                                +        input_file.close()
                                +        text = text.lstrip(u'\ufeff') # remove the byte-order mark
                                +
                                +        # Convert
                                +        html = self.convert(text)
                                +
                                +        # Write to file or stdout
                                +        if isinstance(output, (str, unicode)):
                                +            output_file = codecs.open(output, "w", encoding=encoding)
                                +            output_file.write(html)
                                +            output_file.close()
+        else:
+            # `output` may be an already-open stream; fall back to stdout
+            # when it is None, as documented above.
+            (output or sys.stdout).write(html.encode(encoding))
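+
+    # Example (a sketch; the file names are placeholders):
+    #
+    #     md = Markdown()
+    #     md.convertFile(input="README.txt", output="README.html",
+    #                    encoding="utf-8")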
                                +
                                +
                                +"""
                                +Extensions
                                +-----------------------------------------------------------------------------
                                +"""
                                +
                                +class Extension:
                                +    """ Base class for extensions to subclass. """
                                +    def __init__(self, configs = {}):
                                +        """Create an instance of an Extention.
                                +
                                +        Keyword arguments:
                                +
                                +        * configs: A dict of configuration setting used by an Extension.
                                +        """
                                +        self.config = configs
                                +
                                +    def getConfig(self, key):
                                +        """ Return a setting for the given key or an empty string. """
                                +        if key in self.config:
                                +            return self.config[key][0]
                                +        else:
                                +            return ""
                                +
                                +    def getConfigInfo(self):
                                +        """ Return all config settings as a list of tuples. """
                                +        return [(key, self.config[key][1]) for key in self.config.keys()]
                                +
                                +    def setConfig(self, key, value):
                                +        """ Set a config setting for `key` with the given `value`. """
                                +        self.config[key][0] = value
                                +
                                +    def extendMarkdown(self, md, md_globals):
                                +        """
+        Add the various processors and patterns to the Markdown instance.
                                +
+        This method must be overridden by every extension.
                                +
                                +        Keyword arguments:
                                +
                                +        * md: The Markdown instance.
                                +
                                +        * md_globals: Global variables in the markdown module namespace.
                                +
                                +        """
                                +        pass
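+
+    # The config dict is expected to map each key to a [value, description]
+    # pair, which is what getConfig()/getConfigInfo()/setConfig() above
+    # assume. A sketch for a hypothetical extension:
+    #
+    #     configs = {"title": ["Contents", "Heading placed above the output"]}
+    #     ext = Extension(configs=configs)
+    #     ext.getConfig("title")         # -> "Contents"
+    #     ext.setConfig("title", "TOC")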
                                +
                                +
                                +def load_extension(ext_name, configs = []):
                                +    """Load extension by name, then return the module.
                                +
                                +    The extension name may contain arguments as part of the string in the
                                +    following format: "extname(key1=value1,key2=value2)"
                                +
                                +    """
                                +
                                +    # Parse extensions config params (ignore the order)
                                +    configs = dict(configs)
                                +    pos = ext_name.find("(") # find the first "("
                                +    if pos > 0:
                                +        ext_args = ext_name[pos+1:-1]
                                +        ext_name = ext_name[:pos]
                                +        pairs = [x.split("=") for x in ext_args.split(",")]
                                +        configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
                                +
                                +    # Setup the module names
                                +    ext_module = 'markdown.extensions'
                                +    module_name_new_style = '.'.join([ext_module, ext_name])
                                +    module_name_old_style = '_'.join(['mdx', ext_name])
                                +
                                +    # Try loading the extention first from one place, then another
+    try: # New style (markdown.extensions.<extension>)
                                +        module = __import__(module_name_new_style, {}, {}, [ext_module])
                                +    except ImportError:
+        try: # Old style (mdx_<extension>)
                                +            module = __import__(module_name_old_style)
                                +        except ImportError:
                                +           message(WARN, "Failed loading extension '%s' from '%s' or '%s'"
                                +               % (ext_name, module_name_new_style, module_name_old_style))
+           # Return None so we don't try to initiate a non-existent extension
                                +           return None
                                +
                                +    # If the module is loaded successfully, we expect it to define a
                                +    # function called makeExtension()
                                +    try:
                                +        return module.makeExtension(configs.items())
                                +    except AttributeError:
                                +        message(CRITICAL, "Failed to initiate extension '%s'" % ext_name)
                                +
                                +
                                +def load_extensions(ext_names):
                                +    """Loads multiple extensions"""
                                +    extensions = []
                                +    for ext_name in ext_names:
                                +        extension = load_extension(ext_name)
                                +        if extension:
                                +            extensions.append(extension)
                                +    return extensions
                                +
                                +
                                +"""
                                +EXPORTED FUNCTIONS
                                +=============================================================================
                                +
+These are the two functions we really mean to export: markdown() and
                                +markdownFromFile().
                                +"""
                                +
                                +def markdown(text,
                                +             extensions = [],
                                +             safe_mode = False,
                                +             output_format = DEFAULT_OUTPUT_FORMAT):
                                +    """Convert a markdown string to HTML and return HTML as a unicode string.
                                +
                                +    This is a shortcut function for `Markdown` class to cover the most
                                +    basic use case.  It initializes an instance of Markdown, loads the
                                +    necessary extensions and runs the parser on the given text.
                                +
                                +    Keyword arguments:
                                +
                                +    * text: Markdown formatted text as Unicode or ASCII string.
                                +    * extensions: A list of extensions or extension names (may contain config args).
                                +    * safe_mode: Disallow raw html.  One of "remove", "replace" or "escape".
                                +    * output_format: Format of output. Supported formats are:
                                +        * "xhtml1": Outputs XHTML 1.x. Default.
                                +        * "xhtml": Outputs latest supported version of XHTML (currently XHTML 1.1).
                                +        * "html4": Outputs HTML 4
                                +        * "html": Outputs latest supported version of HTML (currently HTML 4).
                                +        Note that it is suggested that the more specific formats ("xhtml1" 
                                +        and "html4") be used as "xhtml" or "html" may change in the future
                                +        if it makes sense at that time. 
                                +
                                +    Returns: An HTML document as a string.
                                +
                                +    """
                                +    md = Markdown(extensions=load_extensions(extensions),
                                +                  safe_mode=safe_mode, 
                                +                  output_format=output_format)
                                +    return md.convert(text)
                                +
                                +
                                +def markdownFromFile(input = None,
                                +                     output = None,
                                +                     extensions = [],
                                +                     encoding = None,
                                +                     safe_mode = False,
                                +                     output_format = DEFAULT_OUTPUT_FORMAT):
                                +    """Read markdown code from a file and write it to a file or a stream."""
                                +    md = Markdown(extensions=load_extensions(extensions), 
                                +                  safe_mode=safe_mode,
                                +                  output_format=output_format)
                                +    md.convertFile(input, output, encoding)
                                +
                                +
                                +
                                diff --git a/vendor/tornado/website/markdown/blockparser.py b/vendor/tornado/website/markdown/blockparser.py
                                new file mode 100644
                                index 000000000000..e18b3384877d
                                --- /dev/null
                                +++ b/vendor/tornado/website/markdown/blockparser.py
                                @@ -0,0 +1,95 @@
                                +
                                +import markdown
                                +
                                +class State(list):
                                +    """ Track the current and nested state of the parser. 
                                +    
                                +    This utility class is used to track the state of the BlockParser and 
+    support multiple levels of nesting. It's just a simple API wrapped around
                                +    a list. Each time a state is set, that state is appended to the end of the
                                +    list. Each time a state is reset, that state is removed from the end of
                                +    the list.
                                +
                                +    Therefore, each time a state is set for a nested block, that state must be 
                                +    reset when we back out of that level of nesting or the state could be
                                +    corrupted.
                                +
                                +    While all the methods of a list object are available, only the three
                                +    defined below need be used.
                                +
                                +    """
                                +
                                +    def set(self, state):
                                +        """ Set a new state. """
                                +        self.append(state)
                                +
                                +    def reset(self):
                                +        """ Step back one step in nested state. """
                                +        self.pop()
                                +
                                +    def isstate(self, state):
                                +        """ Test that top (current) level is of given state. """
                                +        if len(self):
                                +            return self[-1] == state
                                +        else:
                                +            return False
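+
+# Example of how a block processor might use State (a sketch; the state
+# names here are arbitrary labels):
+#
+#     state = State()
+#     state.set('list')           # entering a list
+#     state.set('looselist')      #   entering a nested loose list
+#     state.isstate('looselist')  # -> True
+#     state.reset()               # leaving the nested level
+#     state.isstate('list')       # -> True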
                                +
                                +class BlockParser:
                                +    """ Parse Markdown blocks into an ElementTree object. 
                                +    
                                +    A wrapper class that stitches the various BlockProcessors together,
                                +    looping through them and creating an ElementTree object.
                                +    """
                                +
                                +    def __init__(self):
                                +        self.blockprocessors = markdown.odict.OrderedDict()
                                +        self.state = State()
                                +
                                +    def parseDocument(self, lines):
                                +        """ Parse a markdown document into an ElementTree. 
                                +        
                                +        Given a list of lines, an ElementTree object (not just a parent Element)
                                +        is created and the root element is passed to the parser as the parent.
                                +        The ElementTree object is returned.
                                +        
                                +        This should only be called on an entire document, not pieces.
                                +
                                +        """
                                +        # Create a ElementTree from the lines
                                +        self.root = markdown.etree.Element(markdown.DOC_TAG)
                                +        self.parseChunk(self.root, '\n'.join(lines))
                                +        return markdown.etree.ElementTree(self.root)
                                +
                                +    def parseChunk(self, parent, text):
                                +        """ Parse a chunk of markdown text and attach to given etree node. 
                                +        
                                +        While the ``text`` argument is generally assumed to contain multiple
                                +        blocks which will be split on blank lines, it could contain only one
                                +        block. Generally, this method would be called by extensions when
                                +        block parsing is required. 
                                +        
                                +        The ``parent`` etree Element passed in is altered in place. 
                                +        Nothing is returned.
                                +
                                +        """
                                +        self.parseBlocks(parent, text.split('\n\n'))
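+        # Example of the split (illustrative, not from the original source):
+        # "Para one.\n\nPara two." becomes ['Para one.', 'Para two.'], and
+        # each block is then offered to the registered BlockProcessors in
+        # order until one accepts it.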
                                +
                                +    def parseBlocks(self, parent, blocks):
                                +        """ Process blocks of markdown text and attach to given etree node. 
                                +        
                                +        Given a list of ``blocks``, each blockprocessor is stepped through
                                +        until there are no blocks left. While an extension could potentially
                                +        call this method directly, it's generally expected to be used internally.
                                +
                                +        This is a public method as an extension may need to add/alter additional
                                +        BlockProcessors which call this method to recursively parse a nested
                                +        block.
                                +
                                +        """
+        while blocks:
+            for processor in self.blockprocessors.values():
+                if processor.test(parent, blocks[0]):
+                    processor.run(parent, blocks)
+                    break
                                +
                                +
                                diff --git a/vendor/tornado/website/markdown/blockprocessors.py b/vendor/tornado/website/markdown/blockprocessors.py
                                new file mode 100644
                                index 000000000000..79f4db93bc57
                                --- /dev/null
                                +++ b/vendor/tornado/website/markdown/blockprocessors.py
                                @@ -0,0 +1,460 @@
                                +"""
                                +CORE MARKDOWN BLOCKPARSER
                                +=============================================================================
                                +
                                +This parser handles basic parsing of Markdown blocks.  It doesn't concern itself
                                +with inline elements such as **bold** or *italics*, but rather just catches 
                                +blocks, lists, quotes, etc.
                                +
+The BlockParser is made up of a bunch of BlockProcessors, each handling a
                                +different type of block. Extensions may add/replace/remove BlockProcessors
                                +as they need to alter how markdown blocks are parsed.
                                +
                                +"""
                                +
                                +import re
                                +import markdown
                                +
                                +class BlockProcessor:
                                +    """ Base class for block processors. 
                                +    
                                +    Each subclass will provide the methods below to work with the source and
+    tree. Each processor will need to define its own ``test`` and ``run``
                                +    methods. The ``test`` method should return True or False, to indicate
                                +    whether the current block should be processed by this processor. If the
+    test passes, the parser will call the processor's ``run`` method.
                                +
                                +    """
                                +
                                +    def __init__(self, parser=None):
                                +        self.parser = parser
                                +
                                +    def lastChild(self, parent):
                                +        """ Return the last child of an etree element. """
                                +        if len(parent):
                                +            return parent[-1]
                                +        else:
                                +            return None
                                +
                                +    def detab(self, text):
                                +        """ Remove a tab from the front of each line of the given text. """
                                +        newtext = []
                                +        lines = text.split('\n')
                                +        for line in lines:
                                +            if line.startswith(' '*markdown.TAB_LENGTH):
                                +                newtext.append(line[markdown.TAB_LENGTH:])
                                +            elif not line.strip():
                                +                newtext.append('')
                                +            else:
                                +                break
                                +        return '\n'.join(newtext), '\n'.join(lines[len(newtext):])
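+        # Illustrative example (assuming markdown.TAB_LENGTH == 4, the
+        # library default): detab("    indented\nplain") returns
+        # ("indented", "plain") -- the dedented prefix plus the untouched
+        # remainder, which starts at the first non-indented, non-blank line.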
                                +
                                +    def looseDetab(self, text, level=1):
                                +        """ Remove a tab from front of lines but allowing dedented lines. """
                                +        lines = text.split('\n')
                                +        for i in range(len(lines)):
                                +            if lines[i].startswith(' '*markdown.TAB_LENGTH*level):
                                +                lines[i] = lines[i][markdown.TAB_LENGTH*level:]
                                +        return '\n'.join(lines)
                                +
                                +    def test(self, parent, block):
                                +        """ Test for block type. Must be overridden by subclasses. 
                                +        
                                +        As the parser loops through processors, it will call the ``test`` method
                                +        on each to determine if the given block of text is of that type. This
                                +        method must return a boolean ``True`` or ``False``. The actual method of
                                +        testing is left to the needs of that particular block type. It could 
                                +        be as simple as ``block.startswith(some_string)`` or a complex regular
                                +        expression. As the block type may be different depending on the parent
+        of the block (e.g. inside a list), the parent etree element is also
                                +        provided and may be used as part of the test.
                                +
                                +        Keywords:
                                +        
+        * ``parent``: An etree element which will be the parent of the block.
                                +        * ``block``: A block of text from the source which has been split at 
                                +            blank lines.
                                +        """
                                +        pass
                                +
                                +    def run(self, parent, blocks):
                                +        """ Run processor. Must be overridden by subclasses. 
                                +        
                                +        When the parser determines the appropriate type of a block, the parser
                                +        will call the corresponding processor's ``run`` method. This method
                                +        should parse the individual lines of the block and append them to
                                +        the etree. 
                                +
+        Note that both the ``parent`` element and the ``blocks`` list are
+        references to objects which should be edited in place. Each
                                +        processor must make changes to the existing objects as there is no
                                +        mechanism to return new/different objects to replace them.
                                +
                                +        This means that this method should be adding SubElements or adding text
                                +        to the parent, and should remove (``pop``) or add (``insert``) items to
                                +        the list of blocks.
                                +
                                +        Keywords:
                                +
+        * ``parent``: An etree element which is the parent of the current block.
                                +        * ``blocks``: A list of all remaining blocks of the document.
                                +        """
                                +        pass
                                +
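+# Minimal subclass sketch (illustrative only, not part of the upstream
+# source); a processor turning blocks that start with '%%' into <aside>
+# elements would look roughly like:
+#
+#     class AsideProcessor(BlockProcessor):
+#         def test(self, parent, block):
+#             return block.startswith('%%')
+#
+#         def run(self, parent, blocks):
+#             block = blocks.pop(0)
+#             aside = markdown.etree.SubElement(parent, 'aside')
+#             aside.text = block[2:].strip()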
                                +
                                +class ListIndentProcessor(BlockProcessor):
                                +    """ Process children of list items. 
                                +    
                                +    Example:
                                +        * a list item
                                +            process this part
                                +
                                +            or this part
                                +
                                +    """
                                +
                                +    INDENT_RE = re.compile(r'^(([ ]{%s})+)'% markdown.TAB_LENGTH)
                                +    ITEM_TYPES = ['li']
                                +    LIST_TYPES = ['ul', 'ol']
                                +
                                +    def test(self, parent, block):
                                +        return block.startswith(' '*markdown.TAB_LENGTH) and \
                                +                not self.parser.state.isstate('detabbed') and  \
                                +                (parent.tag in self.ITEM_TYPES or \
                                +                    (len(parent) and parent[-1] and \
                                +                        (parent[-1].tag in self.LIST_TYPES)
                                +                    )
                                +                )
                                +
                                +    def run(self, parent, blocks):
                                +        block = blocks.pop(0)
                                +        level, sibling = self.get_level(parent, block)
                                +        block = self.looseDetab(block, level)
                                +
                                +        self.parser.state.set('detabbed')
                                +        if parent.tag in self.ITEM_TYPES:
                                +            # The parent is already a li. Just parse the child block.
                                +            self.parser.parseBlocks(parent, [block])
                                +        elif sibling.tag in self.ITEM_TYPES:
                                +            # The sibling is a li. Use it as parent.
                                +            self.parser.parseBlocks(sibling, [block])
                                +        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
                                +            # The parent is a list (``ol`` or ``ul``) which has children.
                                +            # Assume the last child li is the parent of this block.
                                +            if sibling[-1].text:
                                +                # If the parent li has text, that text needs to be moved to a p
                                +                block = '%s\n\n%s' % (sibling[-1].text, block)
                                +                sibling[-1].text = ''
                                +            self.parser.parseChunk(sibling[-1], block)
                                +        else:
                                +            self.create_item(sibling, block)
                                +        self.parser.state.reset()
                                +
                                +    def create_item(self, parent, block):
                                +        """ Create a new li and parse the block with it as the parent. """
                                +        li = markdown.etree.SubElement(parent, 'li')
                                +        self.parser.parseBlocks(li, [block])
                                + 
                                +    def get_level(self, parent, block):
                                +        """ Get level of indent based on list level. """
                                +        # Get indent level
                                +        m = self.INDENT_RE.match(block)
                                +        if m:
                                +            indent_level = len(m.group(1))/markdown.TAB_LENGTH
                                +        else:
                                +            indent_level = 0
                                +        if self.parser.state.isstate('list'):
                                +            # We're in a tightlist - so we already are at correct parent.
                                +            level = 1
                                +        else:
                                +            # We're in a looselist - so we need to find parent.
                                +            level = 0
                                +        # Step through children of tree to find matching indent level.
                                +        while indent_level > level:
                                +            child = self.lastChild(parent)
                                +            if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
                                +                if child.tag in self.LIST_TYPES:
                                +                    level += 1
                                +                parent = child
                                +            else:
                                +                # No more child levels. If we're short of indent_level,
                                +                # we have a code block. So we stop here.
                                +                break
                                +        return level, parent
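+        # Worked example (added commentary): a block indented by two tabs
+        # under a loose list starts with level == 0; the loop walks down
+        # ul/ol -> li -> nested ul/ol, bumping ``level`` only at list
+        # elements, and returns the innermost element reached as the parent.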
                                +
                                +
                                +class CodeBlockProcessor(BlockProcessor):
                                +    """ Process code blocks. """
                                +
                                +    def test(self, parent, block):
                                +        return block.startswith(' '*markdown.TAB_LENGTH)
                                +    
                                +    def run(self, parent, blocks):
                                +        sibling = self.lastChild(parent)
                                +        block = blocks.pop(0)
                                +        theRest = ''
                                +        if sibling and sibling.tag == "pre" and len(sibling) \
                                +                    and sibling[0].tag == "code":
+            # The previous block was a code block. As blank lines do not start
+            # new code blocks, append this block to the previous one, adding
+            # back the line breaks that were removed when the text was split.
                                +            code = sibling[0]
                                +            block, theRest = self.detab(block)
                                +            code.text = markdown.AtomicString('%s\n%s\n' % (code.text, block.rstrip()))
                                +        else:
                                +            # This is a new codeblock. Create the elements and insert text.
                                +            pre = markdown.etree.SubElement(parent, 'pre')
                                +            code = markdown.etree.SubElement(pre, 'code')
                                +            block, theRest = self.detab(block)
                                +            code.text = markdown.AtomicString('%s\n' % block.rstrip())
                                +        if theRest:
                                +            # This block contained unindented line(s) after the first indented 
                                +            # line. Insert these lines as the first block of the master blocks
                                +            # list for future processing.
                                +            blocks.insert(0, theRest)
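+        # Sketch of the effect (added commentary): the block
+        # "    print 'hi'\nnot indented" yields <pre><code>print 'hi'
+        # </code></pre>, while "not indented" is pushed back onto ``blocks``
+        # for another processor to handle.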
                                +
                                +
                                +class BlockQuoteProcessor(BlockProcessor):
                                +
                                +    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
                                +
                                +    def test(self, parent, block):
                                +        return bool(self.RE.search(block))
                                +
                                +    def run(self, parent, blocks):
                                +        block = blocks.pop(0)
                                +        m = self.RE.search(block)
                                +        if m:
                                +            before = block[:m.start()] # Lines before blockquote
+            # Pass lines before the blockquote in recursively for parsing first.
                                +            self.parser.parseBlocks(parent, [before])
+            # Remove ``> `` from the beginning of each line.
                                +            block = '\n'.join([self.clean(line) for line in 
                                +                            block[m.start():].split('\n')])
                                +        sibling = self.lastChild(parent)
                                +        if sibling and sibling.tag == "blockquote":
+            # Previous block was a blockquote, so set that as this block's parent.
                                +            quote = sibling
                                +        else:
                                +            # This is a new blockquote. Create a new parent element.
                                +            quote = markdown.etree.SubElement(parent, 'blockquote')
                                +        # Recursively parse block with blockquote as parent.
                                +        self.parser.parseChunk(quote, block)
                                +
                                +    def clean(self, line):
                                +        """ Remove ``>`` from beginning of a line. """
                                +        m = self.RE.match(line)
                                +        if line.strip() == ">":
                                +            return ""
                                +        elif m:
                                +            return m.group(2)
                                +        else:
                                +            return line
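+
+    # Illustrative examples (not part of the original source):
+    #   clean("> quoted")  -> "quoted"
+    #   clean(">")         -> ""
+    #   clean("plain")     -> "plain"   (unmatched lines pass through)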
                                +
                                +class OListProcessor(BlockProcessor):
                                +    """ Process ordered list blocks. """
                                +
                                +    TAG = 'ol'
                                +    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
                                +    RE = re.compile(r'^[ ]{0,3}\d+\.[ ](.*)')
+    # Detect items on secondary lines. They can be of either list type.
                                +    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ](.*)')
                                +    # Detect indented (nested) items of either type
                                +    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ].*')
                                +
                                +    def test(self, parent, block):
                                +        return bool(self.RE.match(block))
                                +
                                +    def run(self, parent, blocks):
+        # Check for multiple items in one block.
                                +        items = self.get_items(blocks.pop(0))
                                +        sibling = self.lastChild(parent)
                                +        if sibling and sibling.tag in ['ol', 'ul']:
+            # Previous block was a list (``ol`` or ``ul``), so use it as the parent.
                                +            lst = sibling
                                +            # make sure previous item is in a p.
                                +            if len(lst) and lst[-1].text and not len(lst[-1]):
                                +                p = markdown.etree.SubElement(lst[-1], 'p')
                                +                p.text = lst[-1].text
                                +                lst[-1].text = ''
                                +            # parse first block differently as it gets wrapped in a p.
                                +            li = markdown.etree.SubElement(lst, 'li')
                                +            self.parser.state.set('looselist')
                                +            firstitem = items.pop(0)
                                +            self.parser.parseBlocks(li, [firstitem])
                                +            self.parser.state.reset()
                                +        else:
                                +            # This is a new list so create parent with appropriate tag.
                                +            lst = markdown.etree.SubElement(parent, self.TAG)
                                +        self.parser.state.set('list')
                                +        # Loop through items in block, recursively parsing each with the
                                +        # appropriate parent.
                                +        for item in items:
                                +            if item.startswith(' '*markdown.TAB_LENGTH):
                                +                # Item is indented. Parse with last item as parent
                                +                self.parser.parseBlocks(lst[-1], [item])
                                +            else:
                                +                # New item. Create li and parse with it as parent
                                +                li = markdown.etree.SubElement(lst, 'li')
                                +                self.parser.parseBlocks(li, [item])
                                +        self.parser.state.reset()
                                +
                                +    def get_items(self, block):
                                +        """ Break a block into list items. """
                                +        items = []
                                +        for line in block.split('\n'):
                                +            m = self.CHILD_RE.match(line)
                                +            if m:
                                +                # This is a new item. Append
                                +                items.append(m.group(3))
                                +            elif self.INDENT_RE.match(line):
                                +                # This is an indented (possibly nested) item.
                                +                if items[-1].startswith(' '*markdown.TAB_LENGTH):
                                +                    # Previous item was indented. Append to that item.
                                +                    items[-1] = '%s\n%s' % (items[-1], line)
                                +                else:
                                +                    items.append(line)
                                +            else:
                                +                # This is another line of previous item. Append to that item.
                                +                items[-1] = '%s\n%s' % (items[-1], line)
                                +        return items
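+        # Illustrative example (added commentary): for the block
+        # "1. first\n2. second\n    3. nested", get_items() returns
+        # ['first', 'second', '    3. nested']; the indented entry is later
+        # re-parsed with the last <li> as its parent.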
                                +
                                +
                                +class UListProcessor(OListProcessor):
                                +    """ Process unordered list blocks. """
                                +
                                +    TAG = 'ul'
                                +    RE = re.compile(r'^[ ]{0,3}[*+-][ ](.*)')
                                +
                                +
                                +class HashHeaderProcessor(BlockProcessor):
                                +    """ Process Hash Headers. """
                                +
                                +    # Detect a header at start of any line in block
+    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')
                                .*?)#*(\n|$)') + + def test(self, parent, block): + return bool(self.RE.search(block)) + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.search(block) + if m: + before = block[:m.start()] # All lines before header + after = block[m.end():] # All lines after header + if before: + # As the header was not the first line of the block and the + # lines before the header must be parsed first, + # recursively parse this lines as a block. + self.parser.parseBlocks(parent, [before]) + # Create header using named groups from RE + h = markdown.etree.SubElement(parent, 'h%d' % len(m.group('level'))) + h.text = m.group('header').strip() + if after: + # Insert remaining lines as first block for future parsing. + blocks.insert(0, after) + else: + # This should never happen, but just in case... + message(CRITICAL, "We've got a problem header!") + + +class SetextHeaderProcessor(BlockProcessor): + """ Process Setext-style Headers. """ + + # Detect Setext-style header. Must be first 2 lines of block. + RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE) + + def test(self, parent, block): + return bool(self.RE.match(block)) + + def run(self, parent, blocks): + lines = blocks.pop(0).split('\n') + # Determine level. ``=`` is 1 and ``-`` is 2. + if lines[1].startswith('='): + level = 1 + else: + level = 2 + h = markdown.etree.SubElement(parent, 'h%d' % level) + h.text = lines[0].strip() + if len(lines) > 2: + # Block contains additional lines. Add to master blocks for later. + blocks.insert(0, '\n'.join(lines[2:])) + + +class HRProcessor(BlockProcessor): + """ Process Horizontal Rules. """ + + RE = r'[ ]{0,3}(?P[*_-])[ ]?((?P=ch)[ ]?){2,}[ ]*' + # Detect hr on any line of a block. + SEARCH_RE = re.compile(r'(^|\n)%s(\n|$)' % RE) + # Match a hr on a single line of text. + MATCH_RE = re.compile(r'^%s$' % RE) + + def test(self, parent, block): + return bool(self.SEARCH_RE.search(block)) + + def run(self, parent, blocks): + lines = blocks.pop(0).split('\n') + prelines = [] + # Check for lines in block before hr. + for line in lines: + m = self.MATCH_RE.match(line) + if m: + break + else: + prelines.append(line) + if len(prelines): + # Recursively parse lines before hr so they get parsed first. + self.parser.parseBlocks(parent, ['\n'.join(prelines)]) + # create hr + hr = markdown.etree.SubElement(parent, 'hr') + # check for lines in block after hr. + lines = lines[len(prelines)+1:] + if len(lines): + # Add lines after hr to master blocks for later parsing. + blocks.insert(0, '\n'.join(lines)) + + +class EmptyBlockProcessor(BlockProcessor): + """ Process blocks and start with an empty line. """ + + # Detect a block that only contains whitespace + # or only whitespace on the first line. + RE = re.compile(r'^\s*\n') + + def test(self, parent, block): + return bool(self.RE.match(block)) + + def run(self, parent, blocks): + block = blocks.pop(0) + m = self.RE.match(block) + if m: + # Add remaining line to master blocks for later. + blocks.insert(0, block[m.end():]) + sibling = self.lastChild(parent) + if sibling and sibling.tag == 'pre' and sibling[0] and \ + sibling[0].tag == 'code': + # Last block is a codeblock. Append to preserve whitespace. + sibling[0].text = markdown.AtomicString('%s/n/n/n' % sibling[0].text ) + + +class ParagraphProcessor(BlockProcessor): + """ Process Paragraph blocks. """ + + def test(self, parent, block): + return True + + def run(self, parent, blocks): + block = blocks.pop(0) + if block.strip(): + # Not a blank block. 
Add to parent, otherwise throw it away. + if self.parser.state.isstate('list'): + # The parent is a tight-list. Append to parent.text + if parent.text: + parent.text = '%s\n%s' % (parent.text, block) + else: + parent.text = block.lstrip() + else: + # Create a regular paragraph + p = markdown.etree.SubElement(parent, 'p') + p.text = block.lstrip() diff --git a/vendor/tornado/website/markdown/commandline.py b/vendor/tornado/website/markdown/commandline.py new file mode 100644 index 000000000000..1eedc6dbb1b4 --- /dev/null +++ b/vendor/tornado/website/markdown/commandline.py @@ -0,0 +1,96 @@ +""" +COMMAND-LINE SPECIFIC STUFF +============================================================================= + +The rest of the code is specifically for handling the case where Python +Markdown is called from the command line. +""" + +import markdown +import sys +import logging +from logging import DEBUG, INFO, WARN, ERROR, CRITICAL + +EXECUTABLE_NAME_FOR_USAGE = "python markdown.py" +""" The name used in the usage statement displayed for python versions < 2.3. +(With python 2.3 and higher the usage statement is generated by optparse +and uses the actual name of the executable called.) """ + +OPTPARSE_WARNING = """ +Python 2.3 or higher required for advanced command line options. +For lower versions of Python use: + + %s INPUT_FILE > OUTPUT_FILE + +""" % EXECUTABLE_NAME_FOR_USAGE + +def parse_options(): + """ + Define and parse `optparse` options for command-line usage. + """ + + try: + optparse = __import__("optparse") + except: + if len(sys.argv) == 2: + return {'input': sys.argv[1], + 'output': None, + 'safe': False, + 'extensions': [], + 'encoding': None }, CRITICAL + else: + print OPTPARSE_WARNING + return None, None + + parser = optparse.OptionParser(usage="%prog INPUTFILE [options]") + parser.add_option("-f", "--file", dest="filename", default=sys.stdout, + help="write output to OUTPUT_FILE", + metavar="OUTPUT_FILE") + parser.add_option("-e", "--encoding", dest="encoding", + help="encoding for input and output files",) + parser.add_option("-q", "--quiet", default = CRITICAL, + action="store_const", const=CRITICAL+10, dest="verbose", + help="suppress all messages") + parser.add_option("-v", "--verbose", + action="store_const", const=INFO, dest="verbose", + help="print info messages") + parser.add_option("-s", "--safe", dest="safe", default=False, + metavar="SAFE_MODE", + help="safe mode ('replace', 'remove' or 'escape' user's HTML tag)") + parser.add_option("-o", "--output_format", dest="output_format", + default='xhtml1', metavar="OUTPUT_FORMAT", + help="Format of output. 
One of 'xhtml1' (default) or 'html4'.") + parser.add_option("--noisy", + action="store_const", const=DEBUG, dest="verbose", + help="print debug messages") + parser.add_option("-x", "--extension", action="append", dest="extensions", + help = "load extension EXTENSION", metavar="EXTENSION") + + (options, args) = parser.parse_args() + + if not len(args) == 1: + parser.print_help() + return None, None + else: + input_file = args[0] + + if not options.extensions: + options.extensions = [] + + return {'input': input_file, + 'output': options.filename, + 'safe_mode': options.safe, + 'extensions': options.extensions, + 'encoding': options.encoding, + 'output_format': options.output_format}, options.verbose + +def run(): + """Run Markdown from the command line.""" + + # Parse options and adjust logging level if necessary + options, logging_level = parse_options() + if not options: sys.exit(0) + if logging_level: logging.getLogger('MARKDOWN').setLevel(logging_level) + + # Run + markdown.markdownFromFile(**options) diff --git a/vendor/tornado/website/markdown/etree_loader.py b/vendor/tornado/website/markdown/etree_loader.py new file mode 100644 index 000000000000..e2599b2cb965 --- /dev/null +++ b/vendor/tornado/website/markdown/etree_loader.py @@ -0,0 +1,33 @@ + +from markdown import message, CRITICAL +import sys + +## Import +def importETree(): + """Import the best implementation of ElementTree, return a module object.""" + etree_in_c = None + try: # Is it Python 2.5+ with C implemenation of ElementTree installed? + import xml.etree.cElementTree as etree_in_c + except ImportError: + try: # Is it Python 2.5+ with Python implementation of ElementTree? + import xml.etree.ElementTree as etree + except ImportError: + try: # An earlier version of Python with cElementTree installed? + import cElementTree as etree_in_c + except ImportError: + try: # An earlier version of Python with Python ElementTree? 
+ import elementtree.ElementTree as etree + except ImportError: + message(CRITICAL, "Failed to import ElementTree") + sys.exit(1) + if etree_in_c and etree_in_c.VERSION < "1.0": + message(CRITICAL, "For cElementTree version 1.0 or higher is required.") + sys.exit(1) + elif etree_in_c : + return etree_in_c + elif etree.VERSION < "1.1": + message(CRITICAL, "For ElementTree version 1.1 or higher is required") + sys.exit(1) + else : + return etree + diff --git a/vendor/tornado/website/markdown/extensions/__init__.py b/vendor/tornado/website/markdown/extensions/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vendor/tornado/website/markdown/extensions/toc.py b/vendor/tornado/website/markdown/extensions/toc.py new file mode 100644 index 000000000000..3afaea04881b --- /dev/null +++ b/vendor/tornado/website/markdown/extensions/toc.py @@ -0,0 +1,140 @@ +""" +Table of Contents Extension for Python-Markdown +* * * + +(c) 2008 [Jack Miller](http://codezen.org) + +Dependencies: +* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/) + +""" +import markdown +from markdown import etree +import re + +class TocTreeprocessor(markdown.treeprocessors.Treeprocessor): + # Iterator wrapper to get parent and child all at once + def iterparent(self, root): + for parent in root.getiterator(): + for child in parent: + yield parent, child + + def run(self, doc): + div = etree.Element("div") + div.attrib["class"] = "toc" + last_li = None + + # Add title to the div + if self.config["title"][0]: + header = etree.SubElement(div, "span") + header.attrib["class"] = "toctitle" + header.text = self.config["title"][0] + + level = 0 + list_stack=[div] + header_rgx = re.compile("[Hh][123456]") + + # Get a list of id attributes + used_ids = [] + for c in doc.getiterator(): + if "id" in c.attrib: + used_ids.append(c.attrib["id"]) + + for (p, c) in self.iterparent(doc): + if not c.text: + continue + + # To keep the output from screwing up the + # validation by putting a
                                inside of a

                                + # we actually replace the

                                in its entirety. + # We do not allow the marker inside a header as that + # would causes an enless loop of placing a new TOC + # inside previously generated TOC. + + if c.text.find(self.config["marker"][0]) > -1 and not header_rgx.match(c.tag): + for i in range(len(p)): + if p[i] == c: + p[i] = div + break + + if header_rgx.match(c.tag): + tag_level = int(c.tag[-1]) + + # Regardless of how many levels we jumped + # only one list should be created, since + # empty lists containing lists are illegal. + + if tag_level < level: + list_stack.pop() + level = tag_level + + if tag_level > level: + newlist = etree.Element("ul") + if last_li: + last_li.append(newlist) + else: + list_stack[-1].append(newlist) + list_stack.append(newlist) + level = tag_level + + # Do not override pre-existing ids + if not "id" in c.attrib: + id = self.config["slugify"][0](c.text) + if id in used_ids: + ctr = 1 + while "%s_%d" % (id, ctr) in used_ids: + ctr += 1 + id = "%s_%d" % (id, ctr) + used_ids.append(id) + c.attrib["id"] = id + else: + id = c.attrib["id"] + + # List item link, to be inserted into the toc div + last_li = etree.Element("li") + link = etree.SubElement(last_li, "a") + link.text = c.text + link.attrib["href"] = '#' + id + + if int(self.config["anchorlink"][0]): + anchor = etree.SubElement(c, "a") + anchor.text = c.text + anchor.attrib["href"] = "#" + id + anchor.attrib["class"] = "toclink" + c.text = "" + + list_stack[-1].append(last_li) + +class TocExtension(markdown.Extension): + def __init__(self, configs): + self.config = { "marker" : ["[TOC]", + "Text to find and replace with Table of Contents -" + "Defaults to \"[TOC]\""], + "slugify" : [self.slugify, + "Function to generate anchors based on header text-" + "Defaults to a built in slugify function."], + "title" : [None, + "Title to insert into TOC

                                - " + "Defaults to None"], + "anchorlink" : [0, + "1 if header should be a self link" + "Defaults to 0"]} + + for key, value in configs: + self.setConfig(key, value) + + # This is exactly the same as Django's slugify + def slugify(self, value): + """ Slugify a string, to make it URL friendly. """ + import unicodedata + value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') + value = unicode(re.sub('[^\w\s-]', '', value).strip().lower()) + return re.sub('[-\s]+','-',value) + + def extendMarkdown(self, md, md_globals): + tocext = TocTreeprocessor(md) + tocext.config = self.config + md.treeprocessors.add("toc", tocext, "_begin") + +def makeExtension(configs={}): + return TocExtension(configs=configs) diff --git a/vendor/tornado/website/markdown/html4.py b/vendor/tornado/website/markdown/html4.py new file mode 100644 index 000000000000..08f241d57aae --- /dev/null +++ b/vendor/tornado/website/markdown/html4.py @@ -0,0 +1,274 @@ +# markdown/html4.py +# +# Add html4 serialization to older versions of Elementree +# Taken from ElementTree 1.3 preview with slight modifications +# +# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved. +# +# fredrik@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The ElementTree toolkit is +# +# Copyright (c) 1999-2007 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# -------------------------------------------------------------------- + + +import markdown +ElementTree = markdown.etree.ElementTree +QName = markdown.etree.QName +Comment = markdown.etree.Comment +PI = markdown.etree.PI +ProcessingInstruction = markdown.etree.ProcessingInstruction + +HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr", + "img", "input", "isindex", "link", "meta" "param") + +try: + HTML_EMPTY = set(HTML_EMPTY) +except NameError: + pass + +_namespace_map = { + # "well-known" namespace prefixes + "http://www.w3.org/XML/1998/namespace": "xml", + "http://www.w3.org/1999/xhtml": "html", + "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", + "http://schemas.xmlsoap.org/wsdl/": "wsdl", + # xml schema + "http://www.w3.org/2001/XMLSchema": "xs", + "http://www.w3.org/2001/XMLSchema-instance": "xsi", + # dublic core + "http://purl.org/dc/elements/1.1/": "dc", +} + + +def _raise_serialization_error(text): + raise TypeError( + "cannot serialize %r (type %s)" % (text, type(text).__name__) + ) + +def _encode(text, encoding): + try: + return text.encode(encoding, "xmlcharrefreplace") + except (TypeError, AttributeError): + _raise_serialization_error(text) + +def _escape_cdata(text, encoding): + # escape character data + try: + # it's worth avoiding do-nothing calls for strings that are + # shorter than 500 character, or so. assume that's, by far, + # the most common case in most applications. + if "&" in text: + text = text.replace("&", "&") + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + return text.encode(encoding, "xmlcharrefreplace") + except (TypeError, AttributeError): + _raise_serialization_error(text) + + +def _escape_attrib(text, encoding): + # escape attribute value + try: + if "&" in text: + text = text.replace("&", "&") + if "<" in text: + text = text.replace("<", "<") + if ">" in text: + text = text.replace(">", ">") + if "\"" in text: + text = text.replace("\"", """) + if "\n" in text: + text = text.replace("\n", " ") + return text.encode(encoding, "xmlcharrefreplace") + except (TypeError, AttributeError): + _raise_serialization_error(text) + +def _escape_attrib_html(text, encoding): + # escape attribute value + try: + if "&" in text: + text = text.replace("&", "&") + if ">" in text: + text = text.replace(">", ">") + if "\"" in text: + text = text.replace("\"", """) + return text.encode(encoding, "xmlcharrefreplace") + except (TypeError, AttributeError): + _raise_serialization_error(text) + + +def _serialize_html(write, elem, encoding, qnames, namespaces): + tag = elem.tag + text = elem.text + if tag is Comment: + write("" % _escape_cdata(text, encoding)) + elif tag is ProcessingInstruction: + write("" % _escape_cdata(text, encoding)) + else: + tag = qnames[tag] + if tag is None: + if text: + write(_escape_cdata(text, encoding)) + for e in elem: + _serialize_html(write, e, encoding, qnames, None) + else: + write("<" + tag) + items = elem.items() + if items or namespaces: + items.sort() # lexical order + for k, v in items: + if isinstance(k, QName): + k = k.text + if isinstance(v, QName): + v = qnames[v.text] + else: + v = _escape_attrib_html(v, encoding) + # FIXME: handle boolean attributes + write(" %s=\"%s\"" % (qnames[k], v)) + if namespaces: + items = namespaces.items() + items.sort(key=lambda x: x[1]) # sort on prefix + for v, k in items: + if k: + k = ":" + k + write(" xmlns%s=\"%s\"" % ( + k.encode(encoding), + _escape_attrib(v, encoding) + )) + write(">") + tag = tag.lower() + if text: + if 
tag == "script" or tag == "style": + write(_encode(text, encoding)) + else: + write(_escape_cdata(text, encoding)) + for e in elem: + _serialize_html(write, e, encoding, qnames, None) + if tag not in HTML_EMPTY: + write("") + if elem.tail: + write(_escape_cdata(elem.tail, encoding)) + +def write_html(root, f, + # keyword arguments + encoding="us-ascii", + default_namespace=None): + assert root is not None + if not hasattr(f, "write"): + f = open(f, "wb") + write = f.write + if not encoding: + encoding = "us-ascii" + qnames, namespaces = _namespaces( + root, encoding, default_namespace + ) + _serialize_html( + write, root, encoding, qnames, namespaces + ) + +# -------------------------------------------------------------------- +# serialization support + +def _namespaces(elem, encoding, default_namespace=None): + # identify namespaces used in this tree + + # maps qnames to *encoded* prefix:local names + qnames = {None: None} + + # maps uri:s to prefixes + namespaces = {} + if default_namespace: + namespaces[default_namespace] = "" + + def encode(text): + return text.encode(encoding) + + def add_qname(qname): + # calculate serialized qname representation + try: + if qname[:1] == "{": + uri, tag = qname[1:].split("}", 1) + prefix = namespaces.get(uri) + if prefix is None: + prefix = _namespace_map.get(uri) + if prefix is None: + prefix = "ns%d" % len(namespaces) + if prefix != "xml": + namespaces[uri] = prefix + if prefix: + qnames[qname] = encode("%s:%s" % (prefix, tag)) + else: + qnames[qname] = encode(tag) # default element + else: + if default_namespace: + # FIXME: can this be handled in XML 1.0? + raise ValueError( + "cannot use non-qualified names with " + "default_namespace option" + ) + qnames[qname] = encode(qname) + except TypeError: + _raise_serialization_error(qname) + + # populate qname and namespaces table + try: + iterate = elem.iter + except AttributeError: + iterate = elem.getiterator # cET compatibility + for elem in iterate(): + tag = elem.tag + if isinstance(tag, QName) and tag.text not in qnames: + add_qname(tag.text) + elif isinstance(tag, basestring): + if tag not in qnames: + add_qname(tag) + elif tag is not None and tag is not Comment and tag is not PI: + _raise_serialization_error(tag) + for key, value in elem.items(): + if isinstance(key, QName): + key = key.text + if key not in qnames: + add_qname(key) + if isinstance(value, QName) and value.text not in qnames: + add_qname(value.text) + text = elem.text + if isinstance(text, QName) and text.text not in qnames: + add_qname(text.text) + return qnames, namespaces + +def to_html_string(element, encoding=None): + class dummy: + pass + data = [] + file = dummy() + file.write = data.append + write_html(ElementTree(element).getroot(),file,encoding) + return "".join(data) diff --git a/vendor/tornado/website/markdown/inlinepatterns.py b/vendor/tornado/website/markdown/inlinepatterns.py new file mode 100644 index 000000000000..89fa3b2ef41d --- /dev/null +++ b/vendor/tornado/website/markdown/inlinepatterns.py @@ -0,0 +1,371 @@ +""" +INLINE PATTERNS +============================================================================= + +Inline patterns such as *emphasis* are handled by means of auxiliary +objects, one per pattern. Pattern objects must be instances of classes +that extend markdown.Pattern. 
Each pattern object uses a single regular +expression and needs support the following methods: + + pattern.getCompiledRegExp() # returns a regular expression + + pattern.handleMatch(m) # takes a match object and returns + # an ElementTree element or just plain text + +All of python markdown's built-in patterns subclass from Pattern, +but you can add additional patterns that don't. + +Also note that all the regular expressions used by inline must +capture the whole block. For this reason, they all start with +'^(.*)' and end with '(.*)!'. In case with built-in expression +Pattern takes care of adding the "^(.*)" and "(.*)!". + +Finally, the order in which regular expressions are applied is very +important - e.g. if we first replace http://.../ links with tags +and _then_ try to replace inline html, we would end up with a mess. +So, we apply the expressions in the following order: + +* escape and backticks have to go before everything else, so + that we can preempt any markdown patterns by escaping them. + +* then we handle auto-links (must be done before inline html) + +* then we handle inline HTML. At this point we will simply + replace all inline HTML strings with a placeholder and add + the actual HTML to a hash. + +* then inline images (must be done before links) + +* then bracketed links, first regular then reference-style + +* finally we apply strong and emphasis +""" + +import markdown +import re +from urlparse import urlparse, urlunparse +import sys +if sys.version >= "3.0": + from html import entities as htmlentitydefs +else: + import htmlentitydefs + +""" +The actual regular expressions for patterns +----------------------------------------------------------------------------- +""" + +NOBRACKET = r'[^\]\[]*' +BRK = ( r'\[(' + + (NOBRACKET + r'(\[')*6 + + (NOBRACKET+ r'\])*')*6 + + NOBRACKET + r')\]' ) +NOIMG = r'(?|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*)\12)?\)''' +# [text](url) or [text]() + +IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)' +# ![alttxt](http://x.com/) or ![alttxt]() +REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3] +IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2] +NOT_STRONG_RE = r'( \* )' # stand-alone * or _ +AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # +AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # + +HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...> +ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # & +LINE_BREAK_RE = r' \n' # two spaces at end of line +LINE_BREAK_2_RE = r' $' # two spaces at end of text + + +def dequote(string): + """Remove quotes from around a string.""" + if ( ( string.startswith('"') and string.endswith('"')) + or (string.startswith("'") and string.endswith("'")) ): + return string[1:-1] + else: + return string + +ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123} + +def handleAttributes(text, parent): + """Set values of an element based on attribute definitions ({@id=123}).""" + def attributeCallback(match): + parent.set(match.group(1), match.group(2).replace('\n', ' ')) + return ATTR_RE.sub(attributeCallback, text) + + +""" +The pattern classes +----------------------------------------------------------------------------- +""" + +class Pattern: + """Base class that inline patterns subclass. """ + + def __init__ (self, pattern, markdown_instance=None): + """ + Create an instant of an inline pattern. 
+ + Keyword arguments: + + * pattern: A regular expression that matches a pattern + + """ + self.pattern = pattern + self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL) + + # Api for Markdown to pass safe_mode into instance + self.safe_mode = False + if markdown_instance: + self.markdown = markdown_instance + + def getCompiledRegExp (self): + """ Return a compiled regular expression. """ + return self.compiled_re + + def handleMatch(self, m): + """Return a ElementTree element from the given match. + + Subclasses should override this method. + + Keyword arguments: + + * m: A re match object containing a match of the pattern. + + """ + pass + + def type(self): + """ Return class name, to define pattern type """ + return self.__class__.__name__ + +BasePattern = Pattern # for backward compatibility + +class SimpleTextPattern (Pattern): + """ Return a simple text of group(2) of a Pattern. """ + def handleMatch(self, m): + text = m.group(2) + if text == markdown.INLINE_PLACEHOLDER_PREFIX: + return None + return text + +class SimpleTagPattern (Pattern): + """ + Return element of type `tag` with a text attribute of group(3) + of a Pattern. + + """ + def __init__ (self, pattern, tag): + Pattern.__init__(self, pattern) + self.tag = tag + + def handleMatch(self, m): + el = markdown.etree.Element(self.tag) + el.text = m.group(3) + return el + + +class SubstituteTagPattern (SimpleTagPattern): + """ Return a eLement of type `tag` with no children. """ + def handleMatch (self, m): + return markdown.etree.Element(self.tag) + + +class BacktickPattern (Pattern): + """ Return a `` element containing the matching text. """ + def __init__ (self, pattern): + Pattern.__init__(self, pattern) + self.tag = "code" + + def handleMatch(self, m): + el = markdown.etree.Element(self.tag) + el.text = markdown.AtomicString(m.group(3).strip()) + return el + + +class DoubleTagPattern (SimpleTagPattern): + """Return a ElementTree element nested in tag2 nested in tag1. + + Useful for strong emphasis etc. + + """ + def handleMatch(self, m): + tag1, tag2 = self.tag.split(",") + el1 = markdown.etree.Element(tag1) + el2 = markdown.etree.SubElement(el1, tag2) + el2.text = m.group(3) + return el1 + + +class HtmlPattern (Pattern): + """ Store raw inline html and return a placeholder. """ + def handleMatch (self, m): + rawhtml = m.group(2) + inline = True + place_holder = self.markdown.htmlStash.store(rawhtml) + return place_holder + + +class LinkPattern (Pattern): + """ Return a link element from the given match. """ + def handleMatch(self, m): + el = markdown.etree.Element("a") + el.text = m.group(2) + title = m.group(11) + href = m.group(9) + + if href: + if href[0] == "<": + href = href[1:-1] + el.set("href", self.sanitize_url(href.strip())) + else: + el.set("href", "") + + if title: + title = dequote(title) #.replace('"', """) + el.set("title", title) + return el + + def sanitize_url(self, url): + """ + Sanitize a url against xss attacks in "safe_mode". + + Rather than specifically blacklisting `javascript:alert("XSS")` and all + its aliases (see ), we whitelist known + safe url formats. Most urls contain a network location, however some + are known not to (i.e.: mailto links). Script urls do not contain a + location. Additionally, for `javascript:...`, the scheme would be + "javascript" but some aliases will appear to `urlparse()` to have no + scheme. On top of that relative links (i.e.: "foo/bar.html") have no + scheme. 
Therefore we must check "path", "parameters", "query" and + "fragment" for any literal colons. We don't check "scheme" for colons + because it *should* never have any and "netloc" must allow the form: + `username:password@host:port`. + + """ + locless_schemes = ['', 'mailto', 'news'] + scheme, netloc, path, params, query, fragment = url = urlparse(url) + safe_url = False + if netloc != '' or scheme in locless_schemes: + safe_url = True + + for part in url[2:]: + if ":" in part: + safe_url = False + + if self.markdown.safeMode and not safe_url: + return '' + else: + return urlunparse(url) + +class ImagePattern(LinkPattern): + """ Return a img element from the given match. """ + def handleMatch(self, m): + el = markdown.etree.Element("img") + src_parts = m.group(9).split() + if src_parts: + src = src_parts[0] + if src[0] == "<" and src[-1] == ">": + src = src[1:-1] + el.set('src', self.sanitize_url(src)) + else: + el.set('src', "") + if len(src_parts) > 1: + el.set('title', dequote(" ".join(src_parts[1:]))) + + if markdown.ENABLE_ATTRIBUTES: + truealt = handleAttributes(m.group(2), el) + else: + truealt = m.group(2) + + el.set('alt', truealt) + return el + +class ReferencePattern(LinkPattern): + """ Match to a stored reference and return link element. """ + def handleMatch(self, m): + if m.group(9): + id = m.group(9).lower() + else: + # if we got something like "[Google][]" + # we'll use "google" as the id + id = m.group(2).lower() + + if not id in self.markdown.references: # ignore undefined refs + return None + href, title = self.markdown.references[id] + + text = m.group(2) + return self.makeTag(href, title, text) + + def makeTag(self, href, title, text): + el = markdown.etree.Element('a') + + el.set('href', self.sanitize_url(href)) + if title: + el.set('title', title) + + el.text = text + return el + + +class ImageReferencePattern (ReferencePattern): + """ Match to a stored reference and return img element. """ + def makeTag(self, href, title, text): + el = markdown.etree.Element("img") + el.set("src", self.sanitize_url(href)) + if title: + el.set("title", title) + el.set("alt", text) + return el + + +class AutolinkPattern (Pattern): + """ Return a link Element given an autolink (``). """ + def handleMatch(self, m): + el = markdown.etree.Element("a") + el.set('href', m.group(2)) + el.text = markdown.AtomicString(m.group(2)) + return el + +class AutomailPattern (Pattern): + """ + Return a mailto link Element given an automail link (``). + """ + def handleMatch(self, m): + el = markdown.etree.Element('a') + email = m.group(2) + if email.startswith("mailto:"): + email = email[len("mailto:"):] + + def codepoint2name(code): + """Return entity definition by code, or the code if not defined.""" + entity = htmlentitydefs.codepoint2name.get(code) + if entity: + return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity) + else: + return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code) + + letters = [codepoint2name(ord(letter)) for letter in email] + el.text = markdown.AtomicString(''.join(letters)) + + mailto = "mailto:" + email + mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' % + ord(letter) for letter in mailto]) + el.set('href', mailto) + return el + diff --git a/vendor/tornado/website/markdown/odict.py b/vendor/tornado/website/markdown/odict.py new file mode 100644 index 000000000000..bf3ef0718255 --- /dev/null +++ b/vendor/tornado/website/markdown/odict.py @@ -0,0 +1,162 @@ +class OrderedDict(dict): + """ + A dictionary that keeps its keys in the order in which they're inserted. 
+ + Copied from Django's SortedDict with some modifications. + + """ + def __new__(cls, *args, **kwargs): + instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs) + instance.keyOrder = [] + return instance + + def __init__(self, data=None): + if data is None: + data = {} + super(OrderedDict, self).__init__(data) + if isinstance(data, dict): + self.keyOrder = data.keys() + else: + self.keyOrder = [] + for key, value in data: + if key not in self.keyOrder: + self.keyOrder.append(key) + + def __deepcopy__(self, memo): + from copy import deepcopy + return self.__class__([(key, deepcopy(value, memo)) + for key, value in self.iteritems()]) + + def __setitem__(self, key, value): + super(OrderedDict, self).__setitem__(key, value) + if key not in self.keyOrder: + self.keyOrder.append(key) + + def __delitem__(self, key): + super(OrderedDict, self).__delitem__(key) + self.keyOrder.remove(key) + + def __iter__(self): + for k in self.keyOrder: + yield k + + def pop(self, k, *args): + result = super(OrderedDict, self).pop(k, *args) + try: + self.keyOrder.remove(k) + except ValueError: + # Key wasn't in the dictionary in the first place. No problem. + pass + return result + + def popitem(self): + result = super(OrderedDict, self).popitem() + self.keyOrder.remove(result[0]) + return result + + def items(self): + return zip(self.keyOrder, self.values()) + + def iteritems(self): + for key in self.keyOrder: + yield key, super(OrderedDict, self).__getitem__(key) + + def keys(self): + return self.keyOrder[:] + + def iterkeys(self): + return iter(self.keyOrder) + + def values(self): + return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder] + + def itervalues(self): + for key in self.keyOrder: + yield super(OrderedDict, self).__getitem__(key) + + def update(self, dict_): + for k, v in dict_.items(): + self.__setitem__(k, v) + + def setdefault(self, key, default): + if key not in self.keyOrder: + self.keyOrder.append(key) + return super(OrderedDict, self).setdefault(key, default) + + def value_for_index(self, index): + """Return the value of the item at the given zero-based index.""" + return self[self.keyOrder[index]] + + def insert(self, index, key, value): + """Insert the key, value pair before the item with the given index.""" + if key in self.keyOrder: + n = self.keyOrder.index(key) + del self.keyOrder[n] + if n < index: + index -= 1 + self.keyOrder.insert(index, key) + super(OrderedDict, self).__setitem__(key, value) + + def copy(self): + """Return a copy of this object.""" + # This way of initializing the copy means it works for subclasses, too. + obj = self.__class__(self) + obj.keyOrder = self.keyOrder[:] + return obj + + def __repr__(self): + """ + Replace the normal dict.__repr__ with a version that returns the keys + in their sorted order. + """ + return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()]) + + def clear(self): + super(OrderedDict, self).clear() + self.keyOrder = [] + + def index(self, key): + """ Return the index of a given key. """ + return self.keyOrder.index(key) + + def index_for_location(self, location): + """ Return index or None for a given location. """ + if location == '_begin': + i = 0 + elif location == '_end': + i = None + elif location.startswith('<') or location.startswith('>'): + i = self.index(location[1:]) + if location.startswith('>'): + if i >= len(self): + # last item + i = None + else: + i += 1 + else: + raise ValueError('Not a valid location: "%s". Location key ' + 'must start with a ">" or "<".' 
% location) + return i + + def add(self, key, value, location): + """ Insert by key location. """ + i = self.index_for_location(location) + if i is not None: + self.insert(i, key, value) + else: + self.__setitem__(key, value) + + def link(self, key, location): + """ Change location of an existing item. """ + n = self.keyOrder.index(key) + del self.keyOrder[n] + i = self.index_for_location(location) + try: + if i is not None: + self.keyOrder.insert(i, key) + else: + self.keyOrder.append(key) + except Error: + # restore to prevent data loss and reraise + self.keyOrder.insert(n, key) + raise Error diff --git a/vendor/tornado/website/markdown/postprocessors.py b/vendor/tornado/website/markdown/postprocessors.py new file mode 100644 index 000000000000..80227bb9097e --- /dev/null +++ b/vendor/tornado/website/markdown/postprocessors.py @@ -0,0 +1,77 @@ +""" +POST-PROCESSORS +============================================================================= + +Markdown also allows post-processors, which are similar to preprocessors in +that they need to implement a "run" method. However, they are run after core +processing. + +""" + + +import markdown + +class Processor: + def __init__(self, markdown_instance=None): + if markdown_instance: + self.markdown = markdown_instance + +class Postprocessor(Processor): + """ + Postprocessors are run after the ElementTree it converted back into text. + + Each Postprocessor implements a "run" method that takes a pointer to a + text string, modifies it as necessary and returns a text string. + + Postprocessors must extend markdown.Postprocessor. + + """ + + def run(self, text): + """ + Subclasses of Postprocessor should implement a `run` method, which + takes the html document as a single text string and returns a + (possibly modified) string. + + """ + pass + + +class RawHtmlPostprocessor(Postprocessor): + """ Restore raw html to the document. """ + + def run(self, text): + """ Iterate over html stash and restore "safe" html. """ + for i in range(self.markdown.htmlStash.html_counter): + html, safe = self.markdown.htmlStash.rawHtmlBlocks[i] + if self.markdown.safeMode and not safe: + if str(self.markdown.safeMode).lower() == 'escape': + html = self.escape(html) + elif str(self.markdown.safeMode).lower() == 'remove': + html = '' + else: + html = markdown.HTML_REMOVED_TEXT + if safe or not self.markdown.safeMode: + text = text.replace("
<p>%s</p>
                                " % + (markdown.preprocessors.HTML_PLACEHOLDER % i), + html + "\n") + text = text.replace(markdown.preprocessors.HTML_PLACEHOLDER % i, + html) + return text + + def escape(self, html): + """ Basic html escaping """ + html = html.replace('&', '&') + html = html.replace('<', '<') + html = html.replace('>', '>') + return html.replace('"', '"') + + +class AndSubstitutePostprocessor(Postprocessor): + """ Restore valid entities """ + def __init__(self): + pass + + def run(self, text): + text = text.replace(markdown.AMP_SUBSTITUTE, "&") + return text diff --git a/vendor/tornado/website/markdown/preprocessors.py b/vendor/tornado/website/markdown/preprocessors.py new file mode 100644 index 000000000000..712a1e8755d1 --- /dev/null +++ b/vendor/tornado/website/markdown/preprocessors.py @@ -0,0 +1,214 @@ + +""" +PRE-PROCESSORS +============================================================================= + +Preprocessors work on source text before we start doing anything too +complicated. +""" + +import re +import markdown + +HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:" +HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX + +class Processor: + def __init__(self, markdown_instance=None): + if markdown_instance: + self.markdown = markdown_instance + +class Preprocessor (Processor): + """ + Preprocessors are run after the text is broken into lines. + + Each preprocessor implements a "run" method that takes a pointer to a + list of lines of the document, modifies it as necessary and returns + either the same pointer or a pointer to a new list. + + Preprocessors must extend markdown.Preprocessor. + + """ + def run(self, lines): + """ + Each subclass of Preprocessor should override the `run` method, which + takes the document as a list of strings split by newlines and returns + the (possibly modified) list of lines. + + """ + pass + +class HtmlStash: + """ + This class is used for stashing HTML objects that we extract + in the beginning and replace with place-holders. + """ + + def __init__ (self): + """ Create a HtmlStash. """ + self.html_counter = 0 # for counting inline html segments + self.rawHtmlBlocks=[] + + def store(self, html, safe=False): + """ + Saves an HTML segment for later reinsertion. Returns a + placeholder string that needs to be inserted into the + document. + + Keyword arguments: + + * html: an html segment + * safe: label an html segment as safe for safemode + + Returns : a placeholder string + + """ + self.rawHtmlBlocks.append((html, safe)) + placeholder = HTML_PLACEHOLDER % self.html_counter + self.html_counter += 1 + return placeholder + + def reset(self): + self.html_counter = 0 + self.rawHtmlBlocks = [] + + +class HtmlBlockPreprocessor(Preprocessor): + """Remove html blocks from the text and store them for later retrieval.""" + + right_tag_patterns = ["", "%s>"] + + def _get_left_tag(self, block): + return block[1:].replace(">", " ", 1).split()[0].lower() + + def _get_right_tag(self, left_tag, block): + for p in self.right_tag_patterns: + tag = p % left_tag + i = block.rfind(tag) + if i > 2: + return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag) + return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block) + + def _equal_tags(self, left_tag, right_tag): + if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc. 
+ return True + if ("/" + left_tag) == right_tag: + return True + if (right_tag == "--" and left_tag == "--"): + return True + elif left_tag == right_tag[1:] \ + and right_tag[0] != "<": + return True + else: + return False + + def _is_oneliner(self, tag): + return (tag in ['hr', 'hr/']) + + def run(self, lines): + text = "\n".join(lines) + new_blocks = [] + text = text.split("\n\n") + items = [] + left_tag = '' + right_tag = '' + in_tag = False # flag + + while text: + block = text[0] + if block.startswith("\n"): + block = block[1:] + text = text[1:] + + if block.startswith("\n"): + block = block[1:] + + if not in_tag: + if block.startswith("<"): + left_tag = self._get_left_tag(block) + right_tag, data_index = self._get_right_tag(left_tag, block) + + if data_index < len(block): + text.insert(0, block[data_index:]) + block = block[:data_index] + + if not (markdown.isBlockLevel(left_tag) \ + or block[1] in ["!", "?", "@", "%"]): + new_blocks.append(block) + continue + + if self._is_oneliner(left_tag): + new_blocks.append(block.strip()) + continue + + if block[1] == "!": + # is a comment block + left_tag = "--" + right_tag, data_index = self._get_right_tag(left_tag, block) + # keep checking conditions below and maybe just append + + if block.rstrip().endswith(">") \ + and self._equal_tags(left_tag, right_tag): + new_blocks.append( + self.markdown.htmlStash.store(block.strip())) + continue + else: #if not block[1] == "!": + # if is block level tag and is not complete + + if markdown.isBlockLevel(left_tag) or left_tag == "--" \ + and not block.rstrip().endswith(">"): + items.append(block.strip()) + in_tag = True + else: + new_blocks.append( + self.markdown.htmlStash.store(block.strip())) + + continue + + new_blocks.append(block) + + else: + items.append(block.strip()) + + right_tag, data_index = self._get_right_tag(left_tag, block) + + if self._equal_tags(left_tag, right_tag): + # if find closing tag + in_tag = False + new_blocks.append( + self.markdown.htmlStash.store('\n\n'.join(items))) + items = [] + + if items: + new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items))) + new_blocks.append('\n') + + new_text = "\n\n".join(new_blocks) + return new_text.split("\n") + + +class ReferencePreprocessor(Preprocessor): + """ Remove reference definitions from text and store for later use. """ + + RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL) + + def run (self, lines): + new_text = []; + for line in lines: + m = self.RE.match(line) + if m: + id = m.group(2).strip().lower() + t = m.group(4).strip() # potential title + if not t: + self.markdown.references[id] = (m.group(3), t) + elif (len(t) >= 2 + and (t[0] == t[-1] == "\"" + or t[0] == t[-1] == "\'" + or (t[0] == "(" and t[-1] == ")") ) ): + self.markdown.references[id] = (m.group(3), t[1:-1]) + else: + new_text.append(line) + else: + new_text.append(line) + + return new_text #+ "\n" diff --git a/vendor/tornado/website/markdown/treeprocessors.py b/vendor/tornado/website/markdown/treeprocessors.py new file mode 100644 index 000000000000..1dc612a95e85 --- /dev/null +++ b/vendor/tornado/website/markdown/treeprocessors.py @@ -0,0 +1,329 @@ +import markdown +import re + +def isString(s): + """ Check if it's string """ + return isinstance(s, unicode) or isinstance(s, str) + +class Processor: + def __init__(self, markdown_instance=None): + if markdown_instance: + self.markdown = markdown_instance + +class Treeprocessor(Processor): + """ + Treeprocessors are run on the ElementTree object before serialization. 
+ + Each Treeprocessor implements a "run" method that takes a pointer to an + ElementTree, modifies it as necessary and returns an ElementTree + object. + + Treeprocessors must extend markdown.Treeprocessor. + + """ + def run(self, root): + """ + Subclasses of Treeprocessor should implement a `run` method, which + takes a root ElementTree. This method can return another ElementTree + object, and the existing root ElementTree will be replaced, or it can + modify the current tree and return None. + """ + pass + + +class InlineProcessor(Treeprocessor): + """ + A Treeprocessor that traverses a tree, applying inline patterns. + """ + + def __init__ (self, md): + self.__placeholder_prefix = markdown.INLINE_PLACEHOLDER_PREFIX + self.__placeholder_suffix = markdown.ETX + self.__placeholder_length = 4 + len(self.__placeholder_prefix) \ + + len(self.__placeholder_suffix) + self.__placeholder_re = re.compile(markdown.INLINE_PLACEHOLDER % r'([0-9]{4})') + self.markdown = md + + def __makePlaceholder(self, type): + """ Generate a placeholder """ + id = "%04d" % len(self.stashed_nodes) + hash = markdown.INLINE_PLACEHOLDER % id + return hash, id + + def __findPlaceholder(self, data, index): + """ + Extract id from data string, start from index + + Keyword arguments: + + * data: string + * index: index, from which we start search + + Returns: placeholder id and string index, after the found placeholder. + """ + + m = self.__placeholder_re.search(data, index) + if m: + return m.group(1), m.end() + else: + return None, index + 1 + + def __stashNode(self, node, type): + """ Add node to stash """ + placeholder, id = self.__makePlaceholder(type) + self.stashed_nodes[id] = node + return placeholder + + def __handleInline(self, data, patternIndex=0): + """ + Process string with inline patterns and replace it + with placeholders + + Keyword arguments: + + * data: A line of Markdown text + * patternIndex: The index of the inlinePattern to start with + + Returns: String with placeholders. + + """ + if not isinstance(data, markdown.AtomicString): + startIndex = 0 + while patternIndex < len(self.markdown.inlinePatterns): + data, matched, startIndex = self.__applyPattern( + self.markdown.inlinePatterns.value_for_index(patternIndex), + data, patternIndex, startIndex) + if not matched: + patternIndex += 1 + return data + + def __processElementText(self, node, subnode, isText=True): + """ + Process placeholders in Element.text or Element.tail + of Elements popped from self.stashed_nodes. + + Keywords arguments: + + * node: parent node + * subnode: processing node + * isText: bool variable, True - it's text, False - it's tail + + Returns: None + + """ + if isText: + text = subnode.text + subnode.text = None + else: + text = subnode.tail + subnode.tail = None + + childResult = self.__processPlaceholders(text, subnode) + + if not isText and node is not subnode: + pos = node.getchildren().index(subnode) + node.remove(subnode) + else: + pos = 0 + + childResult.reverse() + for newChild in childResult: + node.insert(pos, newChild) + + def __processPlaceholders(self, data, parent): + """ + Process string with placeholders and generate ElementTree tree. + + Keyword arguments: + + * data: string with placeholders instead of ElementTree elements. + * parent: Element, which contains processing inline data + + Returns: list with ElementTree elements with applied inline patterns. 
+ """ + def linkText(text): + if text: + if result: + if result[-1].tail: + result[-1].tail += text + else: + result[-1].tail = text + else: + if parent.text: + parent.text += text + else: + parent.text = text + + result = [] + strartIndex = 0 + while data: + index = data.find(self.__placeholder_prefix, strartIndex) + if index != -1: + id, phEndIndex = self.__findPlaceholder(data, index) + + if id in self.stashed_nodes: + node = self.stashed_nodes.get(id) + + if index > 0: + text = data[strartIndex:index] + linkText(text) + + if not isString(node): # it's Element + for child in [node] + node.getchildren(): + if child.tail: + if child.tail.strip(): + self.__processElementText(node, child, False) + if child.text: + if child.text.strip(): + self.__processElementText(child, child) + else: # it's just a string + linkText(node) + strartIndex = phEndIndex + continue + + strartIndex = phEndIndex + result.append(node) + + else: # wrong placeholder + end = index + len(prefix) + linkText(data[strartIndex:end]) + strartIndex = end + else: + text = data[strartIndex:] + linkText(text) + data = "" + + return result + + def __applyPattern(self, pattern, data, patternIndex, startIndex=0): + """ + Check if the line fits the pattern, create the necessary + elements, add it to stashed_nodes. + + Keyword arguments: + + * data: the text to be processed + * pattern: the pattern to be checked + * patternIndex: index of current pattern + * startIndex: string index, from which we starting search + + Returns: String with placeholders instead of ElementTree elements. + + """ + match = pattern.getCompiledRegExp().match(data[startIndex:]) + leftData = data[:startIndex] + + if not match: + return data, False, 0 + + node = pattern.handleMatch(match) + + if node is None: + return data, True, len(leftData) + match.span(len(match.groups()))[0] + + if not isString(node): + if not isinstance(node.text, markdown.AtomicString): + # We need to process current node too + for child in [node] + node.getchildren(): + if not isString(node): + if child.text: + child.text = self.__handleInline(child.text, + patternIndex + 1) + if child.tail: + child.tail = self.__handleInline(child.tail, + patternIndex) + + placeholder = self.__stashNode(node, pattern.type()) + + return "%s%s%s%s" % (leftData, + match.group(1), + placeholder, match.groups()[-1]), True, 0 + + def run(self, tree): + """Apply inline patterns to a parsed Markdown tree. + + Iterate over ElementTree, find elements with inline tag, apply inline + patterns and append newly created Elements to tree. If you don't + want process your data with inline paterns, instead of normal string, + use subclass AtomicString: + + node.text = markdown.AtomicString("data won't be processed with inline patterns") + + Arguments: + + * markdownTree: ElementTree object, representing Markdown tree. + + Returns: ElementTree object with applied inline patterns. 
+ + """ + self.stashed_nodes = {} + + stack = [tree] + + while stack: + currElement = stack.pop() + insertQueue = [] + for child in currElement.getchildren(): + if child.text and not isinstance(child.text, markdown.AtomicString): + text = child.text + child.text = None + lst = self.__processPlaceholders(self.__handleInline( + text), child) + stack += lst + insertQueue.append((child, lst)) + + if child.getchildren(): + stack.append(child) + + for element, lst in insertQueue: + if element.text: + element.text = \ + markdown.inlinepatterns.handleAttributes(element.text, + element) + i = 0 + for newChild in lst: + # Processing attributes + if newChild.tail: + newChild.tail = \ + markdown.inlinepatterns.handleAttributes(newChild.tail, + element) + if newChild.text: + newChild.text = \ + markdown.inlinepatterns.handleAttributes(newChild.text, + newChild) + element.insert(i, newChild) + i += 1 + return tree + + +class PrettifyTreeprocessor(Treeprocessor): + """ Add linebreaks to the html document. """ + + def _prettifyETree(self, elem): + """ Recursively add linebreaks to ElementTree children. """ + + i = "\n" + if markdown.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']: + if (not elem.text or not elem.text.strip()) \ + and len(elem) and markdown.isBlockLevel(elem[0].tag): + elem.text = i + for e in elem: + if markdown.isBlockLevel(e.tag): + self._prettifyETree(e) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + if not elem.tail or not elem.tail.strip(): + elem.tail = i + + def run(self, root): + """ Add linebreaks to ElementTree root object. """ + + self._prettifyETree(root) + # Do
                                's seperately as they are often in the middle of + # inline content and missed by _prettifyETree. + brs = root.getiterator('br') + for br in brs: + if not br.tail or not br.tail.strip(): + br.tail = '\n' + else: + br.tail = '\n%s' % br.tail diff --git a/vendor/tornado/website/static/base.css b/vendor/tornado/website/static/base.css new file mode 100644 index 000000000000..543d6f24c323 --- /dev/null +++ b/vendor/tornado/website/static/base.css @@ -0,0 +1,120 @@ +body { + background: white; + color: black; + font-family: Georgia, serif; + font-size: 11pt; + margin: 10px; + margin-top: 15px; + margin-bottom: 15px; +} + +h1, +h2, +h3, +h4 { + font-family: Calibri, sans-serif; + margin: 0; +} + +img { + border: 0; +} + +pre, +code { + color: #060; +} + +a, +a code { + color: #216093; +} + +table { + border-collapse: collapse; + border: 0; +} + +td { + border: 0; + padding: 0; +} + +#body { + margin: auto; + max-width: 850px; +} + +#header { + margin-bottom: 15px; + margin-right: 30px; +} + +#content, +#footer { + margin-left: 31px; + margin-right: 31px; +} + +#content p, +#content li, +#footer { + line-height: 16pt; +} + +#content pre { + line-height: 14pt; + margin: 17pt; + padding-left: 1em; + border-left: 1px solid #ccc; +} + +#footer { + margin-top: 5em; +} + +#header .logo { + line-height: 0; + padding-bottom: 5px; + padding-right: 15px; +} + +#header .logo img { + width: 286px; + height: 72px; +} + +#header .title { + vertical-align: bottom; +} + +#header .title h1 { + font-size: 35px; + font-weight: normal; +} + +#header .title h1, +#header .title h1 a { + color: #666; +} + +#content h1, +#content h2, +#content h3 { + color: #4d8cbf; + margin-bottom: 2pt; + margin-top: 17pt; +} + +#content h2 { + font-size: 19pt; +} + +#content h3 { + font-size: 15pt; +} + +#content p { + margin: 0; + margin-bottom: 1em; +} diff --git a/vendor/tornado/website/static/facebook.png b/vendor/tornado/website/static/facebook.png new file mode 100755 index 0000000000000000000000000000000000000000..47738323ed50f47074a68991256acefe17ad9030 GIT binary patch literal 7457 zcmV++9p2)JP)zf1J>zkraL3o(eXDNO`M&dg zzwh@gWnQ~*39j6%;RkQkaCGb-bpU(Q+D_+9YK(=AwItL&HkmVVIB+n`5|}R=C`u1` zj>ktScpo823@nx&j&7##@(-twSOYErc;xI5?#FjgHDJL2K!g}0|Fnt4-xDzSaR?FL z0h%U2RW~r3m#~i4-r5}CXA$aJKHr}%T%IQRu_J4vvTn}I7u7L*Z|+UXqj~O*l~r?Q zHm~lQ%&K*IU*BDfaJ{A|n}u_Biuc(c4P0^0>GAH9=THB+qnqn{yg#ddy{!Bv=Y@Z? 
[GIT binary patch data for facebook.png and friendfeed.png omitted]
z6z_|*`m}*_nYu(f#l-#B7Es)^+Vw4MjK*YJQcWdi&lZyv^Jrhp2A@MkKD4KRs5i{c zo;+{z>1A}kaof3ngR|Onxj+=t}~`~f69v|HGE1#@v)(1Bn#k*ZI4FXPiZ2#JSn^f&4{4b}Q}2SV!^b9AlB z6}u}!3fxx zr1+je*%8xc0IVC>C&%tyhs(d$Op~BeF!UDOS<4Yz5WeLE>d+;vz1Ys(6@<3fn+mBM z5Ct8_b(Nfy{14}BTxx-PtNQ53Ol^&|)@c%wpV&F=Bu#bPtkj{kGL->2#r5-tgvH`M z*oe*+6tg;yJI%{KGWYJIOJ(vru=Zz<=j4Yc=(%eSXtLu2><{kNUIESECODo4s$Cl0 z>)l1ag)o8*asuTsIs~m^ZDmZH>Ma~0{-g%vk2|y11cI#Y7j14RWs1-p-ng3j>+V^v z#1XF2BX=@AEyDi(ZyrOU2od{nFo>#ky0Mo*A0MG3`V8x4f3QWGC-e zhdEfEFr&&aCSsNea#9^h%2})KnjmhY&31Rix!^{tOLX+9>Wpi;RF{xX*TlnMz;KmI zPi&KuyFQyi!pq~lPM*tc3}{?%)S{PEgKyG947%At^01;lNRoXAj&ZY&rtFR9Urm8vm$*es+V{`=x^~iqqNIgP)-@23yW`uxb z=o9rKg)6`94KQ$wJrzB~LXk2tRyJDvwCmuC-hrs8U9{1X zB411@`SG`5oM4hMt!{C{XZ_$FfdMVs+z=9qVp8jrv`u330;;w@fq9 zm%V$nwjSMi%r=ir?S*NcCLuAe#nH;1{%|JcrIn$yO-`+dX*`~5E%+D=zEd*KvxJ>i zk9MZg1~DjHJS}1^Z5_yvR29n4Co&zsLaO%wBD;*&qssryWKj1S0XEX;Js2;SLa=;N80*$|U;@t1T_CLADI11HSfCK5Nifk`Q z^(7TD-aFeM9ggV{mC9-Y$v4!s6;LJF;4)|dLX#jRlFZ!LW@TqU!e`L^g1uX(uILRE z)s_&fc;l0HtP%dw9Ft{KOyiGlH0}{N&<;9{ng>2$rt35(2yF4-V^#dC?+)=Vsu?W1 zBIF%X!DCkW@ljZ2TRo80B_MNp_`pd4Mh#&XebVQy=oO6ZE;qHsz29jc={#`G5q6=j z6wl0#?lFzp!iT`N**zxG-69gKTAv2KlJ;6^ak~jA6C~Qb8~k_q?8nP<_s{j6T3s8G ztFLZbx%bYd{(a>A$F!6dOyB@w@ZK7Wa35W-QXeZmIl(hE-g$mcb~-*Wt9n+M{NRH% zQ_8`-va2>FWrbrEZ^IPRnbgpNAIOCg9y$#EpI~aY_m}=Y&MZGN8~*x!RsGqA=W%o1 zhech;Z}U=+Kb}kfwI`e5Pfj}dg@!63?h%Cv$GASMeTSscBY&s|?W)T>B*u$1PSZq6 z8znu~6M$I2EciK}@)JZHq~C6i&j($Mckp=$YHIVTJ!|z?>dI+@Y7f^fY>t8sIKtVU zvHAs59dM9}DLA&;3~@llDac6Afc+oAG%rpEe|FUP5AMwT*B@+sb+?jR!N(;+^$%Dn fw*l_+{}*5Yv9#IFDZ($700000NkvXXu0mjf)o>)r literal 0 HcmV?d00001 diff --git a/vendor/tornado/website/templates/base.html b/vendor/tornado/website/templates/base.html new file mode 100644 index 000000000000..48da01678105 --- /dev/null +++ b/vendor/tornado/website/templates/base.html @@ -0,0 +1,27 @@ + + + + + {% block title %}Tornado Web Server{% end %} + + {% block head %}{% end %} + + +

                                + {% block bottom %}{% end %} + + diff --git a/vendor/tornado/website/templates/documentation.html b/vendor/tornado/website/templates/documentation.html new file mode 100644 index 000000000000..8c28740874ec --- /dev/null +++ b/vendor/tornado/website/templates/documentation.html @@ -0,0 +1,9 @@ +{% extends "base.html" %} + +{% block title %}Tornado Web Server Documentation{% end %} + +{% block headertitle %}
<h1>documentation</h1>
                                {% end %} + +{% block body %} + {{ markdown("documentation.txt", toc=True) }} +{% end %} diff --git a/vendor/tornado/website/templates/documentation.txt b/vendor/tornado/website/templates/documentation.txt new file mode 100644 index 000000000000..81262a605a09 --- /dev/null +++ b/vendor/tornado/website/templates/documentation.txt @@ -0,0 +1,866 @@ +Overview +-------- +[FriendFeed](http://friendfeed.com/)'s web server is a relatively simple, +non-blocking web server written in Python. The FriendFeed application is +written using a web framework that looks a bit like +[web.py](http://webpy.org/) or Google's +[webapp](http://code.google.com/appengine/docs/python/tools/webapp/), +but with additional tools and optimizations to take advantage of the +non-blocking web server and tools. + +[Tornado](http://github.com/facebook/tornado) is an open source +version of this web server and some of the tools we use most often at +FriendFeed. The framework is distinct from most mainstream web server +frameworks (and certainly most Python frameworks) because it is +non-blocking and reasonably fast. Because it is non-blocking +and uses [epoll](http://www.kernel.org/doc/man-pages/online/pages/man4/epoll.4.html), it can handle 1000s of simultaneous standing connections, +which means the framework is ideal for real-time web services. We built the +web server specifically to handle FriendFeed's real-time features — +every active user of FriendFeed maintains an open connection to the +FriendFeed servers. (For more information on scaling servers to support +thousands of clients, see +[The C10K problem](http://www.kegel.com/c10k.html).) + +Here is the canonical "Hello, world" example app: + + import tornado.httpserver + import tornado.ioloop + import tornado.web + + class MainHandler(tornado.web.RequestHandler): + def get(self): + self.write("Hello, world") + + application = tornado.web.Application([ + (r"/", MainHandler), + ]) + + if __name__ == "__main__": + http_server = tornado.httpserver.HTTPServer(application) + http_server.listen(8888) + tornado.ioloop.IOLoop.instance().start() + +See [Tornado walkthrough](#tornado-walkthrough) below for a detailed +walkthrough of the `tornado.web` package. + +We attempted to clean up the code base to reduce interdependencies between +modules, so you should (theoretically) be able to use any of the modules +independently in your project without using the whole package. + + +Download +-------- +Download the most recent version of Tornado from GitHub: + +> [tornado-0.2.tar.gz](/static/tornado-0.2.tar.gz) + +You can also [browse the source](http://github.com/facebook/tornado) on GitHub. To install Tornado: + + tar xvzf tornado-0.2.tar.gz + cd tornado-0.2 + python setup.py build + sudo python setup.py install + +After installation, you should be able to run any of the demos in the `demos` +directory included with the Tornado package. + + ./demos/helloworld/helloworld.py + +### Prerequisites + +Tornado has been tested on Python 2.5 and 2.6. To use all of the features of Tornado, you need to have [PycURL](http://pycurl.sourceforge.net/) and a JSON library like [simplejson](http://pypi.python.org/pypi/simplejson/) installed. Complete installation instructions for Mac OS X and Ubuntu are included below for convenience. 
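Before running the platform-specific commands, a quick sanity check can tell you whether the optional dependencies are already importable. This is a small illustrative snippet, not part of the Tornado distribution:

    # Check that the optional dependencies Tornado can take advantage of
    # (PycURL for the HTTP client, simplejson for JSON) are importable.
    try:
        import pycurl
        import simplejson
        print "pycurl and simplejson are available"
    except ImportError, e:
        print "missing optional dependency:", e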
+ +**Mac OS X 10.5/10.6** + + sudo easy_install setuptools pycurl==7.16.2.1 simplejson + +**Ubuntu Linux** + + sudo apt-get install python-dev python-pycurl python-simplejson + + +Module index +------------ +The most important module is [`web`](http://github.com/facebook/tornado/blob/master/tornado/web.py), which is the web framework +that includes most of the meat of the Tornado package. The other modules +are tools that make `web` more useful. See +[Tornado walkthrough](#tornado-walkthrough) below for a detailed +walkthrough of the `web` package. + +### Main modules + * [`web`](http://github.com/facebook/tornado/blob/master/tornado/web.py) - The web framework on which FriendFeed is built. `web` incorporates most of the important features of Tornado + * [`escape`](http://github.com/facebook/tornado/blob/master/tornado/escape.py) - XHTML, JSON, and URL encoding/decoding methods + * [`database`](http://github.com/facebook/tornado/blob/master/tornado/database.py) - A simple wrapper around `MySQLdb` to make MySQL easier to use + * [`template`](http://github.com/facebook/tornado/blob/master/tornado/template.py) - A Python-based web templating language + * [`httpclient`](http://github.com/facebook/tornado/blob/master/tornado/httpclient.py) - A non-blocking HTTP client designed to work with `web` and `httpserver` + * [`auth`](http://github.com/facebook/tornado/blob/master/tornado/auth.py) - Implementation of third party authentication and authorization schemes (Google OpenID/OAuth, Facebook Platform, Yahoo BBAuth, FriendFeed OpenID/OAuth, Twitter OAuth) + * [`locale`](http://github.com/facebook/tornado/blob/master/tornado/locale.py) - Localization/translation support + * [`options`](http://github.com/facebook/tornado/blob/master/tornado/options.py) - Command line and config file parsing, optimized for server environments + +### Low-level modules + * [`httpserver`](http://github.com/facebook/tornado/blob/master/tornado/httpserver.py) - A very simple HTTP server built on which `web` is built + * [`iostream`](http://github.com/facebook/tornado/blob/master/tornado/iostream.py) - A simple wrapper around non-blocking sockets to aide common reading and writing patterns + * [`ioloop`](http://github.com/facebook/tornado/blob/master/tornado/ioloop.py) - Core I/O loop + +### Random modules + * [`s3server`](http://github.com/facebook/tornado/blob/master/tornado/s3server.py) - A web server that implements most of the [Amazon S3](http://aws.amazon.com/s3/) interface, backed by local file storage + + +Tornado walkthrough +------------------- + +### Request handlers and request arguments + +A Tornado web application maps URLs or URL patterns to subclasses of +`tornado.web.RequestHandler`. Those classes define `get()` or `post()` +methods to handle HTTP `GET` or `POST` requests to that URL. + +This code maps the root URL `/` to `MainHandler` and the URL pattern +`/story/([0-9]+)` to `StoryHandler`. Regular expression groups are passed +as arguments to the `RequestHandler` methods: + + class MainHandler(tornado.web.RequestHandler): + def get(self): + self.write("You requested the main page") + + class StoryHandler(tornado.web.RequestHandler): + def get(self, story_id): + self.write("You requested the story " + story_id) + + application = tornado.web.Application([ + (r"/", MainHandler), + (r"/story/([0-9]+)", StoryHandler), + ]) + +You can get query string arguments and parse `POST` bodies with the +`get_argument()` method: + + class MainHandler(tornado.web.RequestHandler): + def get(self): + self.write('
<html><body><form action="/" method="post">' + '<input type="text" name="message">' + '<input type="submit" value="Submit">' + '</form></body></html>
                                ') + + def post(self): + self.set_header("Content-Type", "text/plain") + self.write("You wrote " + self.get_argument("message")) + +If you want to send an error response to the client, e.g., 403 Unauthorized, +you can just raise a `tornado.web.HTTPError` exception: + + if not self.user_is_logged_in(): + raise tornado.web.HTTPError(403) + +The request handler can access the object representing the current request +with `self.request`. The `HTTPRequest` object includes a number of useful +attribute, including: + + * `arguments` - all of the `GET` and `POST` arguments + * `files` - all of the uploaded files (via `multipart/form-data` POST requests) + * `path` - the request path (everything before the `?`) + * `headers` - the request headers + +See the class definition for `HTTPRequest` in `httpserver` for a complete list +of attributes. + + +### Templates + +You can use any template language supported by Python, but Tornado ships +with its own templating language that is a lot faster and more flexible +than many of the most popular templating systems out there. See the +[`template`](http://github.com/facebook/tornado/blob/master/tornado/template.py) module documentation for complete documentation. + +A Tornado template is just HTML (or any other text-based format) with +Python control sequences and expressions embedded within the markup: + + + + {{ title }} + + +
+         <ul>
+           {% for item in items %}
+             <li>{{ escape(item) }}</li>
+           {% end %}
+         </ul>
+       </body>
+     </html>
+
+If you saved this template as "template.html" and put it in the same
+directory as your Python file, you could render this template with:
+
+    class MainHandler(tornado.web.RequestHandler):
+        def get(self):
+            items = ["Item 1", "Item 2", "Item 3"]
+            self.render("template.html", title="My title", items=items)
+
+Tornado templates support *control statements* and *expressions*. Control
+statements are surrounded by `{%` and `%}`, e.g., `{% if len(items) > 2 %}`.
+Expressions are surrounded by `{{` and `}}`, e.g., `{{ items[0] }}`.
+
+Control statements more or less map exactly to Python statements. We support
+`if`, `for`, `while`, and `try`, all of which are terminated with `{% end %}`.
+We also support *template inheritance* using the `extends` and `block`
+statements, which are described in detail in the documentation for the
+[`template` module](http://github.com/facebook/tornado/blob/master/tornado/template.py).
+
+Expressions can be any Python expression, including function calls. We
+support the functions `escape`, `url_escape`, and `json_encode` by default,
+and you can pass other functions into the template simply by passing them
+as keyword arguments to the template render function:
+
+    class MainHandler(tornado.web.RequestHandler):
+        def get(self):
+            self.render("template.html", add=self.add)
+
+        def add(self, x, y):
+            return x + y
+
+When you are building a real application, you are going to want to use
+all of the features of Tornado templates, especially template inheritance.
+Read all about those features in the [`template` module](http://github.com/facebook/tornado/blob/master/tornado/template.py)
+section.
+
+Under the hood, Tornado templates are translated directly to Python.
+The expressions you include in your template are copied verbatim into
+a Python function representing your template. We don't try to prevent
+anything in the template language; we created it explicitly to provide
+the flexibility that other, stricter templating systems prevent.
+Consequently, if you write random stuff inside of your template expressions,
+you will get random Python errors when you execute the template.
+
+
+### Cookies and secure cookies
+
+You can set cookies in the user's browser with the `set_cookie` method:
+
+    class MainHandler(tornado.web.RequestHandler):
+        def get(self):
+            if not self.get_cookie("mycookie"):
+                self.set_cookie("mycookie", "myvalue")
+                self.write("Your cookie was not set yet!")
+            else:
+                self.write("Your cookie was set!")
+
+Cookies are easily forged by malicious clients. If you need to set cookies
+to, e.g., save the user ID of the currently logged in user, you need to
+sign your cookies to prevent forgery. Tornado supports this out of the
+box with the `set_secure_cookie` and `get_secure_cookie` methods. To use
+these methods, you need to specify a secret key named `cookie_secret` when
+you create your application. You can pass in application settings as keyword
+arguments to your application:
+
+    application = tornado.web.Application([
+        (r"/", MainHandler),
+    ], cookie_secret="61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=")
+
+Signed cookies contain the encoded value of the cookie in addition to a
+timestamp and an [HMAC](http://en.wikipedia.org/wiki/HMAC) signature. If the
+cookie is old or if the signature doesn't match, `get_secure_cookie` will
+return `None` just as if the cookie isn't set.
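+
+As a rough illustration of that idea only (a simplified sketch, not Tornado's
+exact cookie format; see `set_secure_cookie` in `web.py` for the real
+implementation), a signed value can be built and checked like this:
+
+    import base64, hashlib, hmac, time
+
+    def sign_cookie(secret, name, value):
+        # Pack the value with a timestamp and an HMAC signature derived
+        # from the application's cookie_secret.
+        value = base64.b64encode(value)
+        timestamp = str(int(time.time()))
+        signature = hmac.new(secret, digestmod=hashlib.sha1)
+        for part in (name, value, timestamp):
+            signature.update(part)
+        return "|".join([value, timestamp, signature.hexdigest()])
+
+    def verify_cookie(secret, name, signed, max_age_days=31):
+        value, timestamp, signature = signed.split("|")
+        expected = hmac.new(secret, digestmod=hashlib.sha1)
+        for part in (name, value, timestamp):
+            expected.update(part)
+        if expected.hexdigest() != signature:
+            return None  # signature mismatch: treat the cookie as forged
+        if int(timestamp) < time.time() - max_age_days * 86400:
+            return None  # cookie is too old
+        return base64.b64decode(value)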
The secure version of the +example above: + + class MainHandler(tornado.web.RequestHandler): + def get(self): + if not self.get_secure_cookie("mycookie"): + self.set_secure_cookie("mycookie", "myvalue") + self.write("Your cookie was not set yet!") + else: + self.write("Your cookie was set!") + + +### User authentication + +The currently authenticated user is available in every request handler +as `self.current_user`, and in every template as `current_user`. By +default, `current_user` is `None`. + +To implement user authentication in your application, you need to +override the `get_current_user()` method in your request handlers to +determine the current user based on, e.g., the value of a cookie. +Here is an example that lets users log into the application simply +by specifying a nickname, which is then saved in a cookie: + + class BaseHandler(tornado.web.RequestHandler): + def get_current_user(self): + return self.get_secure_cookie("user") + + class MainHandler(BaseHandler): + def get(self): + if not self.current_user: + self.redirect("/login") + return + name = tornado.escape.xhtml_escape(self.current_user) + self.write("Hello, " + name) + + class LoginHandler(BaseHandler): + def get(self): + self.write('
<html><body><form action="/login" method="post">'
+                       'Name: <input type="text" name="name">'
+                       '<input type="submit" value="Sign in">'
+                       '</form></body></html>')
+
+        def post(self):
+            self.set_secure_cookie("user", self.get_argument("name"))
+            self.redirect("/")
+
+    application = tornado.web.Application([
+        (r"/", MainHandler),
+        (r"/login", LoginHandler),
+    ], cookie_secret="61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=")
+
+You can require that the user be logged in using the
+[Python decorator](http://www.python.org/dev/peps/pep-0318/)
+`tornado.web.authenticated`. If a request goes to a method with this
+decorator, and the user is not logged in, they will be redirected to
+`login_url` (another application setting). The example above could
+be rewritten:
+
+    class MainHandler(BaseHandler):
+        @tornado.web.authenticated
+        def get(self):
+            name = tornado.escape.xhtml_escape(self.current_user)
+            self.write("Hello, " + name)
+
+    settings = {
+        "cookie_secret": "61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
+        "login_url": "/login",
+    }
+    application = tornado.web.Application([
+        (r"/", MainHandler),
+        (r"/login", LoginHandler),
+    ], **settings)
+
+If you decorate `post()` methods with the `authenticated` decorator, and
+the user is not logged in, the server will send a `403` response.
+
+Tornado comes with built-in support for third-party authentication schemes
+like Google OAuth. See the [`auth` module](http://github.com/facebook/tornado/blob/master/tornado/auth.py) for more details. Check
+out the Tornado Blog example application for a complete example that
+uses authentication (and stores user data in a MySQL database).
+
+
+### Cross-site request forgery protection
+
+[Cross-site request forgery](http://en.wikipedia.org/wiki/Cross-site_request_forgery), or XSRF, is a common problem for personalized web applications. See the
+[Wikipedia article](http://en.wikipedia.org/wiki/Cross-site_request_forgery)
+for more information on how XSRF works.
+
+The generally accepted solution to prevent XSRF is to cookie every user
+with an unpredictable value and include that value as an additional
+argument with every form submission on your site. If the cookie and the
+value in the form submission do not match, then the request is likely
+forged.
+
+Tornado comes with built-in XSRF protection. To include it in your site,
+include the application setting `xsrf_cookies`:
+
+    settings = {
+        "cookie_secret": "61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
+        "login_url": "/login",
+        "xsrf_cookies": True,
+    }
+    application = tornado.web.Application([
+        (r"/", MainHandler),
+        (r"/login", LoginHandler),
+    ], **settings)
+
+If `xsrf_cookies` is set, the Tornado web application will set the `_xsrf`
+cookie for all users and reject all `POST` requests that do not contain a
+correct `_xsrf` value. If you turn this setting on, you need to instrument
+all forms that submit via `POST` to contain this field. You can do this with
+the special function `xsrf_form_html()`, available in all templates:
+
+    <form action="/login" method="post">
+      {{ xsrf_form_html() }}
+      Username: <input type="text" name="username"/><br/>
+      Password: <input type="password" name="password"/><br/>
+      <input type="submit" value="Sign in"/>
+    </form>
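+
+To make the check concrete, the comparison Tornado performs on each `POST`
+amounts to something like the following sketch (illustrative only; with
+`xsrf_cookies` enabled you never write this yourself, and the handler name
+here is made up):
+
+    class MessageHandler(tornado.web.RequestHandler):
+        def post(self):
+            # Compare the submitted argument against the _xsrf cookie.
+            token = self.get_argument("_xsrf", None)
+            if not token or token != self.get_cookie("_xsrf"):
+                raise tornado.web.HTTPError(403)
+            # ...handle the trusted form submission...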
                                + +If you submit AJAX `POST` requests, you will also need to instrument your +JavaScript to include the `_xsrf` value with each request. This is the +[jQuery](http://jquery.com/) function we use at FriendFeed for AJAX `POST` +requests that automatically adds the `_xsrf` value to all requests: + + function getCookie(name) { + var r = document.cookie.match("\\b" + name + "=([^;]*)\\b"); + return r ? r[1] : undefined; + } + + jQuery.postJSON = function(url, args, callback) { + args._xsrf = getCookie("_xsrf"); + $.ajax({url: url, data: $.param(args), dataType: "text", type: "POST", + success: function(response) { + callback(eval("(" + response + ")")); + }}); + }; + + +### Static files and aggressive file caching + +You can serve static files from Tornado by specifying the `static_path` +setting in your application: + + settings = { + "static_path": os.path.join(os.path.dirname(__file__), "static"), + "cookie_secret": "61oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=", + "login_url": "/login", + "xsrf_cookies": True, + } + application = tornado.web.Application([ + (r"/", MainHandler), + (r"/login", LoginHandler), + ], **settings) + +This setting will automatically make all requests that start with `/static/` +serve from that static directory, e.g., [http://localhost:8888/static/foo.png](http://localhost:8888/static/foo.png) +will serve the file `foo.png` from the specified static directory. We +also automatically serve `/robots.txt` and `/favicon.ico` from the static +directory (even though they don't start with the `/static/` prefix). + +To improve performance, it is generally a good idea for browsers to +cache static resources aggressively so browsers won't send unnecessary +`If-Modified-Since` or `Etag` requests that might block the rendering of +the page. Tornado supports this out of the box with *static content +versioning*. + +To use this feature, use the `static_url()` method in your templates rather +than typing the URL of the static file directly in your HTML: + + + + FriendFeed - {{ _("Home") }} + + +
+         <div><img src="{{ static_url("images/logo.png") }}"/></div>
+       </body>
+     </html>
+
+The `static_url()` function will translate that relative path to a URI
+that looks like `/static/images/logo.png?v=aae54`. The `v` argument is
+a hash of the content in `logo.png`, and its presence makes the Tornado
+server send cache headers to the user's browser that will make the browser
+cache the content indefinitely.
+
+Since the `v` argument is based on the content of the file, if you update
+a file and restart your server, it will start sending a new `v` value,
+so the user's browser will automatically fetch the new file. If the file's
+contents don't change, the browser will continue to use a locally cached
+copy without ever checking for updates on the server, significantly
+improving rendering performance.
+
+In production, you probably want to serve static files from a more
+optimized static file server like [nginx](http://nginx.net/). You can
+configure most any web server to support these caching semantics. Here
+is the nginx configuration we use at FriendFeed:
+
+    location /static/ {
+        root /var/friendfeed/static;
+        if ($query_string) {
+            expires max;
+        }
+    }
+
+
+### Localization
+
+The locale of the current user (whether they are logged in or not) is
+always available as `self.locale` in the request handler and as `locale`
+in templates. The name of the locale (e.g., `en_US`) is available as
+`locale.name`, and you can translate strings with the `locale.translate`
+method. Templates also have the global function `_()` available
+for string translation. The translate function has two forms:
+
+    _("Translate this string")
+
+which translates the string directly based on the current locale, and
+
+    _("A person liked this", "%(num)d people liked this", len(people)) % {"num": len(people)}
+
+which translates a string that can be singular or plural based on the value
+of the third argument. In the example above, a translation of the first
+string will be returned if `len(people)` is `1`, or a translation of the
+second string will be returned otherwise.
+
+The most common pattern for translations is to use Python named placeholders
+for variables (the `%(num)d` in the example above) since placeholders can
+move around on translation.
+
+Here is a properly localized template:
+
+    <html>
+       <head>
+          <title>FriendFeed - {{ _("Sign in") }}</title>
+       </head>
+       <body>
+         <form action="/login" method="post">
+           <div>{{ _("Username") }} <input type="text" name="username"/></div>
+           <div>{{ _("Password") }} <input type="password" name="password"/></div>
+           <div><input type="submit" value="{{ _("Sign in") }}"/></div>
+           {{ xsrf_form_html() }}
+         </form>
+       </body>
+     </html>
+
+By default, we detect the user's locale using the `Accept-Language` header
+sent by the user's browser. We choose `en_US` if we can't find an appropriate
+`Accept-Language` value. If you let users set their locale as a preference,
+you can override this default locale selection by overriding `get_user_locale`
+in your request handler:
+
+    class BaseHandler(tornado.web.RequestHandler):
+        def get_current_user(self):
+            user_id = self.get_secure_cookie("user")
+            if not user_id: return None
+            return self.backend.get_user_by_id(user_id)
+
+        def get_user_locale(self):
+            if "locale" not in self.current_user.prefs:
+                # Use the Accept-Language header
+                return None
+            return self.current_user.prefs["locale"]
+
+If `get_user_locale` returns `None`, we fall back on the `Accept-Language`
+header.
+
+You can load all the translations for your application using the
+`tornado.locale.load_translations` method. It takes in the name of the
+directory which should contain CSV files named after the locales whose
+translations they contain, e.g., `es_GT.csv` or `fr_CA.csv`. The method
+loads all the translations from those CSV files and infers the list of
+supported locales based on the presence of each CSV file. You typically
+call this method once in the `main()` method of your server:
+
+    def main():
+        tornado.locale.load_translations(
+            os.path.join(os.path.dirname(__file__), "translations"))
+        start_server()
+
+You can get the list of supported locales in your application with
+`tornado.locale.get_supported_locales()`. The user's locale is chosen to
+be the closest match based on the supported locales. For example, if the
+user's locale is `es_GT`, and the `es` locale is supported, `self.locale`
+will be `es` for that request. We fall back on `en_US` if no close match
+can be found.
+
+See the [`locale` module](http://github.com/facebook/tornado/blob/master/tornado/locale.py) documentation for detailed information
+on the CSV format and other localization methods.
+
+
+### UI modules
+
+Tornado supports *UI modules* to make it easy to support standard, reusable
+UI widgets across your application. UI modules are like special function
+calls to render components of your page, and they can come packaged with
+their own CSS and JavaScript.
+
+For example, if you are implementing a blog, and you want to have
+blog entries appear on both the blog home page and on each blog entry page,
+you can make an `Entry` module to render them on both pages.
First, create +a Python module for your UI modules, e.g., `uimodules.py`: + + class Entry(tornado.web.UIModule): + def render(self, entry, show_comments=False): + return self.render_string( + "module-entry.html", show_comments=show_comments) + +Tell Tornado to use `uimodules.py` using the `ui_modules` setting in your +application: + + class HomeHandler(tornado.web.RequestHandler): + def get(self): + entries = self.db.query("SELECT * FROM entries ORDER BY date DESC") + self.render("home.html", entries=entries) + + class EntryHandler(tornado.web.RequestHandler): + def get(self, entry_id): + entry = self.db.get("SELECT * FROM entries WHERE id = %s", entry_id) + if not entry: raise tornado.web.HTTPError(404) + self.render("entry.html", entry=entry) + + settings = { + "ui_modules": uimodules, + } + application = tornado.web.Application([ + (r"/", HomeHandler), + (r"/entry/([0-9]+)", EntryHandler), + ], **settings) + +Within `home.html`, you reference the `Entry` module rather than printing +the HTML directly: + + {% for entry in entries %} + {{ modules.Entry(entry) }} + {% end %} + +Within `entry.html`, you reference the `Entry` module with the +`show_comments` argument to show the expanded form of the entry: + + {{ modules.Entry(entry, show_comments=True) }} + +Modules can include custom CSS and JavaScript functions by overriding +the `embedded_css`, `embedded_javascript`, `javascript_files`, or +`css_files` methods: + + class Entry(tornado.web.UIModule): + def embedded_css(self): + return ".entry { margin-bottom: 1em; }" + + def render(self, entry, show_comments=False): + return self.render_string( + "module-entry.html", show_comments=show_comments) + +Module CSS and JavaScript will be included once no matter how many times +a module is used on a page. CSS is always included in the `` of the +page, and JavaScript is always included just before the `` tag +at the end of the page. + + +### Non-blocking, asynchronous requests + +When a request handler is executed, the request is automatically finished. +Since Tornado uses a non-blocking I/O style, you can override this default +behavior if you want a request to remain open after the main request handler +method returns using the `tornado.web.asynchronous` decorator. + +When you use this decorator, it is your responsibility to call +`self.finish()` to finish the HTTP request, or the user's browser +will simply hang: + + class MainHandler(tornado.web.RequestHandler): + @tornado.web.asynchronous + def get(self): + self.write("Hello, world") + self.finish() + +Here is a real example that makes a call to the FriendFeed API using +Tornado's built-in asynchronous HTTP client: + + class MainHandler(tornado.web.RequestHandler): + @tornado.web.asynchronous + def get(self): + http = tornado.httpclient.AsyncHTTPClient() + http.fetch("http://friendfeed-api.com/v2/feed/bret", + callback=self.async_callback(self.on_response)) + + def on_response(self, response): + if response.error: raise tornado.web.HTTPError(500) + json = tornado.escape.json_decode(response.body) + self.write("Fetched " + str(len(json["entries"])) + " entries " + "from the FriendFeed API") + self.finish() + +When `get()` returns, the request has not finished. When the HTTP client +eventually calls `on_response()`, the request is still open, and the response +is finally flushed to the client with the call to `self.finish()`. 
+ +If you make calls to asynchronous library functions that require a callback +(like the HTTP `fetch` function above), you should always wrap your +callbacks with `self.async_callback`. This simple wrapper ensures that if +your callback function raises an exception or has a programming error, +a proper HTTP error response will be sent to the browser, and the connection +will be properly closed. + +For a more advanced asynchronous example, take a look at the `chat` example +application, which implements an AJAX chat room using +[long polling](http://en.wikipedia.org/wiki/Push_technology#Long_polling). + + +### Third party authentication + +Tornado's `auth` module implements the authentication and authorization +protocols for a number of the most popular sites on the web, including +Google/Gmail, Facebook, Twitter, Yahoo, and FriendFeed. The module includes +methods to log users in via these sites and, where applicable, methods to +authorize access to the service so you can, e.g., download a user's address +book or publish a Twitter message on their behalf. + +Here is an example handler that uses Google for authentication, saving +the Google credentials in a cookie for later access: + + class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin): + @tornado.web.asynchronous + def get(self): + if self.get_argument("openid.mode", None): + self.get_authenticated_user(self.async_callback(self._on_auth)) + return + self.authenticate_redirect() + + def _on_auth(self, user): + if not user: + self.authenticate_redirect() + return + # Save the user with, e.g., set_secure_cookie() + +See the `auth` module documentation for more details. + + +Performance +----------- +Web application performance is generally bound by architecture, not frontend +performance. That said, Tornado is pretty fast relative to most popular +Python web frameworks. + +We ran a few remedial load tests on a simple "Hello, world" application +in each of the most popular Python web frameworks +([Django](http://www.djangoproject.com/), [web.py](http://webpy.org/), and +[CherryPy](http://www.cherrypy.org/)) to get the baseline performance of +each relative to Tornado. We used Apache/mod_wsgi for Django and web.py +and ran CherryPy as a standalone server, which was our impression of how +each framework is typically run in production environments. We ran 4 +single-threaded Tornado frontends behind an [nginx](http://nginx.net/) +reverse proxy, which is how we recommend running Tornado in production +(our load test machine had four cores, and we recommend 1 frontend per +core). + +We load tested each with Apache Benchmark (`ab`) on the a separate machine +with the command + + ab -n 100000 -c 25 http://10.0.1.x/ + +The results (requests per second) on a 2.4GHz AMD Opteron processor with +4 cores: + +
                                + +In our tests, Tornado consistently had 4X the throughput of the next fastest +framework, and even a single standalone Tornado frontend got 33% more +throughput even though it only used one of the four cores. + +Not very scientific, but at a high level, it should give you a sense that we +have cared about performance as we built Tornado, and it shouldn't add too +much latency to your apps relative to most Python web development frameworks. + + +Running Tornado in production +----------------------------- +At FriendFeed, we use [nginx](http://nginx.net/) as a load balancer +and static file server. We run multiple instances of the Tornado web +server on multiple frontend machines. We typically run one Tornado frontend +per core on the machine (sometimes more depending on utilization). + +This is a barebones nginx config file that is structurally similar to the +one we use at FriendFeed. It assumes nginx and the Tornado servers +are running on the same machine, and the four Tornado servers +are running on ports 8000 - 8003: + + user nginx; + worker_processes 1; + + error_log /var/log/nginx/error.log; + pid /var/run/nginx.pid; + + events { + worker_connections 1024; + use epoll; + } + + http { + # Enumerate all the Tornado servers here + upstream frontends { + server 127.0.0.1:8000; + server 127.0.0.1:8001; + server 127.0.0.1:8002; + server 127.0.0.1:8003; + } + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + access_log /var/log/nginx/access.log; + + keepalive_timeout 65; + proxy_read_timeout 200; + sendfile on; + tcp_nopush on; + tcp_nodelay on; + gzip on; + gzip_min_length 1000; + gzip_proxied any; + gzip_types text/plain text/html text/css text/xml + application/x-javascript application/xml + application/atom+xml text/javascript; + + # Only retry if there was a communication error, not a timeout + # on the Tornado server (to avoid propagating "queries of death" + # to all frontends) + proxy_next_upstream error; + + server { + listen 80; + + # Allow file uploads + client_max_body_size 50M; + + location ^~ /static/ { + root /var/www; + if ($query_string) { + expires max; + } + } + location = /favicon.ico { + rewrite (.*) /static/favicon.ico; + } + location = /robots.txt { + rewrite (.*) /static/robots.txt; + } + + location / { + proxy_pass_header Server; + proxy_set_header Host $http_host; + proxy_redirect false; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Scheme $scheme; + proxy_pass http://frontends; + } + } + } + + +WSGI and Google AppEngine +------------------------- +Tornado comes with limited support for [WSGI](http://wsgi.org/). However, +since WSGI does not support non-blocking requests, you cannot use any +of the asynchronous/non-blocking features of Tornado in your application +if you choose to use WSGI instead of Tornado's HTTP server. Some of the +features that are not available in WSGI applications: +`@tornado.web.asynchronous`, the `httpclient` module, and the `auth` module. + +You can create a valid WSGI application from your Tornado request handlers +by using `WSGIApplication` in the `wsgi` module instead of using +`tornado.web.Application`. 
Here is an example that uses the built-in WSGI +`CGIHandler` to make a valid +[Google AppEngine](http://code.google.com/appengine/) application: + + import tornado.web + import tornado.wsgi + import wsgiref.handlers + + class MainHandler(tornado.web.RequestHandler): + def get(self): + self.write("Hello, world") + + if __name__ == "__main__": + application = tornado.wsgi.WSGIApplication([ + (r"/", MainHandler), + ]) + wsgiref.handlers.CGIHandler().run(application) + +See the `appengine` example application for a full-featured AppEngine +app built on Tornado. + + +Caveats and support +------------------- +Tornado was refactored from the [FriendFeed](http://friendfeed.com/) +code base to reduce dependencies. This refactoring may have introduced +bugs. Likewise, because the FriendFeed servers have always run +[behind nginx](#running-tornado-in-production), Tornado has not been +extensively tested with HTTP/1.1 clients beyond Firefox. Tornado +currently does not attempt to handle multi-line headers and some types +of malformed input. + +You can discuss Tornado and report bugs on [the Tornado developer mailing list](http://groups.google.com/group/python-tornado). diff --git a/vendor/tornado/website/templates/index.html b/vendor/tornado/website/templates/index.html new file mode 100644 index 000000000000..4aa716598bed --- /dev/null +++ b/vendor/tornado/website/templates/index.html @@ -0,0 +1,51 @@ +{% extends "base.html" %} + +{% block body %} +

                                Tornado is an open source version of the scalable, non-blocking web server and tools that power FriendFeed. The FriendFeed application is written using a web framework that looks a bit like web.py or Google's webapp, but with additional tools and optimizations to take advantage of the underlying non-blocking infrastructure.

                                +

                                The framework is distinct from most mainstream web server frameworks (and certainly most Python frameworks) because it is non-blocking and reasonably fast. Because it is non-blocking and uses epoll, it can handle thousands of simultaneous standing connections, which means it is ideal for real-time web services. We built the web server specifically to handle FriendFeed's real-time features — every active user of FriendFeed maintains an open connection to the FriendFeed servers. (For more information on scaling servers to support thousands of clients, see The C10K problem.)

                                +

                                See the Tornado documentation for a detailed walkthrough of the framework.

                                + +

                                Download and install

                                +

                                Download: tornado-0.2.tar.gz

                                +
                                tar xvzf tornado-0.2.tar.gz
                                +cd tornado-0.2
                                +python setup.py build
                                +sudo python setup.py install
                                +

                                The Tornado source code is hosted on GitHub.

                                + +

                                Prerequisites

                                +

                                Tornado has been tested on Python 2.5 and 2.6. To use all of the features of Tornado, you need to have PycURL and a JSON library like simplejson installed. Complete installation instructions for Mac OS X and Ubuntu are included below for convenience.

                                +

                                Mac OS X 10.5/10.6

                                +
                                sudo easy_install setuptools pycurl==7.16.2.1 simplejson
                                + +

                                Ubuntu Linux

                                +
                                sudo apt-get install python-dev python-pycurl python-simplejson
                                + +

                                Hello, world

                                +

                                Here is the canonical "Hello, world" example app for Tornado:

                                +
                                import tornado.httpserver
                                +import tornado.ioloop
                                +import tornado.web
                                +
                                +class MainHandler(tornado.web.RequestHandler):
                                +    def get(self):
                                +        self.write("Hello, world")
                                +
                                +application = tornado.web.Application([
                                +    (r"/", MainHandler),
                                +])
                                +
                                +if __name__ == "__main__":
                                +    http_server = tornado.httpserver.HTTPServer(application)
                                +    http_server.listen(8888)
                                +    tornado.ioloop.IOLoop.instance().start()
                                +

                                See the Tornado documentation for a detailed walkthrough of the framework.

                                + +

                                Discussion and support

                                +

                                You can discuss Tornado and report bugs on the Tornado developer mailing list. + +

                                Updates

                                +

                                Follow us on Facebook, Twitter, or FriendFeed to get updates and announcements:

                                +
Facebook Twitter FriendFeed
                                + +{% end %} diff --git a/vendor/tornado/website/website.py b/vendor/tornado/website/website.py new file mode 100644 index 000000000000..f073b67e6b3c --- /dev/null +++ b/vendor/tornado/website/website.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# +# Copyright 2009 Bret Taylor +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import markdown +import os +import os.path +import time +import tornado.web +import tornado.wsgi +import wsgiref.handlers + + +class ContentHandler(tornado.web.RequestHandler): + def get(self, path): + paths = ("documentation", "index") + if not path: path = "index" + if path not in paths: + raise tornado.web.HTTPError(404) + self.render(path + ".html", markdown=self.markdown) + + def markdown(self, path, toc=False): + if not hasattr(ContentHandler, "_md") or self.settings.get("debug"): + ContentHandler._md = {} + if path not in ContentHandler._md: + full_path = os.path.join(self.settings["template_path"], path) + f = open(full_path, "r") + contents = f.read().decode("utf-8") + f.close() + if toc: contents = u"[TOC]\n\n" + contents + md = markdown.Markdown(extensions=["toc"] if toc else []) + ContentHandler._md[path] = md.convert(contents).encode("utf-8") + return ContentHandler._md[path] + + +settings = { + "template_path": os.path.join(os.path.dirname(__file__), "templates"), + "xsrf_cookies": True, + "debug": os.environ.get("SERVER_SOFTWARE", "").startswith("Development/"), +} +application = tornado.wsgi.WSGIApplication([ + (r"/([a-z]*)", ContentHandler), +], **settings) + + +def main(): + wsgiref.handlers.CGIHandler().run(application) + + +if __name__ == "__main__": + main()
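For local testing outside of AppEngine, the same `application` object can be served with the standard library's WSGI development server instead of `wsgiref.handlers.CGIHandler`. This is only an illustrative sketch and is not part of the patch; it assumes the module above is importable as `website`:

    # Hypothetical local runner for the site defined in website.py above.
    from wsgiref.simple_server import make_server

    from website import application

    if __name__ == "__main__":
        # Serve the WSGI application on http://localhost:8000/
        make_server("", 8000, application).serve_forever()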

zja>OeZjDAe!!wK|mR==@itZrXSNXr#B%IKRkxAmb>35~Qgt`j}ofSpXFB z(EtV^I!S8@A$>r4gZ1CfyGj2X*yJXv=~hwdA{Zp1zs;BG5dzp31n^FRjv{L*qgsfk z&j9&DkQqr<>gA{{YJwn%)=X9vjrvR~1_I$3>aIW}tWo2VQqfQ^ig3V0Tebgy^?zp= z0Jk{*KYMnM|NYw;f3aA(tqb7X_%F{q{LhW`wI}!SUw)Cl|Lgw$n(ZIZ9RD;Z09#dd zD>#6#`+X23;Ey-K2l(SHpaDWY`BD=(mC0FgJsrd^OaY4OetJ5F-inxcXxd>%e@SHE z1?Xgi@z&_Y5xMpk@v#Rn@M9!l%3A0?U&lmElVzNvh6!>V;fte~j%*#Y=RRg<_!%Gs z`s;tVtN23vUpjg9m)id~)*j!-|M(4#KQ5zNNCETo|Bp8wd;9;!#@hY<|I6(Emj6G& zrx4GxAF>8db|lT_f%OtQJ&IxHQ{VuoK~wk~@)ojpihoWMpm5CKB(lL625S{&?R~RK zQpxFgY;A**(BA)KH$yZUwopIPI0x2l^1K)&LmvmhBJ5>k(wIk6w>A>HtYk7ty60%@ z>J(?S*-G)EW`pO1NTB&_o!i%N_d=T_y?u!bQ zoYe$7*^7z>*lCUpvc$}qF?12>jfZ#v1&8{2KN+0#l4!6Q4Y-alhhvWF3StCGa6lfC z*W=N7KFQHu8HK{Q(F|6L$YX!GjdN!hCVAo5k=w&X@0}5H)|R}+YVvwW`V)GgQ8(Z{ zHwE5ced9*MS$(=tYZ8b0%<5A2m5UoPF^wtDI<4Jy7q5AY<4by;lt7e|NeBH_WGZ$+ zm_z4)Nmg3ZmW)PU?vX|AbpM}bO1S<+5RiH2|HmNnx%%JFH}3EMf0@5~``>%}-=Ew5 z7Y%on+3e16GQM^UX5a6=AvR-~%8J*!XDut9`OCQVyx4hrxc?7k_{;Yk|7YRsB#ANQXA`RT^fKRN;@pg@c=}Q^%`+2 z9Lx%)Co@WjyAU0GwGF}s4w{Qz@c@JZB2B{7d2)dQ*a!ul8K^c6tCV)z^mMzU(Ezx zk^U}mliT1`sV2{bDq4tvQcsN2i~M7{B{Ph+$6K+}UaDv5-SJe5s7z)Z#BB2uXN!?2 zZlDI^o2Dk;gqC$;L4Y&=(mrM)ZHrQy>D?pm-0T0e{*SiyJ1yfD{@-g)@BP1j&GE;7 zej)t-wI`oH-YDV!-`oG*>woX{zyBHcf4_=`_a4>XRdL>xuJZBEtgGbsVI7Rc3sUr$ zg!dTNtn?&yB&^#xxm0ccr+fX^Z!UIrUJbw4cy#{f)py^YeVu;2wiDkg?S4B*_b9Gg zX9uYW`GIuAqAhTIvoTyNz_c*nF(z}5>i6ilA#~jT17NuKXWT{P{}bTKzyA5}@jd?Q zuXO&S@xL?duiK6PQ-p-QAOBx{{PFwkalaO{|9!H)Q9A#B{_GzA_ZRuQ$N##=|N70~ zf4Oi|Yc~M|`zD2<%@Fr11o^e`GedrzuQzMw8}Q#_`0q&ot_%(&7b%L|(c3AC{rWc? z&c|8O_wmY9zbNuPgw%!tF@xc5)=&a27dn6gN&(Z}!53S5wx;rx0k+oDfsF;`Jd_TT zXBrx-$ev6=a;j=5 z*!gJfJeEqgQna@>b@A68aA~*DS*u(D1#!lSMpgn@LKWStsQM6aj6lb8 z(Vc3~WU67j`B9FW3olrhm$<_ED@i{)gHi`s56=1_usa1lXi;dLW@(@41gD9exb0-Y1hXI1cxBs{aA< z|D9pKEh7J~eeUDGJ^6h7*}eS#OZc6WG&!YW(iM3(9j74y+ShNp!L zFLW>fW`V*q(QV?Pqc#*d9Rb>=BPkJOS!^wATgeFwYE;0%V325;n3@2Y##2O%TW>|X zsDtbGBM_PBU{JtL&m!^3H0$@2=8g=AP}a6i9qygQDhDuSa%@qLKgQE8D+8n5Q#vbk z6Vlk?g%cg6Ocs3l2*t=Da_FL7KuLU^u;^PA9{yA=N%kMw68@ z0_Ni~2iL_v`?1QpcM$zuH8{durk^K0KB`1gy?1~6hL4GKn>I#nn&T2|ba~SI^1QoeL)8k;KRG(;N z8jV3nW1^)Rpc=Oq#aM1^GQEhlfde1zL@&1vx8O*)LlygyXl&tR;E6%Gq)xetLTJG+ zCPe|$CPe-lJW@tmuc3$ccfZ?xz4HwoFW++eA4Z3J(K3Er{>!okU|f!NU+%y;^wsXp z%joNWNCK$Sla^{17D`7fH1y@^g3yYpvD~r$kO5~kfCUoC>#&7=MnevV@3H1iFr!5= zZ+Gnqw!MuNf$yrh>bEMEe?PbAJM~b@EIEOruPOu%`3W=uy&uN7MsSv1CPSJIy|kZB zQZ-LSaxau3f(k#XHg0mgyGimtEPw2A6Rdce-f5*+mw>XjZlIN$V;RIT=`6@7=i z*UT$^4yEk@^REI7RTB|-aC9=xFQFQ&O}fm0ArZDG-xASQfz*;n-BIxl1Y6nJa0Td$ zP(gB%b`v#qY4B(>b92z1MWo#Qig^IvV)n0z1jW7$EMce{9(xA~9bq8xi84Eod6%4I z{cLhgxrD30FhE--yp^^h?i$=6)%Y`%1`M54UGUPTCk0+k54j!%myVjc6DkQ7=wLLF zofM(seoy$O_kXw3|7%|P^UMDmpRcd``u}_X?_cO|Wu>J2A7>y+zXGAKwz2~Mzccke zkh&}NKai?a|8u2psQ%|nccK0VGIyu`2V!@o{x>7}$?AU~6*BvS$A@}1vX5AsJR)J4 zmR{YVJjz1}6&a`MJRw47eSKOJJIf|Xy@;{ z2Zsj{>PWM+__8Kl%J1cA#P)kxFWQAZeY*$$y@!t7dVjdr*?kK+-axluKfl@9hxxPf z54MHlBX-Bnw!iagXMgAIHjD?SDC${HtO$$#^ep;rYagp#Ute2mm~8Lg?*8}t9Ztqd zN|o;E`i4o@6pIznGQQb)x%>Xj;m+R=y%dBST={l1J?Uq~Ijkn=&7H&DH#=T}>8MAN z5577$+uu)isqytx?tS(gj|}%hOwSJWQ;tzd$#VnOra#*7t7r|2xBezn%TZ zv$eFUK_VnKW;}`iuRL83c=}u$P(U7MgqCD2% z70iY1$B3@t2=`F{H2I1S#2>ZeDBx3r$IUojl#sBtUHnLb@Uk$;$K7){+bZPnE&1so zjv4~@$F^T?{nHSB8UvWO{?|+R&ueQ>@9%$pi|fC7f#0vr|IGC-rU0M90p_g#$7>s( z`|JPl=lAxXzrpqY9B=**Iv~r3gzzjd4}|(e;LZx-fw1Hi)U(W?*ot%#j3n}ObcIOQ zXzNF(opww|M>~p&rYUbOlKwQcv9Ec*qPGpaxnWr)?H6ej#|i`6g!!0L7j1CJ)lmo4 zIKrERcKmn^L(lZ#+mkh2Go9##Un}({Sh44;sxZHTbA!QXNlIOiCI|s>nvK(wq~C81 z)5+>+od1w^C&j9&!)kFooFrE|1`PdjFMTUww($7ykg%bXJq$=I(U(ma2FGnHynn|B zZMuGtFgen7LT*)+^sfo&#CWnRQeLIP%lX3IzlBkulD2<#cdy)kPkDzKV}*vDRL3I0t3T 
zF~~iGzsmq8KO|Qz_IXT3SwZ*U_-wVGofNA-z?y_ET5mmWZOD(-ARD%RD4cjiKO`3k z(}o$e%HLjC1y&@56sxPxKRen+&D9aZru?aXIvsY|=ZQ?k_+d=Fctrh1EAlvQw2HJ( z{$^jAWMogEm_$RY=Rzd$?*iDh*3h=2BjE(i$Sl0$FTM1{w%Sk>DxMpJjK9{f6XB_* zjwv%pw)Q1_JlBP^`swg&at;q4J#woeu5k6wkp7WMf0P{`w`hixQ%h(0$;-V`cH)ap zlRld#+kbY50sFgh?i^2bmY;+;e$pTwA2WEq8hUFc-U~+e@*m58S}6GW?*G;U`k%-5 z_kX|0-~ZM4e?~MDcm9^LbvYtysSfWd5^&^}@c@BU&4nZ=*8N#wt^j1&Pw5Kv! zCV#65hzF0ZP*SI=*#hwV_E>@niyPR_!1f&t-3?kW2y+d_gMI)T$Mzj?DNFl2>BOC65>xSZMMdf~?(1f-a`A3mf z|M>iT-4C>QgGr2XJP@c1?ubJu>j+4Dhd*7LP>e)tDrw2polO7HF%|jt%`GDib&1b8hrZr1@pJ`m+7QC z%rEOVAgwuO)_Wn{Y-!oh>;-bIK3~mvYn~xGROh~(%wS6|B%!T(H9*J}OAb<{o=*FH zl7@uJ?TZe|KU@nozHEYf*p>(0Mhpr_ZSG>TI&sU`uN(j;u!YttDv~B(#1f9A2|0l+NR{Q_W?0-Ig zx^{2>^V=H#Pp|`+cmMzNbI<;JWBn=Gf8EP}zkvK_#c`z}7R`%&(5%=)X6;nwtmy>x z#jU0y+N-xMG$RrmI0c7Ui3-@EVZ)+d4Vx{OLr7P0@UZfs6qC6($cEOvno>u61!rXM z(ns_NJLq^rX6=re!Y)_u4Zo!*wR6|M*k=8V_y6k~k4yGH@a2B}|5on*MLkGYgQ(N< z6`3puP5UjGEBe+8!46j_%L-;u=pqsf!Uh8&D{c!@hp)4Iy8TR(4xxvHGg%lJeAD+S zcpRF701FY!jkW`!rOI(d-DujE+A3gXjwwdfar0bb2A zGd9G(D~EGK|K7^;x559_Rr;Ob|5x(=4Uqabo|f_d@9n>Sk-rN3f6FPb-ff^`;O8sw z&lk~FcLEISV1h1Q+xd9ZGGOwX(VN}FsBRst5$d?er{k{Yc{j)=EApc?Iv+J^wQ(HB zThVC(r*%SVKwm&~vVTYQA!oMKfKlY9(Hk{EYqcW>8)6aDr{mfSO@Pr#2nTDDuh3QJ zX@+`{FA-N{G6wcff)IM?rAcv(sW1dqv~( zLx6G+{0(hGCr$#x zI7^4HFLud&YdX?Sgg||YEmPiwpIHwoLmH7VD&E3e!l_p zisLZ>6=>5Yixn?kyig$o)MVPwFZ2^_=U0y(p5*yw58*fvJwT^StkM6YBS~8)r!boJ zcUyJJG>D|B~g}K`uXD8%ti$(~xI=ui^^ic%jh0`mR_kI@8xMRT%K^`$mI|JT1HpsXm@!4a~<7?mp}Z^atnV` z@yI%^Q{Zw_tTwrB=(-SJ1Y>xXD~|O{qpD<7I;*Q(`BN9eZ4$N$9;T?pYQ_K zYo{Y!*vW>cIbx$n_4Q`7){Nla^(PHjf{eB?0a-bXU_JrQO(wuH^=2~aTU3z1@_Gv- zqt^P9Wq2VTviLkcMEybZChLBTYbL<8`c_;Q9Digqg6TL>JcEC!MB~&ClnqifL%8#f zmK5jx>_ju1-qV_R%kRLumXQ- zCaHOevpbm(`aCqRW%u>b=EiaKJgRRrpERrr73oArTscT-(||`-csrz#_Uep2(zW9t z5KtC{^^|urQe zk8G!s?u3x6sG0ntgX!yqGz26+bV*Jos4XIBq~s#cdZ+;_GAsZwbu*egw5p4IkfIMf z5T2q?AVio^AoG9o@+rtJSH0{kElBji)Qs>@q-@WHbAq`B^3Rr#*dz3VtOxSgrp)RV zf7BbzXgbY$E&NAz$OqE5T&Q5DM`E_!XT?-&Q!C8a2cqqEI%sK zs-3A$W7J`Zt#efGo;Th8?D|nVBG)h(I25#c`NN0*_ptNhPe;e~#)}7!R$8AeW7JAD zPnb1uG$R$Nh1{!U_J z!sqC4A}w1UFPr8yTDX{N{${sPe`^NE)3i>1(@o@$a6Y%;&aZF=w-20O z4K2=LcQoALrf1Dh8O2W#dC;x3t% z^pKenK8}j7ou3K`%Ed4e$g2g)O{2lk>sIszkD(%B_i_^1NVsEAN+6>$HVFXdJOlQG z$`PVLTZxJ^F5Fo+05SaFSay<Sr2#H9rUPvO`xj~%2xuLTDsWyLsvoFI7ZM;Z zrBUY{kM?VLz6<+rQRi6by>yafeYHblt_~f3(XEB+(77x+`LKUY0gj+m@McU0pb^Z> z`Z)gIqv*rr_|czYjCtI8V+Tg|;-FP?i6>2;5tLWQJkBtK8HnGFu0SAu7D^=7os`dC zonm@=nqA?jy@+*MBgie3Gg`IsQcB%k3<{;b*L0c((JMV4GC}L3X^|%5eg^W2Q5cM_ zY3k!!E4wv)P!pp%6c!Mzl?YDU{WO8Yq?O0wZ5SI0)$h^DizpUhH+F)P>6EZCF=e1P zENTHF60#S+2#53OH=4viwrfUfsp=N(wT`fO*0;GP^_EncGn*JGxeOIf$%Q_p&%E z6ptSEZ#Sv4a3QJl+p9=Eo%C>>mH5!||JFZD{(Tf3|G3^<>iyJMj+V?KE3u|A91u$J z_qZ~=6<9Nf*RI+T{1@)V5PYll2mhtVZNsxK*#CZy!{my!1}paFW?*jXv~I0R?>kwK z;)x!co>M{y^K=qLOaXC>em3~tihD{2FVsgFu7gn_BjvBzB3XqtE+Zl2bV5U@(Fj#K zs1$}>P1IKwCD_ssFg5A75K{UhvBhkF&~%s=Ms3P!A_eV@(6K4X6tId?i%w^U4p;}M zgHHO}cyz?0U?FPtWOyAtTqdBr&aygGM$eNM770=dXM?jBtT9F$YYsNpYuQYG8+b4a zP<&LUGj+;n-j@iYjq0(kN`(6)>Zg$!bp0uux+Zq}NpZrRm0Bpz3MaJct!Zxc0mFp34%W7_XwZy|^w2zU{BZV`V(4GOfJ{E5GE zh=@3*R5o1RGM!MD8|6L6;YD6`qStx;k=gMe=@x9cz%Z4NIwLvzJR43#0iYQ>CRV(T zmp4Z5G>=8V_jV7hsaa8CEp7oiQ+Wd^Mj%2=Nab%a%J?X19zTl93IwyQNxl|+shc8> zN>w-x>|0XZ<@PdFaI;ENfhUmx9-}}yhbpF09OP?8>y`SANb5W+$ud3Y_d0&9rX=|-#mNC6V0Qxa-sw6f{O)UDXLk0{qMW`KM)_c(xM(o4p@>|ZHiXru81nyQ-$ z$OMNW2M$<|Ka4*N|NQ}@p~fFDwzC`_ZC?B_Wd-}6_|6b*dl1sA(3~jnHX=qH&3`eFEJm4X)FMK4w>M_dzB(HlC*c+tiM&t}l$t8{F7NI5II#XETMc$SAyu_`S& zZ-y8ilf=EIV}z1&pC6*ugv1%XP30?4k#Fc`npD7IYE+w32MkSw-I$7dwnW!; z=oc16NB{fec$2L<<8Yq1dT4OV8g;bfW%Y))9|Urda9z$3&N7}3d0;wc$zVFEm#5RB 
z9qf=zdOcnH*%g=$MVno)wACR^ef<-r^>jFAVq45AE(J74!wOb)@LmTD8kgLdHa$GE z_$rT;4-Z#96c6i1|BKx9_~BO#_=OKy2lD>y?)KixorbN*0=_dvI}JfxrdTetpn%=7 zWXgO-RvOJ4oJdNY2Y>JOcV6xO9fS@LhGExY0i)QA4&J|l=cRPiWzk``UzK17BP}}b z4uEY%L%uHNCo54*d@9D;PEi+Sak=;ds}vo>4cJxtQ#i@14Z{$s^HzkrQ6o(6Pq7m& zp%|ws3=rO>=mwz3$O}sXQ-}bJ!ff<@m|ck^M*HIgajAz1uoGb-GRS16SUJLR{J(md z)W3T790u()yLwUoYLmyDn{nKLq1Wk-J`hKz@%dm4kUV$jv6|>b3-SX^t7Dv0%5eyi zeOSlgYSoFC#Oc&0EQ?gl5sOzg#_0>T#oF;v8 zVCxKFxyWNW1fE`F48K<}9ai*E(sBMlAV*^gZCR@QATLo3Kj@e6x7NQW)ZRt+I4UCMNAYBXNbB^i(;a@aX%O8fzd|(`U zB&timx1_@;TC`V+-n%H%PzR7Nmm4z=0%m(s6pw3xx8!2%)S`hJ)^#>V*(^7N(j2WF zhtCoZqVLb~WGT569Dh?{BOqWE+4c1EOO8Z9bgO7Z5sW%*4)9r#h7ISiAc+kS#cFa9 z`;KvkiCetCVFfDaWCe@ss&3kj2NgTRT$p!Y-iBw)5tmDcBfsm^fYcU0qv_+jqEjA| z&D$SFRjZnE)8*A!)mKfu&Dm$)8=hDZO{SNgOwZ~um1_E!twuIs+4XVYU{G*6#0yM& zVsFR*HlrmxR65O>y>4(f%ZAgmG7&0NF4M7fIVegLx`I@a!45TrCKJrTH#iq&P=wLt z`9j~`9*z(h=b;jt^oV>ewU>q%|Ia52w( zhU*F{irZq|MY$~tFCLeT`Ds8Aq_41?-X*UFh6u)Uj9roA+7 zO7Zknp)}!l);>f_Mf{-_;%D%;6xErIBGRm=lu)yV&rpKJmGx9W5^vO4I^V>b^AWHt z=~uR{#_|H>$Qrm6RJLVY4ffob4p3X0*12MpS~V7Zhd2R27AR2rAZ>0`^Md(&faiU(Gt?l;&Mim>zyf3iAs!Y%+Uu48~VTX9>?x zwx``>oHi?{4J9%TE0?3ov{f|}S@c<(HhCVh72eqO0HGR`Hzoe|9!@cIx*<3+N(w~M<1?NzlRdQDfHd3I|ZPimv z@2G7zM{S0b>(1uo4;xT)NMn@{`U9TUzed&0YYD4`c$IY{{~cODi<48ARx7HTy71-hH?{+3wsw{ZM$Q zegNL=1Nl&V7>z%q;}47R2Zk*FFiqtPYCS%jqd!m{&p#yR85}tuU~XT2NT&IRQ9R}r z57-k2%aqcq_OC{Vv}(_%94(@ukNVqI2mS0$%#pyE^#ZDN$6PE{^|#wv&x4xGP+_QN zf5*zuKXa=2Fyi}J7DsQb#IQ~Dr@_WS>71Qjdxc{q@!qgeB-3O+#w>jE-^Jx`*ZW|vKi&M$|65>YJ5LM6Xr%6@JOa<4 zt0(0UtVvlnEwE!*ldo;e=I|%EcnPYr^z5HCOJ#PB6BvkS{M+>Woaq^ER43^F=;?X3 zYBYK}J0_aLveyb)#o+QF{b#WOcrE*MMlL_M1`#q>sF|hA6Zs@q+g6P zvP7mLsR%x2BDO;x$LDiF02A-EB-bW4jAvr+{9I${4JbQbTJIlYZCgw5C9P=~+a^)^pu<+s#bZ^C+uu@Xy5mW9qqQ?UapHB7XA@i>_fK8b8L@8_!EI$d0?)|JX1B%}9o=^B!kLW`^1=GP80fU=qu zy?EUPR9ViY^#MlBQJYR@UsVR*2EcyY`TR;by7m!Gojq@RS^36Xh6VMhWny1Gl~ZblE;%EXCw8lD?1ZXUz58;i`Yk=mv3C``J13xRgsnLxu`1aG&x*F0>8B+ zzM@2aE6KdX)39Z#vBoytLOXo12Jh7NTA=u5%N;_FVfM|vpMc{r&@U1Jjvz~7x;l&| zyvjy}ZEcAVqYEl26l8rgXyj8?i}j&*NQiU1#RB|3%_AEEG%6{tF>c)JJZX^Lw7gMIs@BkB93mrU81pQ2#&-X}G&e73tk z9dSw(&}DcT&0v2TY=@Ik5gSB!VW3u+5i>_zdh{uLaf!sr+;K%%C#6u0Uybu(O1zlz ziW`%drIP?i4o&Pf`}*Y**@xMwJTcwPXup6yWOanGO+(6jfbt3Ui(0uBmqSSnQct8h z>_RO@!>CiXW%`q3blO7*6wgly@d*S4#);O+bVn>#Fq6(0UGTTi1;2mN3O9p3+2tf? zfZi`}>nFBjgs6e12bKMF5?#Z-zX8L$p|<;*N$nm$`3J%Nfo+hNjjO6>Xa3efb>}Qs zPE`#BDR;$cT8ZR$_xgs0oZZaM;!BVn7|&53_EnNizg0Z}&>v}fx)B(=6hWCobMLrF zIPBIPbqx**N@&V0Q>5HgqSdw@vu@g&xNTD>%^$BT*U08{3zc#@WbA|Cym(t)&Wt?g zNL6#Aos8NzAKJQ~4CQv-Dl4Otk+K6eme zY_~N%r8Z-bJ%BA0$ItvNIyRziid>_DY-FgPtH1bX3a3RNEMP!Odtq8!^ty}8;POb?9tR-jwiZ$r^3Y)_Y2Fq zeTl1QoeS1O%RyC=n~tgMENEX*{fy~5w7nl=E87tx^fhE0)#Q^HKpLD0`LNXbUwfKCUlh`UVF&gq>GD73=bjBgUQ^eRl=ub zA;IwR#k~-gN@Ufit|4yAO2(aP1*^w4t<+z6YA>t0o~v|IwI^Ly1S_7_V&+jSzN)=W zvsrt-_J39g@-~;+ zq1LwTyVj>lhL(4HeU3Ji zx;i7TjuZ1P*#dpT*6n<3!Iy*n2L2oTdZ+!7C!7X@Yp;~O^Lg0^%d;EcMHS!o8h-v* zUn~-a1Y zQhrZCbpM^tPAIAF=|9wrmf zfQAx`1&cqk#oSb57_mbTTQ)`*IPqR%e4QXC&vaMRJomyUEY-}QIiR)n3Ov$EzBw9%T7s>ml>Vm8E ziorWnR$o>4#`6^|fWFA_nhuAH%-g^wCew{CbI@9>zVnzc@nh;!FF~*=Y2gHTSInmj zfWjp^DV>?qw>1X!2y5SgGYY2ao~Sw z!oSS`H{>-8Mv*1Jot1W7kX5^-E0PbTHt9}OfrU(e9X=^@4131wuVfnyjXe2z|8!$n z5opQ+HSKivwTTkn_AblQKR~dcgMD31_dS=}khm>}g3UZQx4N30y&O#lMa&-U@O&%A zvxnmw5e5ibUh++eWrRn8(9An~+|U%fEvjJi5_lhz_gY7g zWt_m#!KaN(;!Nt0k`d@xOlX;ZPtVjsF$g7yQi`l{zQ`#s1~PTPzAvV%Bpi`NC_%gI z6Y!1%qt?ObU{>wU?G^^LU+c? zRfGT)1;&*1s1piV&%MUQebr$GfFAHxAufxrAL;0(Zs=l%bd;T0x7dOtl12>dHFn>? 
zL5Ad&_*!A(3Jk7RIVBt@&6qBp%cmI!k#=jbo5Z+dAzEvr=)}r%GWL&#mt^zoAfUtC zaM{TmK`Aqkk!LBva8g7Xm<_=9E+%Z``@lhlOhgktDiOyL=EB=#JZ3R5nJe&t8Ad#s zq8q=xJdFBaz#J-AtKUSPlo&l{gtJ8(d{sLpGbbcSWH`ET*o#|)jb_`kL(-hUv(sQD zSK@-qz(HmJWNbQa!)&|oymqrzV)44csfT8|vqnp)87g^mt;D)V`-d$``!A&+D z)6| zLiiEK8LVMbcWD!`$i&9(EM5WZ!f)~bH1QgzspI--SxZw^KAmVnI!Mn4EaFn?O@5h*^2j!Yt=i&)=ASp4eUe=8iCA~%XS5uM!iMwsb%Y-JD$UO zNc=d7ZpFC8!JjQLcIg)-Hk(b;oaI$uPZn*&^*Qqejk6kNKc60$dFY;A8Eutqg~%!g za%@m5ONo_}sp)#kSGQ(kPb!lt8I`-ef4Zbc3}gqT>8aB?ADaQ&*p?0$;cPx!%msS< zClac5qy18>4THTp7S7cqGQ6@FXMoZX&Sc>*ts7&|i%yg`!C9!S#|#c)K$Fxu;Iu2b9bI81|e zRi+poj4>J*e2?bogd%Fo43{Y1rY^n|hgSfV1$){1{l$RY+y-YDVJtR-RhSx`rw@M` zJUM#y=fUyePsf8-htH0Wo*h1c8GUDKYs(BiwFAAM!xp`B*St4-!`jb*2)Kh;n!nKy z(CGK5>OHv-H6q71v%;&e`zZ-YBrNlTA`%+?484a_`Hn%b@V4F{0_AG6fj9emo}*et z`>QXDsAzP{vB)z0)ZqdJ<^2G2YaCk!C?zi^c44HK&6B1wdQt8fWfR^G2V|j@%hf~M z4>?K5gtK~%NWQ|TROfVVG38=lNP7JItJ{{NvVOI!pu+3~GWNv$rkWCVpXcaQjbe;8 zz-mg2LJn#;3YJkP$bL<|=Y?VTDMiTT1&27~tKbTmvlt{8GV&6W>}3{T0jf8qX@!V# zDZQd&L4qz$LPrxzX2(6tq_Bt6-7>-kpYJS=*cZ0dP z3wqgx%D3UE%~%i)-?H=r-F#JcARr%MMruye&?37bfR`K87kI@jAk2*#{>Us9cj9Qw zhMIVXdlfgKAkn9ikwP>t(SZXd%1TyPLK5c7j5=?tB5BMF$2~Qio7)82VLWi3KW%SC zV7tmVx`4e1dY2qbW={7Se7}cdS?8Q-HnrC24+z;Qcb9q2x!t8mu9WJWtW=$fQ#G8&TbD{~-7GydP z88fFhJ1sBF7i@)bT{>(IDj~LN3x<=qBik5j>f~*UozG?>D{v@>SQ| zjl>OT?R-jE*7MzNw7=Vh5^m1E?iF=zd&XD7Pc1jI2Aa0svrHN+u4FTqFsHYGi&v9- zB=eij!I&x+$?cL-JC4(74SKLK`Jw#AYMzCRy)cS7pb7ed66ynsJ0QGa(HUxT;Rwv^ za%a`$k6^dBeHdF}t-jYbvEjNLnLH?JGvK_atS8w^X`5;_dwkuB7uje{JBzb(j4Y6) z@8Zg>CjQxA=x!B^D684eVCGy&HK6F+4B^kqOtmzq#_u^F;NfU)T>IWu;kI1(4R2G^ z{jdC53~3_OTh)i$9WuPefu-fc8^k)dp1Ig|qe93sVng6lxnJ)tapq-bDojl9m)N6C zQad#uZ!jvVPk16 zZ@%2RX>YpuUaQtuy6VYe?N|6(f}u3|*Pahs=S{oidfEBsQ(Lxb`)*6e?Toa#Q@qtN z%}-rO#wmwuH_wLDyU5|vm-QUc)o<$R7Ok~{4RANIeNzYNCbjELd+M)QYd9x79J!TC zVrQ;+DDsYh%tPQf9E*#MV*hh#Qf?ZJfy5;$*aaGh{ig0|-BG~`1B*k5Tn1$~N-zd2 zSb`wU+C*i5LB_kfr_*Vhw2sbO__Bt|(if%E@Y4r!Pz|YM{yEiC|IR6~Y#Fb4tZ%i< zTx>&@PZklD@k}pEN-rm?;;y=z|EOs7zZ45n=*=Xo*MBrC;z~jtLL!gFawH=^P@c0l z#5KX}O9EwR`o$KkJ#yDnR0!Jy_2jg!NJUM_SLmaA6Q@%+^Gfd1B@_M&usFA3bG|UE z^BL@pKIF6oKDjH07xBC$9k&!n{{~*kB>zRZpU-aea!yWowa)r-nkWj`5Y|*u96&GKIcnx-nGql*{pfW4LgrXKX>Uc z9wFC%6+^zYxA~z{{M@Vt_t2E;ohGLkpw{2>d){{2^E>dl^YO&wM7(Izb(* zxLT>Bp7rH))cc&GBPo0f*~HFm4>r~WuYab{Lw35hcfX2gvU|BcqTLWb87Qo>C!$ih zZ3I9!oYZVY^PufbmDyJ`fA8#jEi~&HcmyI1KY)StHimjaXZASJ8OR!jPJ`XmcFr@ZL&p znhtPw{8J?EyPK)jy3s~Gsh4BM@m_(Oew+;!;vnTAle4keX=cga~ zqs@L8AP-c|Um}XsB-72w)3GW<=o?cIePNe5&gb*`7!UArFu$Ia?-}j{g|&XU7f=)7 zFUA~wHyxX>NWuv!SO@Y_zx2f?kQJ$RTMBLK zh6Ky3klhqlpjk3`MCgl})&3pghmcKUn!(hB&AY{3;b><>*DzE4;%qS<$GA0fNC?B^ z5%nI@?=$wAYn2A0ZI4bm4tfw32OY!xh~VnvzX0~=h!RQ6SjdL<&VK$uG~30k}=%)m@ARb)lm z6ko+LnjV5k5|1>FJK7RD_R11^@LI)i0KIUJQAlf&r)xr}0iy*+GEmOAq4)H<0G>g+ zW>UIDmcuYp6et-Cq%e%ek&2L@Hjjv{Scm8pDXgY!_+r02k1;e%ndt0`xot>BvUSZ9E{hK1u1sYv=Pmd98ZgTBira9_#(f1M=YBYCv5hY~| z;b-w23*%@x=r2Ro9AxJ`B4ms#K3gQ?5#a=##UA>IvY|fPdPEpTxs{g5!qdxWmb1(3 zFhdq;R3H$+&I~gKf&SoFA)~0pRQymc62xhlvt3Eq&M=QA*l!q@OhM}Lqd0qq5i($O zPzv`pxO+BqmxSIC>cR6_*~Vjy;RO-O2(3G5Y*HX{fkF)eTU=dUYXg@{vS#A(gQAX_ zBY{0E8_q_fNiye|jI-uq0ZRp1kH&0pp3YS7okzpDYN`ynfF040l!reNNRk(EHYV|D zlKdiQzB18pn$eU<=7gJJw4ime{K* z&bQ%H*7})G@4%Vf2wq@eOA@2Sr@EOd$>(zL+D>3C;gm|1()@)2uQWI$XveSH?VXQ- ziMAZ4V^lQs?-|+^_&MExb<}6QhK>bM)}T@P6-- z(IOhxqjs^!l*KE07vuB{^1O{>WPCF5j-w-#p%Imn7?CyALx<#chdjs?20uY>XHG;1 zEU`(wg+bm@Y}z{S=yFr2$~&dMbR2rpwJoF?#I}OCMTgD zi}o-fqfJy@6#{aO?77*|=_F>?8UriZRVe=*!K`7416ihvjQ4M#3p`403Ii&`A4BhX zI?K^o`--c~u46gZBpy|Aph>-6o$d~Jm>1JYW4pDXhx1u7k1JdpvZBa>aM5g`-zXVT zpm=tX%sappCFw|zeHnXiST_vFnnP{t<`OdCx*>r;`cM^!uRAkc80SiAvYq_Qo`CW& 
zE~oc)yci)Tc)7SdQ2rE_Gb9k1#lyMj+KT>;*C4OGr%E()1|5T8gWiMLg9g`-V|-oe z6=YFCI!m*;jPe9lVJG8FK17yRi4!o7#tY#>P0o8w7*|fiRmc#LmScyq*r9e}Gd?2^ zO2c9_xf(;LI*FmI5PLweC_R5WWL07m`Xx|yV>7Uf-EZDRZ3q&z-6=K;zJ?jY($j9w zz?)51dV>`c-m1)@sbk#ORcl*enOVs$u5uas_C-7&%&sp8mjQCVk2AGz81^x3B>U=Y`Od+bEBpgPh~;v6mt&(N2fyi@*^h$UDnKGzT6fbT!)q zh2fEclr7^YP%wY;I8sSL5Lg<08JIoCycy=Xcxhq&%LH*je8=o9ZytKST_zWoqz7nK z*MO!GSc^!(XD^XKG_86O!%Si`+xpG2#2Cu&?53ppdpI_CZ909Ei55mx43HaW$j%=W zCpgg1H|$1)9t*e>^LJ?HarzM!ia&O`%JYvSkiQmhW9#fjzNp z>wuGH(c8^aWDb7)=r|+$gcJXmhqL8pJEKR8VI;0jQ{b{h4H%5VP1o+$9A&nj6#t0__ zThpI@=sygW0t&5pzsK7^7C{x>IJ&Bv!E0!dw+p^fHgsCFUD!@OF&4>_A^?fW$zo(p zESkP_`07)K-drs@$>tK2rIstMi1(>cOI(*=w~atj4pnHY(e_-1X1fL{A~ep zjU(Mq^6MGNeklk%naZs*>e7bMv}PK_gVIg@bGg%zUVKxc@ztBax(>yAOz)vr-y}PK)x_@D+AAtGeq&b{s%UIB zPfbvd(gikEbtM$6q7RpFQWfp1oZ3EYKZfPo(0w#GbboRZC{7u7f5)!B6x0-sHu&#b znXU|_#>;P^;7)htlwoOsp&Shg6=HuiAm%5F@jSucY{e3cDytAU1cap*EmuXSq*k{A z$}3388qupS^j@WM-I`2ii+S0^u0q=L{u}ptvVMB=24kqcIg2tl>Y`uXyzxZ;Gy_?B zHV<#2^&>7|3V1NaI3e?Nh0=Ft`$Yw~pRG!$PPc0U2mby|w3SuBG=;%DOQ2rawMRY9 zmx5*hwxUB}7%G(1P{O#;n2|t>T2LG_#s<2~V$v7q@o+)LBKE#%4<_czMD#+s=L+w; zm=SF+o)5zsO=fwDNdRnU6~@o4P7>)m+ywI#h9nF_H6LUg;7i%`(AD7!ygXfL)O8WU zSz#$^q|K5nxzpUG%jd4zcpOnPjUA*uEDXMu4c8mS**t<#(wNv9iA}SGwBBHEH?{%X zfoExg3iVv0yz>~dZ=69c%Jf<`F60S`CCyd9WPznG3Z29eo+&jrc1uB4F@z-58I3RC z=z~f?Y54=T(L*D+c~bBkWeXAYT5xirQa{kXW1kvJ@LVZq1WreaMWNAn@)WH~Pz4c9 zho!r(CGf9HpkE1Aq^)4Xjozoh!KO~4i)3h`tCx$dbGj?nq0XJIobFdgx?dgXK04BU zbfi0Rq&sn>J9VTxb)@U}9SPx~nex;!&%6bit_$Y422ZyF&oW?8dzXnqf1|(gVK4~T zrVm-8la0aYMnHxOuY&$I2Y*l)rY_3>B=W(eP?wo@lJhJ9mUnzzUni_mU3K8qGMdin znbZnG78%9vRKm8@Fg?3m`F)o3E>gDs>{XRLs%E!{Qd84Q5(-9@+~3~q%2(fIsIr7) zaRUM5;t7$0j|!RUExK+0Usto8Thy#lmmPOqD&{IY`bJQ`8!T3qIGYD-a8*=@fB-1XpEKZNSmQ8HG(MGO(Xx& zb5*%Ag$yp&IWf3uRjHWr(*W*q>Y=_*v0|d)uBt+Y791|r;5?VfLRS@V zLX)>aWz$CvDQQy%?64RF&KN^zwunSEzK2R!1Ycl{vkT7hHN~Y!&aat==Y*AXRM1Is zs^*wM^aS6Xt^}P~(-CGNb%tN~E?8DAAL#ntCOy)(ETj&?knE}ja1Rx9VPF|Sds`G*>)to%$m@K`XG0XEa|vRsv5xpW)!2l z6!WPH9@%rj^(5X`dv%zs+-QZZ*>pgfl~U}E6T#0E@XU43g??h^T4D>SdlNC99pIO! 
z&mhEL3`uJOaSD8==Y|bwmIezIeNTW)ZF&JV`R~PKMrG2rt-w{+wOX1s4rsQU$RF1J zcz^rr2O0#;UM7Y8cs*Bm75M!TBy=TBlAAuZ^H*?Iem`C%Jd}(Im#YLiT ze>_j(E1toWo2B72o&$TB{yiRII60NCE1vtRULqe>y$DtG%txH&N&O zVduNk%?}uR@>0(Fgujqic^hl!JL3YbDPb4oRE_zfGwAJib zW&&A#08!`Riv%^F^6H63iOw*G1LkXe4e?{5~Q zmT8mBkaN`q(8A@IAAC5U&w}2`&;6#q-_K7sgMDi;Ft4Er)eBM=+x3E!iaut-5A7TH zK^NP=J#GhPwXKkLOCx*lHtf58%s5Yb!9lHF!1GQzVgIV)gxdd8B^jn_^+Wy=Y``~I z`c_7B8{V%3V?DCmM_!lv)M(|60(Swd?Z+`HjB;_p;T+?%lAg2RZ=u$J4ae^Lc3#xc38a{{9)~zui>BVt$XzEAK~BkU*E;=+uIL%_-IFdsDJO%+pX;fl`FINNRkq6eM5A% z3Yzzk{`;LnEbyCxA|&+ptEE&(8^wNFDY#p?yX0G9iBh~WI)|~w2vgT+ZUDW1<^7lt zbBN}rDS42~t?ako9*&ZE=QtX_HEB(>7@xzGQX#2fe6A4`5<^j6FTP&HMNJr|1BAH2 zdysjp*^jRV5u9#t+^3TkY$^@UWyi~kR9`kulu~haoL2M=49rD|*zO!3yi4+gaVIk4 z>KqA_0q5!g@8AefktwM;LEE!_pM2djMI~AISVk$-btQ6q-BswL=v@p96w0d@NNG-v z;e!5!E)N$A@?W^{qKgQ_ZqxGB1hfn7SuEVzgbaD;_LT7w@;P)mx^Gwk(PU>SBy{D8 za-w1Yot|h9pHsv8_Hq1XTY9_mNl-RC=H#Mu3n*5^;B+Q&-HzDZ)W*EarjK zv{l=T8t_lLb?6Q`Xx%(>#A39SX~zm9S2Oc?Z^VKB2swVKSeyA^Cs zX5Y09Wab_Wg`+C6A||7Igq9nGa7d4m3pi$5?y`%8bb|k!3PN(7`?vuWi7=5=oPb0^ zg?jeYHNIs~P8Ex)C~bAAqo5KjcTJaI!cOM1PWruyL@w6}Qk_^=|%Bc|6oKM(pLSyCAaQRmys(R!%O~woS3c@6De^JIwU z5R>a<@t8a=N9k}ep}DL*$)48x#zksq3*M|dvE97-ESIf zy1K}+>()k;UF7I(^!5tB$#hW#f{+9cq1yOyvUSS8PgIO0q{Bc!tqr^aXFB-JawNjQ zt>mr2Wk4lo*R7zEAY9BwsMZ$><+Y&d#8qZITq;rJ=N$Te7^{W-#VzkKqrjV;2vLgp z@LJp9R=~YdpQ@D?YYF2?HfiGQTdIgS&2E!n=JGH@euy8~cOPrs3vwP#K$_^jczAq#`0`ozyC)C7e^oG{6n@d9QD$`8 z-uXDX$d#KcOb0|LZhLr#@!kr+h3U96CHnKP*2e26evcaV{j=vU4<9{zb!guL`D1?n z7QeT|lKc}u(mVb_~Ya&^4B=sWJWy@boWs892#gJWCN)Pe4n^;l$ zyg7in=j&+ex?~e zEiZO~Ps)v}oAbpZ!;Yhns_2=|L9YpsKv)Jcb>^Ig3?}9ID$+1|EU}}K#HFUY(0`fp;ImGrTn>k@vZxBK4IH55$eR+_*yqdVn4QU#{g=@NT?w2SS3qU=sSph4q zf~7;bp7p%L=TGqDBTFa5ZG_Va9^u$J%xtw(6^pr)`g-#BnphWgah2;7bS_pZevb$| z&=u%!YpgBe`qqi@^H*~sr!yS00F|-0cH+^ut}u#trmjehcr2d6ZOQ4jsNvps5_OCS zRF4D=8U9gsedI5TJ%gCKR0XVzWYhI=RY$LkJxSGDeVCnLe3uQhQQrQl#lc$JQ1ibd z4Z%n)pHd6=R4Zz=dfQco(}sl>jL@X_qNhfuz;+~UrRQ4Jo z-|@+*m5+l7OjsZ`YNcmry~Q&QD)~4brWqN3k)ClF4KcVwq!l^MTv2|9`!L^w6LcSi z-ocR>i{aAIx%~1_|CjNNmc}fGl}p+{0mY?)MnW`Eh2Uld9g5VDX~^PEj44o^$QNs( zQoBmtCgF^fW?`CLbfIliQKIPvJxR{cueMPP1})4%GEL@#f#`yTTgg`ri<`l zKBoHWD>J6VcT+-X!_dr?wG5`Bmfd}3c*GmSau}(yz zZ?p;=4!uXeqHAJ|a^?N>^oeN#h#i+L82*c|JXJj$8W8hlwhV!yp^c5(d;wEocgBdl ziK=|rhsvd139Fp2lC1>TiDxmKt5XS~LV$Ey*sp)N?)_udY()NzLlmAXHr{4G3;w;Wi$s|Z2>aKZ5c@9VP&i$U( z95g+pv$ssVQRF}sNcatd(_JdZHUCcozhXryFp(@jdApG9?!8_Kni;SA7$MdhWohAO z*4%3Lysh{5@1Upv9~JxFKdi0i571%)+TVf+i5(Z{%R^gYs^pRMYCA!bYGiax10R)V zGP>7mZlZk!vBKOPAkojyJAd>8+ulOmvBJ1cX@^i;?6dS`d=4|S0A@^Qb3)n;kESdJ z{Sc35Wi(H`Sf6?-sR8v!Jntw4j`!K~V+;m9CM;zHz~QhPaLDL09NWlQ2gYk*|BNS9 zGhpK-A?f3QpL zK3{kzFCHHM@G1<$Q|>T9uEunnz~3AU0``mL(Dbd?-)k5^yK9vvLeuH^Dg>cLUZg!0+Yr)bRI3nb5>Ziy`>dF zeFM3S-t{<;Axb0UpXiByBDr}ksZY)Wlt<@SHVFLDyC~c9pFV$ls7DK54q!dSH+4${ zk|HW#6>Je!4K{VYxNK@TyrmjKpW1^JVXNP^*A(lerZOv3wx2g>F8vcEHF$aW^!Z;7 zAD`k1j+gYrd+rIgX)y+e%@=QII^rearJJ_yAu`^19Kn7TOWaarr?b4)i~*Jz2t`OV z2)$SOQk+E9ymtekre!?T^pwGnH|P!qYNo&ul;jUqqQvN>Xpx+=T@qti3Y0p%g|%=a zEz7J($e|-d2VWG;75nZw{kyDgl=ODv8;cW6?S@|ewMp^u+r7L@Q!}AaoSXX4WNKokPLsP0l9wU24 zXI9?Q2fIF`kDh(^91m`)njC8Nf!*am-4iyq?dh|JcsIh>8{jw$a9WE}pU5GOahvQh zQF|M&*i?-=?D3Rh?tkf(y% zz0@vDlVdtCpW-P1_FmV2{xWB}P>-CU*#NdMJyi4QkXa zXAhc}J==Lio6lPA9_Pfr$J15~kzh$EjJlhDbZLl2d@mBXX8?&Xh-fbG>NR)&lA2Nq zh(}<7z_0nsvewQEic%_`8AM#+*25Q?ZM?!A({06x!q?ww`;r^*^}L6MA;}b&retJq z=xhM7p7|@IsIPlpcd_QAStP<%1ap^``4INv}U8TWi3( z_aMNy^FAMzb-nK!Cz0%ZIQ_YDY}}vjp3SC56B*#@W2J)z)=$z{8LetwmRDEmH&n zBN!|8)`D-9>^cp!+L69hT?0Coxz=1+Yu1pPtuX6RUs1D2{e8&_oewV$&}NC2C+;w1 zDn`vw&70sAFAg?H@bJzb3{bf_P&#lN7#P`ki*=q#hZ%T~_jmty{y^%PMx(T|6>fJ& 
z@g&U=GVg-^fhqlE%GiRxukYQ%&+V`8ZJVF?_1>NP_kXv&y>oA8=g!^j?XBP8qkCJw z^S1sK9k4)l&GUYT!3UPL_ony%L;u#k>MruEdzMVQ@${X?nuEq#W6gV%&aN|zD&_5L zZGGc?2iq_DlhL~;$uORx>WPDbqY~-ijO;SxP22m6DCl>>Ew6)2#l_m3@F(=HsPmUfPK6`c8feKTW*V8cqVr20@7D*=8l{2W%kjz=e(G`UoxyZyh z5bBG^9Wnr3o}SOI;7kH4qXaqkv&G!%ELA&bvzY*05vHF1@RfJ;%J;r~`0D6Y+xzk8 z_=o4OkAc>Y4`04~`0V)T@Rj%crT6Ihv&TorN6(+ZukXBv&;IKD`RLi>wn7Do-_Hop z1BD>$M!Z`@of0}U=mF%gZOXkCm9gV!da=L@n2YosumyM|!xlS9M6pI-?FiP-HhPV< zYNJBfdil&!hnV^;+_L%!{UhIG6AGh6@m4(>Me_*NoaN_)Rz$Jt%Wsl&oTjt#Q`Y_D z<;Pd?nR>LCCL}^YfsFOA8w|Bk{x8Q%~?)8ajDlWcZMm9_jClu?vmo!hKaN@eHm^T|`zuE?b%3 zh06SpPU0UpO&KK~5=7bX62=bFJ>+loNzY+dfOOx%M}4ArI?t}Vt&^X-r<(z!KE&Vk z$#Fa6o54r?ZAndGA$6fklPOjfemueNH>U?yyvP7QsyR-L_@)%<8T)dS4CgkRkK;LD zNXdnN%;V*KOCw(5(_wFl<}ry?d$K{Wm8trnSa!{-6YtI0a=+9Vbb?NXx1`3I%A^*_ zfSuR#yIfj$Q<-DC99{=*^pZ?#& z@1wtt-aff^aX9%t{_p?(Zt>U4uMhv-X`uUNp>71iBiJHB~zx?&u5+ zmP>nk_(&5hNsF+d?a4M978_$*ZxyAN>Tzi#VM{6{#7Awt&y-%O$Hnf0h)%K=tlj4& zx(()fneayQr&)&1h4X8@9gWV)81nv$>QjPPq#Y;=Z1TmGxVCx?%Yy$$cXm(QQ#1>|K4Tj-BJ9KJmC zsQh)j1V#n6kPeETGj`OL(Tm1jjYX7Nh8trN>%fpu59sFXx?B^L?D@;bhcCTv|LV;a zXX7MC+wYszl>XzxSC70WM^BHAy?fP86572glpgSczz@QPSM10x=+#Ck6S7Gy{}Q@` zbUuDt6|pCuMW31Hzzv-SyykduQR=#DqvIR{sJe{@5c8`cBwDVXxbBI@*qr=793Q`6 z&GFr>yMepCl-X?XH8uCzIM}VD^gnQp_%%%vZs5kzGy_&o-1iL2LD{_7WyA08j->b^QW z*8RPnw+m1-{wZCy?lJ)L%S|OZj0nX!9j&h_YNbJj&>N!qG#V$r0O#=RyFWixqywd| zuT~|>he?7vLUI9_%Lkzpp#ue-{EVo%nB1!zHb#2CBQXx}cZjy)SxZIC4CJ=mJ+PRe zr^(P*+5N}vxX7J50P()esac+Dy&8{IqOggr-SKZZmyVvjI(&KT9X&gKuB+8zwhWlB zHqQs7-&WJAP1C|Lm-KlNcz=2LsaX|@e zPv{AIh=toNQzAeKaC6CKcyDV9)-3levH#wA1nX#IEO(xL%U6&sI509YFQFT+90qLn0w+8B_Py@5HMl`NWG4{lR&VU>L6-ct~(ZY_Fk8s*e3@5&Na z#kZ&n+Ni!Lt=6EX3$31O%TH49ovw0^S6-mL3%%5urd2v-)A{=e@*byEX456RVxae| z*xG`+&BL);r|6;%(-vx7*-8q{*6JKNZ*t=}%Id`HPpgulQLT{cokBZL`^YQ$ z+A@{6K_z>Le!y($#YLu>u!pv0KRkN=1V5B3u`;=8t#IqhP5i%f!F%ibU$VG@(>CsY zsWO(||I(k_|K8o&*}8}Kzgzcre&^l$?f&=QI{vH|@BSOc|Nb3#{oDBeXN*7Df(-Ng zv&R4a-Me+;e|LLl$F~32fv0zVv;X*4{B<`Pz~Ss78Q3j;Mh1bUN>xN0VfH z-SfVWVQ7;G#v#s<^9SlRf*b+$o!Oj-5e7WhF3q6+=%_c2W;tQ|@wW&1rK-VvWaOZS zXrMnrb5)h(a=YEQ+=2h@z<+nEwex?7$M0g4+`VVyxvGDoEkP2E(R*i_cXZ`dzbNtr zNp8{3mb3_lyWOB5YL!AK=`o(8p`YT3(K!2|beKGwy@v^p zP6BxX4aqP8q1s=@xQjr7yHdZX-H1G0KV6HhA-!GGvt<&!?+^$>&->%nmN^-E=vNoKfFNfYRrs+s?0@us@<2V7RnupCkyr+2DBMQ<4!bAE}+y& zG6FH*9C%gsJyqF)LU^9UW2WPrCT8l8Rh`Czd~pVw!D zzw!V7iocf&hPFaRPTA=_oXrkt26z(eu(7wwUxUyvB@1!6aSj_Vv&!tV?a3vAE&=&w ziMEE}V31WaHlEuL{u zo$=J`5|iwT5tTkdc&?aWp#Eyat+_<= zJ*3c-f|@EU53~YN?4dpA3nqZ5x&{qWy`t0P2nGP!e<$>gxKxq|a7>|p2hd|;i7il_ zDDW|eX*5Q^z3ZY1K-`n|y?XrTb}@)B!f11C0CBE5FjVSssxZ=wF~JS3P1R)FrFWhv zs#TAb#>Q$4x&lI*?}pxAl04DVWt;*_fkueEKW_bTtBaEwnwxoNS$dV*U4XNnpePvH zax_V%DEA?NXqysTCD4;+Fzrz)fu5END?urZM^GvmR-$1?9y-Zh6Q2|l{*5L{k(u(C z2N0u>q6Zt{g%s2pjfT*>il%~O-BWNT(Hbt`*qoRX+qP}nb|$uMV`AHXY}>YNO^lPh z_o;Jps`|Pwy1J^@TK#_Sv%`&3x)Q=k{uX1#AU{e65C?pldIxa5MF&Fbd3cKO3=oBv zKoGf-Jm6NN)|CJ>-SNgGe8g8AG((gU^q-qb?Bzf+wnx(LlDQ@3j)DFir^9?;Ni_5P zlL3&n!(eEuyLLP*NtVHMh5KBhVQ)I3p#(>7L3fSJo0c=yPGRL~MciqXlEU!lwI;|y z_b00bP{ow8%ot%u1FV~X9jTAVLQiOs@N@LtarwrKSvJB>V1lb;897hP?m#Vot+qkBBj+5(Dnq75#tKPoiws9_L?q`CCi|*4Jatp>k+M78Jtg z|9I}WKW*h-AX(-NeT_eDHKNxz(;g0|X~9(vW^L8%-W<(UIAsg86PVe-B;s)ZxCH zSAAb)Cw|#OerV3PlTz@xzeLpOJ(*jqr&ouW$G!-+W7F<|8;w74rdpr&rTO`GqwHx-eGbf z$bss$?>zl)Ou-#T*6_nMjWi>h`nIaeDbq4wzfF&kr75zrPip=bC|T3)-5%jyl${f1JUZ38 z=3U17QFH%0#-+1Jqy?3*TXy_KMZ*^Z?}Ry`^Milq8JP;lLCQNonwVDBE11mGcnkA- zxj>X*%H{fBT#c9>S)n4F@wC&ft+#D61|DiF(QN@57|#%@>D1l)nj|Lj2~1HHY4n{P z#_zu$)oqV|n8x>wNh8wCg?Z=ab_$;haO2D*r=Lk~tGMN+@2_{pEL%J9wxQh)#W^HP z_I8|{BwfDoyQ0$BWQythHrQdRBw;Aqx3+5K@OgdRKW>A&RQz_72myI6RLk!-+I%i2 
z&@KeDQ_B1Ts|NS_o(d!bu~1FwUFnLg5=tfzt1V5%+c|DT+Cm%DYP)AaN}lUIPk9t7 z1(1iA0JQW7g-%PuaWK$!KL0pfT?vlRAO~M)M0WNdB@(ZDP%EF1M|qXEq{By+qQ6SA zwAre1dRHkGEirDf3e!i=W~hV@9fyJj)+tAYNF(ytgA2;ocD#?nG9NL*Z!zB!9U06o z=BIwHuGhAzkNnTR3*X<;U#>krbGvhQ`ueyHGp|uAJA9m;hp*Xo&m-J%?OP)wab39- zlcTP!M_n1_V}G`<@cABicNl8A+l51yM(8_k62{rgl4`TF?t=?oq2!`Ah2yx7P>BFTtLQI*_#85Q?mw=WKExcq{d|N5A? zoh379Ft@cpq213#==-a`mQJ$%!Jn3|Th-(ljtB0IJW6wbgNwIlkfK9bW{m3TKqHz< z2f>;&mJO;KASpz#4AQNXEqOj>!{mW>LSJpE8_?Ejt2w{bHF8d-(N5&E_J|9CJyW&V0H*zW_b7;_#lF&4fMDi9XiLw^yET{oln!Pt_p<#0tvglOPvFZ!u zoHD;t?ORkT=FVt+y!X=ti$tbT=hdC8bYV}|I>!pOUa-0H(;R{dde`qr+XyXK>N)BZ zU;YhMCU}z?f}90R6~ExCskhj~ID{2-LwEO6Ykx@DDjntV&CYkYE0(_vgmOcI$#KSXBlRtjc&*1*wo8 z{|^ff%);}^^ji^tk4x`6zvZj3#S%-$j~L;1IZnV@0L>B4JZuj?+uW9B<+#pExXiuSLPK3Da= zS{dw<(^PYgg?PwHSu3J^#ZVZcx4BDH76UUuK3hIfx&a{#` zuP<|E-t9`TLvyFliZr{rT#Vo3e`qVSzhMLq^-aD(zc*i}zE1f6eDF^p?3TQLzq_?a zf9yu$qrHBvl~cPJ#RhE44WkK@OwZXN54O#)Gwu z!-SOUb6?l}kyE<2*GCDDQ32Fl|2&Npkgw>tM#>oA>T}=KNay z0aB!qCIi|XH?EYlFYFwfkcW(_*ngS6l(861kRTepqW!kk6!iYmqaE+}>(iU!VP-4x z7DaFzcwwiW`6l?1CuZ|ABd9BW|3>)p$xZrxb5-kh{T=%KNdHV)D#L_Ampj@FWSbV8F3-heS?D12mD$4R!)1GE4RX4V$%rt{bM@n+cm#ST zK%OST@$9mY<3r(b98mNOtDG)ex&(xK3Ix#cB6#))IH7__U~G}M1rqPgE)=slNr8~4 z6l`YZ)ey<=nk0tbrrqLZ!0=$cH4}-7`4`iTV=#k_P9})p8kTeN;OG@-Gw6>S@?h-l z(iQTIv9Dj|$(bW}=FRxgC|O`8fKrvojSmcW&#@~=H~#{3zCvH2b6*kV^tW-4+};n# z7)t>E18jBZU0rb#G+jNq2Sz1N_R{>~>gL9) zc-M^kO%xecg?RHF$K3L538LE_S^dWp3>XCMQZR5wDb7{Q2Md!VC zHU0FyU{V#c;K7HpJH@gdXmDMukEl z^7%%qS6|BL_$!BBgu#RL+3Wxu&fB;>JuCm>t1XggJ8~dO;kb!})SWocSXifsQtk#Q z4E6Y$L3$dB8aM_i>jeZrcjJr48^2X~XgkkawEr{!aj6(H{D%o!9#R5{o|VsGCD3EN zt{L}wKZ}^E*mxhbcAq&gZ6L~!&TGmGx91_p5a4-R7-V;gC1^{L6(Uh6QuYY0xdtb&eWN5r))`l0u)6{MWNKg{eqjf~I}mzA7h zWlpne?OU69a7Ts54sV6=(h;`msg1x7NePeo$qD>3?T|18Tu>$c_!1kvHO<^1smwrM zx!3Z4?#YEfTOmT?nMdSHkDhhMm2DI6i`%;$kaT>0?@2H92>xVg*1&j%VGTT$L|$7A z<8%L2hpamx$KuN-znDwFGYeMtKO9l-pF*IXY0kjiXyes!<1f`1KMpy1q(6E22Yn9_ z=tpS>hcfmYgBx9K)_QB-Ok7bINgT!M_x7Q^Jqx6`3FQuEHKdz zDHA3*6yky&6l)bjWs1A;)(kXph&?`^yfR%kvu}WabR;jU{XD!2l$Xc=lV~8C_@j=n z+FV@3*x$a~FL$SRcZl&vbauoe!5knszor@4%%(M;NU#zNm>VoO$ldI$$a*jh-qh&e zfv4#+2oA}pBB6vxJk!XC7j2Hlu=*yV1%(F3ge$q^$r;5GK>J}$V1l9SQCna6eY_Ff z)1C_tiKZj|^)15P{vENI8DjX|YQ6h~1ru=s=KD9=|M$gM6~b#FT4UD`5mv}cTvojG z|B#b>x_UGuM&?r=Z(n?r5m*{HJC{yo)@VW-kQ6y5fK;VJK39w)IjPLPq_z86^ShG< z!q@KF(`cQrU}fg#S7uCK?QeNG?Y^&dGxCoL4{X%Rlop6s@$u373OwAwJ%z5cz5xO^ zQM<&WFg+G&RMd~eKhIPGUdEcvUKu5}2F=qDVDv5Of1_(YMAN;ZYvUaGS!PIe*C&nNC${vd}}_QKXa;1yyNQ75Kbmu8Dn4;l<@!WPg?X#PP4a zT?eZ5n*x4E^VRu+XJjATN-zIuiR7To<%rB>_y1@7>}No5sk$LG6OXr`eZ@$m(FYOu zo|Wg_A5$JA{ZUSO37X=2)l3DGl6M2{NAAA_eb)mSwF zV4V#v0^=D5ZWXcKpvI!TF4N#(C+0+KTlx>oe9FIgZ`?SUw^AP9p4c5F z4!S1#fnu@=NspMz_dT(9p!}O!B9paV(t8&RrZKc}&t*E@M!_^*x)y2poXRdyAamd~ zL<<6B-BhJ{Naa&NwLKolm+}dM-n}um_kozh&xxb6?nRL&F0q~~pK--MSpU&%j|;Bm z9vCE85ed-(P5iXqYGBgF^lOG(&(F90)vG6FCKm$UP35S#V{psWgSf0FHq$kfG&7)e z;}nLYpMg-^kvClP3;QtY;o*=scD=iYhHSkdOK)XVn$pFK@iyYkZg}cbZYJ!I28;Z` zItSe$IR<4{9VVnNx^MipI$|{|7JJnv3$=Q*Ls+C>4HUeBpPj2qPC=+v zn`P)=LQJTrg!7Ps?^JZVu#}toJABFFq%ABx}MlFP{|EvRfLLLaISDjBxti_=$`g}^b>os# zIzSiM#2t4&%+v`7|C1`kL#a zqVL3EOb??mFTF%Wm+4Ate>`Lp6*2|?P1ZjZI4<^*TfU{X97wWEt~mx2CJUSgzg$k3 zaAAP^Z?7`euV1)(7;m=hJ5bJ6Ywe!9-%vE8#{p%JA;%&T=T1u=+1n(4MXc~`IR_t8 z$Fh!*=PO4M$x*3txMclMCdYb7RI8Qn%b0~Gb7I$_L`PB%@wHdBE9dMy)elvq6ORl< zw7%7)KafdJ9+c6k*u@WIvR`xn7pk!^-i*i8etgK6uTz>1eE+v&gj$2FkOGIaXArD3 zZWz&`Vw-wIE0v<%m;XYZ|B;cUU?=yj!I-_O>&12w=4t04n{fRJX{UU)NijHUSY1P> z4Hr&gBNxQbIxLA8$RT$RicG-{hQ^2njG5>WC?p=BR%~|4$5mLJ7D)yz?;inOk15y_ zL1Wvcp=5E$XBK|Z;ymLq?77FOFE;4R8b!mp7+oW?{)Y&TxYMqqf56PjVaaKmjd7Qx 
z;)wwpB8x?b{JhN0uYHZw-?sB~$f`KpI*{ru5W%j~;vldA>(M=JumpZwbAgt$FKPt9 z6>L>GpXD^uw8tpPV+{eWtaSBYyqJN+JqC*5!X-nT-DbD3j8Iq1&tfP$ucAqtS-BGajo% zx+|7#3^UxXuh*7w)vP{l_&vTB6bAnUob<%(lczz&>5OVBIX5*MhLR!8e&}-a2Q?S2 z>h(VKH4B#cCVjntA(h58PA6^9`NbAKV=ej;b{QLE!2LuTm5D}Q1p?oNLC`R1+FX7Z zleFg1+?l=e+pE~cq4}n+JUar&{~(jMwYkERI7Cq81n~E1kSi9FU35Xug~0yV%E7<) ze8ki-$MA9_#nVAO81#re+;7n72nL~hi$)N{Hy*WmevmWNXJr$e2Yan_eS~7`4clFT zwiB86E{yzV?tE9ue9Nu@?G(mo5ARho9 zUhEFh!L57T`}VlZ9x$GmWkE1Ja&1ZGx;kU$b~6gxZx)PL6?`K7NG65GRsWSt{5jGa~FH&66>W9@0vg zHdDu#$0}3kX0=eXB7ONA^Vxxrm{{zB?MMfJDJ_KKAIfCvt8onEpi!it8_ZjpE1IVW zp!B%ykrMh#0G}o32>AHlT@ghN8?OD>&}xOKKlZXMoA{Y3j*z8Xv)oq0V8-(SsZ?sB zw5YL-w1v!IIZ;QBgXd5HMT8w5?M#qj=>RaF^?cnPE;>m*A+NqH-Fi2pocm>%cef$9 z)oHJ`(TeI;BOW4^Ye~7ycC5$6Pr^lq4qE)EMj3QoLsx8J|3@;xOWH1lEv!(rZ9Aj- zpJcM_xb~d-i#l*TdLJ+(I#H?=#b;KTfKSuGh=2|hwJ@=$wG@_>gLctURl0jcCqchGF06h z8kLm81amkrYn(9nvc!7kpuw3XM=#-T^zk;E+c%Kb#IWg1Iz5D1GgDc$40TpsH&1pZ zyK3gzi4WL`}Y;fw7eG6xo`HSUg4VmuVsS!W0`O`Q*HdJ zf$+MfAuFqn(blQgs>TL%D`)xl!XB3S#Ir&E^Xi*=2U;p@%VAev2lf83O!EI;{a7ZI zvl`E3<+fw$_OnGrjq|Lca|3abhF@~7)~1r?e5uiV4uuOOZeCzBUCb8Kl2lXDDO zMPv&hH~y0z=i=J5X3N%tq?GhS54LheXaY!GYYmIRG`3g!R-B=@%uLT|^dGGQ+SYE^ zgK$<6D(5yz4V$a(*{x^QbI_&bZk1KOSoZufRchsD5BxwXM?jzDk1wSHTaGV({TDMi zJI}(SD`OIL-oCt>27U)&I|saWj+8%NzSgtXm_M(R`XZ8slY*gfk5=hPUky&;H`5N& z&nA4IvLL3>pyP{W9%wfxaqQWqMyd`*Mh!LLs;qo!|A#YKH0aFvFJ}^#+Qw&6RBQyh zZ`eOJcwmzIi4LP4Huknkv!Zim76fl~6?Xsd^AY1yN8UA0*#J5?RKY-6iY{B~D z(r%|?f(_cc{J1X3GJn3F-<~QHvl{LY&V6jqQ(!+k=1JdXh1#m9E#73odXKMTuStr{ z9uK=e%64$CQ6KzBsue8(%aUlW>{ogchtR)W!Cs8$`M>V}@YlVe+DcTjx2smpLFR8} zW)>O;@mM^%OQuL)d2W=Qo=%bnQ=CGSvN_eqmU5p?%oQQNqYERFMEoLm2;sL~gQGS z@yqGDT7`EVhJ)y(7Io< z*9LlC%$~x2<~>R@IWJV4gd74X)i&PzdSj`gW0-*3DL;&~9^IRut}o{Y;cxqZzgIl8 zsD%mbnfFjUK{Ywa!gXY(TaCT3rn245jF|(uhx?>u)AInoWtx;VPB{aDf;;<#2d5JA zR>kbR@50iPwT3gvFq<)fLhoZhx!9V&UPnGR77zz40m?;x>gGBua=mW&tj=CqdST`+ zKX_Nm9c$4+#y+Ehr!HF;Z@}YdgrfKJyy5_aRBt-Lle$Ygq?*-h`h&&)92| zC7$vv9{;fSUOs4X>HqJK+#Y-L?&Il;J($^H(;PWP@7F|jj2HZE;ocDc83pRA-)q|j zP(;z~UXgBZBxzD#Vx)VEVcbErEHYACtk7m4&?^J%026Sg>>*f$94HpPePmr4ZjK`& zP8v^If|Zys`LS;^My5JqYh}u8DW526ZbA#^>JGXu;lpL8Z}5(m1s^>1$myfaDh)BB zqrDt$5+wxoQ2pU=Qu~t z!A~lfeR#6ut1{E30wEU*ty9(AgDHXG&MVdHA3RBW{1b2I+>N1D3uUwMy}l6;RxtlS zY5l!Y0pVDBQWsm+IrDW30`hlu840XpuuMHfh5M8R{b~^|)QjVBG}Qd_^ut)1M9K*& zV7bi58~Jeu4{Zeg5zktQ)=4tR}&G-$xp})&8ZJs zDc0-lGP3xZSa^DS4b58p$JX{`13R~#nWcbx?eiLT!>%EXh+@(Sp_8KWRY*SR+9F4@ zsNoe`k%GT{CE^9EnP1KrF#c3RxdJ}7UVd-Gx`Y@QPEptqV|?$ZUwC2AVeZdN5AE-b zqqPP%RFk|8V*`;G3(O0AN&gh)R7wkUgX};t`vbLP^6(nLV}wS{b?*i9yS>CQb<7s| z_Gco0gI-n#pa)A6IqfpJQ#1Kl{jT@n$v}=%q^5T#Lg`mITf2I#SSYtdnvq(t1^=!Y z70ExxCe!M1&ys=46=BPKc2fP5O;@9KA$5e9IOo;o3_m8FKcs5+U>$}fpg9$(*Le+6 zL31+y22^-lCR+-qr3$hKHQ>MY_ASQMeUA(^PCYjH)?Dbq-@#M&(u6NzF*<9afcsfF zjc!%Q%8f(miZQgGxr9>S-TaGYgH~2-{1n4-l}65x(Xbv6JpGL|j8sLH)0|djm=Mz| z{ca>A_@>;sVeL6}M5X+UT0T8Hozv1)?4{RW9$0R<65MGlY>{Xf1!WKCpH>n;2@XOL{q_ZEMm| zR6|>CpbIO`ltty*3LWV1P$EXRA;hjbF_g7MLtBWsm$gmAjNwi%ZMBs&Gli%7tp->t zkz7f2sIlznK6YG8$%Zbk9n&oky(FHQV5fWfkX5Lz7#)AoQ9*Ofz^x$)*wOl>uSnFB zVxqTixyrPpVVr4~kJqgc$YP{P7!VSPWFsYmxIVdj@k;lC zeF~>lU@UQm4D+oKP!SZ_;`Jw|q->dgwMCyRv0L7AEIH-Z{{>L_eunNIqbQ&xc&+W%Zlc<`+QxA4gf?0Z5;-D`XjH0YhmT1%>Ip|ENn$T!JZ1MiDPcC6iq@><%vt<`h6 z&49NNSB13gEPZ@B@Cj{cCcRsDN zT9?0oTHJwL028363}Z(HM2{qK@?dQ*qz=8`6gDZ_-r72Ni?q*^J{hA$>aB(T3+}i6 zCaht(h?>ti_I`7f$!57)ou|!$)MrZRQ$xi?n-sgM*Sw2p-M%HQ8UE0I<@yCnX48rF zx4BEdC#!kYg+^r;cr!-qL3}bSX!i?g3Ok+YwOQX@40X=e5Ojk5Zt<0wg3l2LuGgxZ z1!)%YC+D7F4^`)j$LsRNVN{1MMeQRC1)e>EOk6n6u|!0w95<1@Q#qcM?M8Pd{6v`H zg~1GFsk-2iNw6X*j`h1mBe~?O+bo?f6P8;?@m2{-F|N6V$H>Zwg>ZQ#^Zhvo<4q#% 
zEW6Ft^a#2)=c)Z}PhYLu`frbZONQ1bsWtB0G@m-24q!c%JD=LUlem9FM1~r@Rb&Q! z!`tN3IW_WQ7t^f_w#kRW>lTr^09~Lf;%E7lLX#iEkz+h=KH#tngq26G>-Rj(EBK!G z@U}Eui`nKCUGqp|L4iLd7Oj`=IL{++@we zggZ|+_&5+QN%$Rad=^>%U?L1sZIhN#4j%4#-ueaqFg6z+Sunc*H&F6(kLh{>ODk`(R2?qEfi*CN}c*8*t3 z_{gyxwNNa+0nMpH25-mu9bx3YHlthsRMnCw9_(`DGZCk3tR-=>ZG2jj}+yTJNNib7C!bs2M+fMC)ti=55mc9s=L z2I)L!YFsFy2p}-s25^qpk$U17@}P#O*8Gf*O9z1FSYCdWQ_xXFD^2Ecbf{P_keRFK z9Fezj#*6f-7D#2~ad@t;>zW*V=5;5`8QHZgYJI;H6cPyA_Yq4tv8LaZjdZ&tFO#Dy#$y0!o|ygx~of1c@vODCC;r&aL5l+MEuvi;ehXa=x;XA&G;1tOwm*z zH??&aLA9&Y8VSDS^&)OXaCD|mmzSbR#!IZ)o5g;A2c}7}sF=#jg>n=o zywBJe&#GZqK%TwYA+0pszD0$Z?fFr7^NOx!TSl;n64;D&E2}=BkXVW8`_>^b)C{d! zN6CNkSd81<>2l3*Mb@i_?0y}@hFDa`6rfPgjIHux>m^PBg8UJj229DFz=|gY#Bz$k z1|*R!4cLHoI@0SBZhr2@ge?0~CW0%S4r`xump(LTQqv1*hIwZZY4`DT&u|_D>{Mn@ z?Tg}}Wb~MYJdJL9t9q3_nH5ivuy@*TNN>u8GULKTfldr(3-wIQyEj2I$zoi7O<+qd%x( zNU)_(`DJZb*Fy+=dwFAC(jg>hgY~qsQys@PnSObhy79_Chjz^epLAWnXRP&Z7A~UB ztv8VTKaxe_#(oiHEMqr0RbglKcxqHl1^{+agTclMz~r2`vH#}EK<|m;n%9NOZWYT8 z5~1L90NlarV0*?uX9umbN@6ccO=)E{U!4n}&j@&b*X32Zju*4LP?Qw5Ya=m8MpQUArS(5Z)b-w4)uML;5uj zt`_SjuiCK5I;E-y@M<<9ubfO`uuNN}e|mcw9Orv^nl`F7qY+i&j@AoS(~zpkm6Tji z>ThMl@RrL|n5bMoeVrxZGsfd3mz2s~c-uhO7;AGG1Os0GFk+nE{bv%x)` zYiL-rJf&w5-r92lz1VC{cJ&=R5_T{*gG_wG>fz?QBIG2Y9I#qSK()H&=nWpitGw8X zI=Olo9YJ@}7+p>Z+(JkSc@fAOqYfZ*Vib$1^jp*pw8`0y95BEytNu0mX^gZmxaG|# zMTsn)-GUf=aHOg4n*i@rboTyQ=M`DJYnu9U+={ zR1SkhO@0QQtah-$%1iFdS`A6iW0TJqKNW2=PnLD(#*GhAqfkT3euJD_Xpq!s8+~Hy@Z{9UZ0aEH=~!F_6EvnLtj17`O0HGYpG_uB2T% zoml0}Jgc=N3@(W{T<4^8%6EmaV38bCSVD}$VZ!TzWv0HHO+P^SzFX9~dDeSr)un5#vSlJgQV-*?ReX$z3uJ?>dn&Kju1IWIGhUhLKcMxXnlt!L-)Z9-kwiTxLQUeg1mspgDZ z$K014hEyx(d%OB#+ZC2RG=J0PB(}x6=M0l&U2Ec$L4VdZ2%Vnnb;6`Q7o>584oQo& z)D?$ZDeGz9@L||JPNfGr(^Egiu|L@(d0g_Y|8(kpdrVEzSy>6>GIBnTF<1A_3GzgFg6IJc1KaSWW1OM<#(tu z6jYh<2mN&-UGgt*Vh-k~XI`4m!|plp_75v;bGY)-4%u+rWABueeeSupW-N+Ou~+@= z=r($b4VjvXXpeNU>3M9l5d%Xw5e^?9f8^WMG59wvyPScsPv0nxP4a4Skf2}oWw{F0 zOwcR><1EWKaRHoQ;MJh)&W)0W!DU7r;*!bZcYAQ5bQfKQesD~2Q|GJWMM>A$u~)U0 z{#gZiT$BEuHFW6wp}rTNO*Z>g>V@e^uDtD9#Z?C_UJLX5x!j%F2n)XpAyz4F%@z-i z(hX6VwEnV_ho3|sHk=#wm7QRZR|`&D_5S@mAl?Vm$6x`^zy{rpjG2qwgU zgFiHMY>}Tc#gc87ED*oa$_y|p2%a+AOI^glso`i42pzGKwkOa+S*%e~fFZDM z&U55SZRfeT?Drh!+ns=}$EU7#X(&?ge2KLk9cw~IXW#8*Ogz8+UIvLi&J^yII>^ zlj+^Vp6*JyFnOM+eH=M8LbXacI=^Po&v~q{?U(|NDcLW1$UZlV2i${ozE$B~m$#3@ zR zk+DU$v?eIrExfB5D4kz_g%H=y*0(>Y3dXLF-ojmlKTPuK#&g1@j+&8yw*Dvqkf(y0 z*;UQ-=RUdsH!+3JKiQ9~m8+-EIN%NSxfe6kq_<6NYxH3jQdr7BGN!5mf*r&~5pFFp z&XvAv^-o`dHY3*s6K$v~$UMT+Olwirck7b(d^QlJ=OhJ1mG(m0PY@rbC_c736scw2 zL940e8Y*uR7CEBCEDxGnf-JJ7Ns~yMoZs^aW!08u3 zp_C-UPA)%Efs}7ABlsAid;HU+d11`cJJI*ysn%`Yih{cQcBCgD8{C8G^tk10(@z9V zUS@zdDjicfa=fFUrNB5<@_Yz#$ujcf7C?O^@9;T~kt?89v~Ek;SdH_G1|a0BtP@~1 zc8Hu$Z>9F`7XDj^EyfZ;8HMcz-7BOp8(DWi=3f+G$Q}1!d6JM2E=hdROQL6H)w)I% zJGz;d>x|=6b?b6Matued$3|_$=Wty1x>ODcENUSJiOhCYRU4 zx?c6g+BrbAE)ECj_54`f4I^%gt_$Y`WT_1_XT?GRubDv5Anys=o+K7cG6pqGlt!0`;Q0jE23a&wHNXSq9UJo*qn8oKCQ=w&+q^S;#wAhKrlm z)vxc)4|R@H{(1hpp+lbt^gyf0q}RZ!o>Jk}3XHb?;RGpZ*5YN5YNwQbgSd54$J1Ib zA5m9VBO&(?R?sK=ND&MR=RICtvBtKsCbz%GWecJ^(k5kdn|q?k$&A6dLryf&g-3Zk zaGFl>+0Wtej14a~3DM%gD9-3!r^Mr-Z#EamhL}fcT4b>D^o5$!bbtXk%_7AB0Wfan zNldV}Amfne0(9j&;VucJQqyTyQd8~$o(zK?LmejY@{zdxOFXer|2aX*&5hmbwOQR? 
zp5kSYGv=l^ajc;jA{4}p*G2V`0i)nBtWY)%E*{nJI&@{aB8ByRgXcoNKfIbf{+oqrc)4C5n(0#FA)Msy}`Bg!BWZlsJz#4kpW9F zWum_*cA;~z8ThJEAv7GN9DTD#rk=jJQz>MC>e5mTzTI3-=Ei=q%45AMF5W~64y}a(TC}E7Z3V|psJ!NdIbRp3 zxhJuEKnSg5G7ab{uk-1-0mOm#E@`nS3!S+2A$(=@ z+Ed-=1?JHQw%CNbu|e+GScOnA6*(i3Wferc#0|WA0(Hy}u&!)QWW2gTGR6~Lzr52i zUSSnW#C^))MBr8YEZScC?_cJY+7*Nru32cJ)+PZ$<|ZA#jfpd|f<0;02ZE#76kSxI zMmZZ!%*0ER(sixjsw~v=Kit3op`-{Vf9aUEdjjZgDcUIE%HaIqx=zk$#I3KWYnVD> zk93mURmK9y{&~8zbMG4J$&klfEr;Syn~(9-{BanKB!cZ11%&t(nurAKuZc3<0I4Sz z5eq(P#|Z*ss!EFlg@@&)a7HY_sm`(;{&Z(+Z%UA!CAXU z`}YE`${N%00w(B)CneU@Z`^u8oliMbbCX!kaG~&?h-Qe`I>NDcF6HX+qTpvVQ^!o> zC>kNz(d@5CntG&iy*$|T!LY2HH>`#_obc*|!XjF+c;iN4>k(5g(L5CJQFDh@aQWvX zp$$Tpz(WO(A6N0J5sIg20&6i-y-cn>9w<$O5{s^-t28gzW^mH-9lW6SY`SOgYBKJX zOkpLl4#w)$wjriwq#do}rk5KScis#g-2^%iqpy6^i`_9hkWg`Emz!wxTw;_d9~2LT z7<8r~OK`@zt=&|H2&Tqx0mur zpX}t9sp4fJqK-+VnyFzlYin78Xbx&nQ;v5#H5xumm<4;bwpj#w(!*5sxOgp+T9$bx)pMvOz%&6^Bu3j?NY6 z`xrZrAO#mRuzWN%c*1=!OF!8!4tb?V83ez>1GIE{oW8pL)zL}B&->{a$`CW4B?8aw z5`ma&JQsyyj+nB!V4wJ9Owj|#Ogz8YTit9rdz2;9!c=7xu*)Ol_EX5(6%gL3!$J0> z*;Sg{$lxmCF#dXO~al zeEC;6P>8UN9j{lToZo-U!b&7c88 z(RlocZx5;ors{rhT44uKn=iUe_ae16YdPm?$o)E~K_VXsL_$U$mB3nW@ixC#DUavr z)O1RcDXoyIRntneT`ETFpc}Zw1|rED-^#iV@!KYs)O9t1HP*NfBaEl44EJnoBDPfU z_t1L8U6dwzC|a0es0y4&wzfIhzd*F@@W?UrU_=oB_}L7KNOkFL3-~B&th4Y~tvShi zF~O|~Rm*glG>+H;5l8G}haxrnCLPvq|9Cd0BSIR9;rN<9h2Vl*A~jDCj#3b@K?P+S zo(vS0NSr}-_l4zbd9L8tjTAfB`js6Cehh&}#_+6U1t_S%*1&+}+tbP_LnakKZr2)| z6#MJUk({{aoovLgx;@AR7wQdTSD2np(&dLLN{6v^ZQ0^WjILo>LH!yJt01ow#;VFv zib7o^pH9D|YGbXV)qCA1W~#mL^W5(2=a^(3hw0`gUdNY`f-=YYZjZfbJ5w??eNRXr z_g0E>Xfb%GBMXH}_~(*;dM?fI16U9MO`4&iN@y1eDOImg2Q52MYYT>=mVne+q_%t3 zD`nipnkk$1*c7D$#&YJ}e)D4N?Q9AsI{)pMW_-6S%Gcvz80B*T&g5*3r!!(XuGOD% zw-56mcya+vYA5>&18vv0nJS-as|FhP<(+*_O$~Sf72<&=z|(XE{~w1wOa}03BtnQ` znSzoq^nsYM7znMEpu;34&M8xrq<+@O0613E>U#-qdOAJI9PC*Rb?1$dSYwwvADlJI z-y=#g;JSQOO#T)w$)cM-D3j`#i41T{-ti9p^rj2K>S6S^_s+$NE22>5A;o10Y|;&f za!zSki zY2(Rm2Z%O`1?#oecyf|d7Iokx@YHS!=U?LPC5yYOwBVXUNQo-uEZzQvh*ZGEM~JIp zh%vjzl6_9ev?7bvePa<)p91f(K2|h+PQ`i19C(!IAz}w(%8gWlXd*0lsn^}9y*+dR zzN8un&4KXjp1BZ^nOqTciTM#VdUVaxXEi!uTyE`-|G`ImU5egMC7$OJt_S~nEhbzq zCe-Y>kpzdCjVn*^JfK^&)1@C^^K8NJO!y04>5{>G#(WKj^${3k@|ncQu|bsL0Ata0 z!n|$Zeh3<-x6^>ek3uF1>6)!eP6NO+w(negvg@Q=mIBl#EVwGom8hXKO;9_Op*NHU z9xok_o;Bw?B-Y*l>Z(s=8CyhBva6N#qID)TsbsC!Itxv}2Q-hxAwwZ=?G!lnuoQqUifXjt2 z@Qx@#?!{tbhlrE#FFd>Uez)p+Hu;c77jZ;uLp@@vblY``yM7tH3T*EG)81Ki#i0OQ z7I$|E4#C}>AR)NBySsZXH0~~ehQ{5k@nFFlcZcBap5a@w=4IaJZGJ`7syb(Hw0OOB z(QnuN6N3?(Af^#Kb+N=@aRf$5jvZbtD}fyEI!2XdM5cNM^OCS!<0>@!`&;zcufLhB z!?mWX+00^=Wix#z7Fd%@E#>X-S{HxQJ`t}X9GW$qr<0|`a7_s6sJx$1+@Co6|GDj* zx6;IC`8cBpwE#TB{rE!XAokuFP<&S@pg=qa9!{cf< zJRIDv)G9F_Q-SS*R4#AS-bdXR+x)ASv0|1ezg?Gj(_N<2jxjv4+&Mp=vNg6-)?RsX z*PImM=hmb@`b`C&+A&vHSE^#E2@!JAF|RWLb?)# zU~#b$as7ZfmVgBsXoVVQ;fb8d3_I{Y-YGVL@g=-WYQbJ>av-kutlBsF(TW@kgc;mG zpAH;PYbDvN@Ok3kolaA|mcWA3fM0h%wi=U!kff%H3NHC`b)dy$&$erMtmSz~>_m27 z{JPNma46(!Z>Mb zkOvvB@_?j##d;(xns=hmTrJ1&%lo~Asl@4WYppOkMoac~s{#cp&eS2guY2$3briOg zNYc-Wd~LAwF*rUmayGBc9DV!Q6fC4wxcy>W!MS%rwtS3wDw*3hj*{qQHiAD6ue5xs zja{mW2$qXt{{!0;C*&owX{+`4v4QTb?Z&iHwpZJ!kXfeONpOQC!U+g8=r<*@Mo`F8 zt+z)bZC~6~f239~txE^oe2eh5Qu6>^b3kpR%j3 z%Q~Zp6srA_+8mgw7w@Fi4c{>T;-EyNOO0G4s?&E_8ybB6n)2FPy?T6RyEw?j-1ydFShZf5I6 zpS z5lWzDS_EJ11d5^h(~Cd;`Un;m-#~k)8S3kb_^QeN!^m+7Rh{W23t7C`WgF4R5%#g2 zENh`va+Njytb48g@`ZCxj`KDx4jba<9K^q_2xz{4X! 
zI005Sme+#uKI+FI?hJ5lVLQjzUEF4WRlbX2!$b~(p9@IoVO<<^wf-25`fPNvR|44& zq{CgV(YYLHA-Bn<4XZ3d8UXd03id71Xl=Qfr1LL!L&)a|*sErAsZqJlpB!K;4v!@# z?)}JVNy$vcRlVYokOqK&+>F<^5h+%VsfLj!+)I|cLy!;BWStksbk!54!{w!T@Kt1s zOHV{dYum+4ZwMe{DJK!TpzbGsY(ieMTnwyTgv)OSE!N^a zB+{-Ozuoec_3JBmL^8?Pnbm_<1k~h*R^0Sw_&GQT>C8Jl0EtvrU7m11{G-XiZ~SVdQZ7_~-!1qKEV$ake==mlhhMy- zA>@yFWhWA_YEkEX8826{|Gh42{!R7xzYHR1NesEcjsKL5I0?&;5L+6u5Y^|jow)7$ z#E%z2!uthYw-ea0GuowO%yEX36MNm*h##pAW&VIZNR)Zd+Q@=E*V2V{y2O0llMQ7^W z+P{PB4_pP>xu+^Cn^841| zrsv)ihs~2xPuPQ}yC;UWv`vU+$OtZhODbfu)lfm%OkfGCEipcy5vCe2qnThLSbWtZ zrL92pn;~CGs$4!)QTPtc4S;FV?YkHRun~S+`U^bm0dzoiFS_#re0uHbk58z_YYj-% zypxOCX4aVBzRtw{lYD*=&@m)rSQ7^O{Q7&BJEok*V-&4O7MG*pn{Uzag||~F zB71xQ?U^drbStRZkA1{X0NSqbb$93bdS~2b;}R(H_M!C~p#CJ2`aOHbDd2GLh+4#DQtvn);_D(J60)v>))mro|@|FP`*JX6_DhLnMs5jD~6c^kn0r`b}T z;z2&h?bD*uVXpTf*i##AIhs5ltDDIh!kNwrlR+ruZN?}%xaVvxb9{RMbpos_s!3%Y z60}0o9zj~i;)5IRNt!u}h;Z3+5O52t?6aP@Hpfu&aZ@+1GaF`-Fq{cg70#SyBA`M@ z<>rIp`+B8su;blMGoF_}L+7Uu7<(nH931`lffVl_t7bd{jI8E<7-{KRh8JVxuXUax zOhgk-kgP1U5ku_$Dh*>sJTjl{LBf@lc3Aae;HY6RK^`YF=R&z`-aQz@9OBApjo$IC zT+^Te>I+q0g(C6@Ea)%RT9M-cz^wZlT+1F{c>XQB9p{J(zJ9V2%URD6QEZoHqsKvq zst@^&I(rb$>Lrl#K;$^-I{R6hR^{ucz!(}JS?7pX?(5lLDZG7p33rFol%7fv2aTsm zq&qaow8s~UXaG*H1UnLGYxzehnK=u^NwgItkSjBg~wW)$E z{repMJe%o!S0S6~E?yO)GxxqtY~4p8QK%Y=er(TqE1D^vM~5I(Qz%LMb-8efFhh=2 zvX=s(8n%GU1UFl2Tz+Tf8!r`f8k;OObNIE#!(fv_Qx>TEW^4HBc9aZ^zy6E282RGA zQxjS8u-dSd>3snfryNZS>RK!Z>L;h8TMp(%^Fvy-8q;9iiYHO(D+!%~-kOHis8a*_ zVTj3ObncYv1dQhJ?rOB`R!2X>ZgvMX?{0%HN*i6-T46-MmfSoiwW=r%kL8o>#(i=x z%LAJEi8_8gSBlcoA1$(Ot_NBzmwPksh-)Lh>F<88RRo9*PEwQq5_sTfS_0)+*EMR& z7fzgXh?PkR)Uhruze1R>Pn;)fYI7jF8s|yB#XeXBl;?r(!#SHk-k}7k^(<8cgiA2i zVy2ST)8fzju;Rekmv2MnTq>1ueluS)>98A`b-(7#H^c-eFRHCLLkUhK^ zmwGoD=GP}_BGP8FgN4V-L?J<*k;V`Gh40CrE>%huE6iCc;YDpy9W8&(CV?5IBMy(P2`OA<9?nEaB#!el+f?wqb-tGrtPRx8pu=L z-cIq!@i0>3e`dNl=%i&;0ar+?y9nL`is7Q5<$aeQq$BIS6Wc38C`}(BuWzge&LQdG z&b5w-P$uLXc>qQWsI_9&rk1O2)^UGZ-GE;F^5V1Hl=4qUPz&Dn+ z?zybA?LYlT^f~!8;Tz=-;qF}jEt}XT0b#mS%JA~e1G2w@UuJ|K1B^YU@6x6=xvoE< zAX=0={AkKP6p$C+}JI;2BYF1X99gbvnS5?9sGhW3m`9+y%nWKE< z85>eIrIj3ubQLEZSx*bxzp#v@?i6|Y0}Y!81x%A=2}iG^AQp(F91vuOk>92eVpHlWgg;Cnq+)#1Gb5xbOpD zpng#Ue{S5c21$cA{+S1avqaiuKnr!oCeebw^=(R+2l2?WAh(G}7D|;;c5IJk0;Lm%!t$Ef@oNM_{t$G<<-sSn{ z-NuTYs|z<3t-5=_5b zIz;q^&!h`<9F+;kbYAU<9PvGLgL~CuaZ^aoo4V2GM$omU#G{&_yfZ#laKq8M-9!iE zWPC2k8VFLG1;Rw-*)X!IDdM`(I7OL-GVW8}a&cCSswZ~@i~aG<1YZgFYj#@`mlx&P>wPeK`Q1 z-mHLFJL;?5#@3*lWpkE4Hw}uR4mfEJ-{A`st?~HR$}{dagbc3 zg(e0*84`4S_8NI#8+n{Ly002kvp~hGS>ugKL|{N-yGWZmLZ5X6p`HJ4W5x}=%oZK8 zpw?vNs_}^bUKKj~bKK?``R@?*?~_Lbex)&mF^z$XzgRj7jRed)o-2$?dye)?E31X( zg`wR7nnuH{{uUJzTV0oB)Sm62L!>lKjgnWWzk!-Cl`1g&;?BtO^!yYk&R&wl{2w+v zMA|Ft-OCpcy)$9GRF>CU%OK>t8%P!|KleEL>{#W4s3Qf2of`j0pEu0Miy@R#;i`Ss zeei=rwWPHLFumeogRm7KZb(sE)f{F0;j5$LKjc?=8uz&*#+S3h?lSQlBA$e~%a`R| zzd?jI?y3%MXhW+EVa(KEiKf?S_9}9!2C|^LERL8N3_)fRKosXk^-d0@gyTMd^NtQb z{7fUkWBUD_t3}U{jBN$4lkj$!=y&=wBCVmUb*jN*5KAFnxmsJUsIBpX;~}<_m;*Q< z7xy2ziUF^5C>@C7Gs-kmLPc`j{bL&lP1{dd2=1#|T7OrtmuI9T{|_Ff_kev|UznXi zzQWfU%Prgh)5Ny*8pTJk4WAyxB&6zG!MShgmYgJlR>L$WCAk~a1U=|~P#0I&bFmOl zY$h2sBo_}0kgpRlU&jv0BtA8FU5YuK5R51w&ODU+fz(&i#|aweZpvEPzsfkEBKt{g zx_3_W14eGxfk|lmnw6#Yk%&Q-*hnoC)0$}hSM3~a_eYvz4fN$^7}Va>YoPApeUD47 zbmQaB-Qifnd609q%gpqMnuSlKE{f@q-)Z0g;=>VS*HHuto5*GMy0Z-fX59ZGH8E~x zx^?;Em!F2=(sV#`D?X?|p4>`8QC^{SpDk4%whS}EfhUIiKR8ljD@vmlp1gwAyc`V_ zo28NndH9%v?dg&FN}4Q8#%C|7TxiyLceeN22*;tFDcRZ=KPkS=q{pWsCzV6d(NQ4i4r-^j_x_jw4M59!&4&gJDtIU}+K>Q%bp+z^J7~5gRGaw0hY9wMVP*lxNyr z_C?zZ{mAe@-@~d#Ps+Kqzgjj(W|I2;k&f9@tV=}V7o9$^eITuZh*y@^B~+(NV43pw 
literal 0
HcmV?d00001

diff --git a/vendor/tornado/website/static/tornado-0.2.tar.gz b/vendor/tornado/website/static/tornado-0.2.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..6aca327aeea3fd9a817f67f694bd18844107b0fe
GIT binary patch
literal 200680
zQfJ^Ut+=YN-?53+kzRF(Z21bXko!U!!E3(n!7chKJZ?^CU{wHeuJ+2q#@Slc=~U#rD3&a zo4z=1EU9x`UI;Rqm|U@G3F$T^3l(*I7e^TsuZ74aXxGrFFd4+xx$xiSfEjc;3@W+klcw&h6TYY84y2ekq6LFSQcz8k^lqH z9jJ>iP5e0ZB6B*^VpAUTOWDHVjVz+l&QG?;J|6O(Al-9$$I}W?=O{&tN84*k*(oIy zH{_ve+f>X{$?|5~^sbp4b0Y%PYqbF0X~EgLm`d?cZr07n+u96+K=Ix?sM+0?xpyjb zc2dN%_i;IdmZ#^fM~*E?QJhu^Iz?mw!qS4Ls5~HbRuCBVd!@D>V%lJ9)s%ipdqK`~ zgkIFZU|Su^uQ<1w>Okore;s>g*Mql#vnz5>U8^NjRt|^P4wc2LwC-&^&FBb4oBiv2 z7J`qcEn6 zXGC?cN(SJ0ubL8&4;0F8lQ(bQ;QgRBPE{<)yXPJ&35!xRh3NSH&B0%c_}eLBMk-9O zYX6~y>=wsY@zSnJ>=>Bqi!mL!SXb8@O;l|tWctRUB1ye&Ro0C1e%&g)(Fkt5bu7}( zN3V27J*MxnXIPs@AF-*89aNc4C5&FxiqkJh-5{9Y# zKsMdx32mv5mtI^}A(fiq%2^2}kaqvp?=J?t>8ha?>)-T-Y?{$O2sL;EEh^k_;pRQ7 z++fuzQ4LxmjV-tllWaQxhUFim-JAOGhEmDR7xQ8X0VwHk0RW%_%)-~llFjVDgVT9{ z$)I*`{VXkshNbk0u)1h}vL|g%KsZ)LClC2BtsHjfzBJi8ewFM#gFD!c`}*1bEAe{M zef|0^q~5ByWc4Nt+nWr)w-6s&^n&99TY?CyoEPjvRtX9ZRaPNs$?BO^lCF&v_+PLt zF)kH#8S3=F_I2BO#I#U7ZKDd* ztkYEXSWKMqC?buq=xij-)X0R&p~(d0p7!0RVMEg7&M>J+Dln`)DRvb?u~mGDF0Q=n)HKO%|Lznes!fzN^tt;G7yV zC}DL6#^0u9*}D94iqP>d(;$)-|S&>>ir-fz`)%K;@sB~6{q9hx0Sk+{h&OldN)&RzhujnalGm5cv zspGz9*?Nk%*a}5BbXBL7be1e~FS3{f6Bdt@cHc6N1=(x>)+yhy_#%hiIJc{|)Am#Q z{rZ2H{$K34{8_#r%lyAOkEQ=d`|;N17WjWXY~RKI`^x>lR`=}M7Ny>KcKx3GzrMt( zU)ldleC^olzw`Lf_QN~>uRH&*JO8iWum9J4mJjpU4I8q8IrkLp!3wfM0OMc0ib+Gx zV3seK<^5%c`U{8l(~e#o&cjy)d|vC6KbVgHB+rKI8yjP)D^`&aQN_*9V|9%7 zF)co3qOyd1^N=t6p5M+Q*hQd5at~eT^p<-fE%hp-UHSyqf1nvpGp*53y8vpr35&OY>aTPC4zu& z2CX3^R0XC3^D~ATJ!L4=@}#5Q)%-|2CfYY>_+)V|-ukInN@92jYuX( rh*kme- zw1K&ennMu-76dH>bKIIUnes08#r|=$A@Qf$0XW~qQ3uR;1GfvGxG0tl(ybGkEppJ~ zyPfv-lZTHVwcCN4GHflvPEp$J_P39`P&Rg?vd~A%|Bm9}e6gz{r#oQFXuk<4>~xP1 z;?8>GLA(QTVrhU<;LbD*lC}{{`QTi@87&j_k594HEm3G-8Nn3awt= zirG4@%D86%H2+m%pqVvWs%o`PQbe8UP5}|JK+S#K#&B^VSQ1^I)l|S4Agu?zdvhMNd!^i#}}Gg-u}GFZA3qG zb;DPhMV0!?Or2yFs@Q+=w3%p#u$St|a*YTiHln%Y*v}IsINk;IE_yJqj2|!y##n{7 zpbN(-Pqna2bC7Y+Yb^&rEf;A?QfE0w#9qJ@yupOvF~$BL+ytnZtdy@(J$~Z!beRj; zS!A;-q6$#N9!JGczSVRDcFVrkH*^`~+Z&AjDqF(mTJh2JS6SqivydjNP3VCWhh*7Q zvqR~U{lquX0mCPIOId107_yF0cEv0k>qvcpL2!34zf~mB`NpVw9Hv~8U`gt$_=8&bd2YqjFCSNuIVeW$n(kdyO>=JHfYghv}|nqI;DgK8azP%jZpffb$2> zHr2H_k1>!oGx@~k$fK~mk^_y=eA`?bf^kP}hgSM-JIwBAda%2&dgN6rfFG8&@3I~`zYj)mH>VTs?b-RQSo%`7Ud`GAQ=z@z z?W0{-d*RGna80eEcy!#3G!aW;C&^imyd{8_fWs5u8z(y=<>^8Nt)E9^F({3td#b~1 zka@mdV5J`vNio_gI}6q0Bj+F+n;rMg;Z2gds+hPt$QZBu_eN)drQFXJf=!5alA9$^S`8ZSevEo*oww z1UeOyNlXYUVl%>mgDXL&rCa-o`EkrgzM%tDtPe|+e`50o0 zf|PC)(BP8-Bita^4McOA^)6H=APyizInLm!F`YvEQ5a@DsMIi^dJY!`vjG&0Sa@th zI2q2TFd#H&K=CzkNWci$>ClM!2X`K-*>GfRows-xCRs7T5jbhJT6ESKL6gqmUAW)+ z+U0&7VC@fKFJiJWcI^}Z zW^pd@q;w$$c&rT0C@ol=b=dp=DlapJw*v3?nwzy%GfhW(<)wUtgATf;FN+)i*ufBr zk$;M1TuT@eR+Ctqjg$9BaV?o?LT7WlgeNrXb8>`ym=^zp)iJA|IE6vrsu~;#f!e;I zcL_k-mfBwRu!;oL0;bDUZXV{=MydpDlHtWOUZDGlfqfNI0KkCT@T*Ls`p#ejAdAi^ zE=ssOwQI(R#dRP;@Gcw2_<@I~(IguUrJEptA|!_Bt(s1e2B8j3Tt+OG0~`b0aCe^G zD12U3a)g!9aXGT)185fgwkSNo7`6x14sE!~)1;SV5Ajwpm8^|N3zsDwqi4=U3RZEJ z4T~!qkAa)`N^z8gC(xN|CW1h*@ZCU|$QWJJZdel*8ago=Rj7s;025Asz`?Kt4NRZm zDR8{yCV*wi@N~?ju_T@ta=@cV&(^>L6VKS(SL|>faVAD$|AZ3owVDQ}K}-W;NW>;V ztWXjRK-5#nC#~@(V z=+k0$U@?#ugaAauib^g4BWvZU_l0=U6SDh_CX!beEZmwAoQl1pA31g|1Wk-oPDvl` z+Tkf@ht3IaMdUNOUe0d7H8KU-nGA(^cu@>72p!_8Fhh8KgDHT{?o&X(GH&3mF6JVi z0`V2S95-}pAPj<)jg)askTto}Tbl98qgQYDc3*s5`zvj&zl8Dn#a)dIOaf6 zG}dP{($nIql)OaF2eh7Fe}7o9-Ld1 z;uWGsy%yx4=3-ujfBIsS^k%^5*dU&iC_f5z;Bh=dSq`aNf>KME=z0vJ88>!(#8tU# z3Qtp(PFbSFETr=h!jY|9TJ&lYAmlza0OS+wK>E7*Xj?BPtJ zdQKsOKFI~zw0N7!-KaJ*36Y)0Fz+Mo)BTeCgjPT#r4nR}`&>vT0bJwY1O^ESd_Y*Z zsmr2&{-trkQwD@)WYZL9-G6>DsmdHaz`Ux*N3vvjE}%=@sKl3bzvhNL-$Qj^E 
z1uMWZNU9e61;tGW`wU@69Gf5M(uPTyo4aXjf41PF54HlWU6y*naa)xaoH!E-b_nz* z8Y2Exu5e;1;e*>WIbQx;S6QNA^c2t^;91dQFK$CVt!%fLUibIjwK`6JWkF&^3sYnoa~KTgbj=^wKIv9oNRy`9f+~bQ9Z7XF3=YQ!-wOYpiEF+s*P3}ekjGRV;nOQ z?Cb&~x7izXJ^rtBJWr=LpsN$Vo@ZxM@x3X)S?!sjvccyryO+I!Ng#i z*`f2dR?DJP!vZW4t+g!uE*Qmu!oQL^`@IP}=_u|BC7K>6-hL~4;ID9fePY*Il`s)f z_RL+3ia~41-euC^qr+;28ixe4Sii{Cij(IIyEL9tvj|MCXqz7l8#m%pNk27R1|dY? zXqFU%fsoU?ZGgyt+AlK%n{9%ENNj-9F7om4$poDF5-OaM+3V&xekpCo!z`*}^ejUG z_2Tt$!&oNzX2H0$5OTU~A4LX`dd*D;lrsk_+4Q^+CsMdNFr5CzD!=&uv$7b@$#GC} zbJ9bpu46BZw#a)3(a9btv!Ta4oZJ~?uu(^hU+3E=?YN;YCfpH%VXa(B!R;=d&Rsc2 z4T8JDLO6$;Nd|}u848ww+yF`qP}&WKMe3r)_sL38-o~}m!+h2Q9ks$Bry^^1u%}=) z@yQUP??s1rmme@(l; z16R(lXUWU)q_QxvJf>xLb6Xc7wZgs3YJ{-~%lK0|Ws6@LUb#0;3V2gnMSx+tcPxro zNH-|mljRiA!nQ;W=tk|GaIh`v!}rm{_RazWYuNBRoEld?-jH+dMqibKpK43&zbO-3 zmPaxFig#2IztomBH2Nlb)UFB)M$8FxR&%u5r)(P{N2}df+C$2(d&uR99-3@#Ne^Wv z>LLFUJ(NWRdWh5)XmuT$js0y@xiwM4p#a@%EKW3jjpWDy-Qs!_b6h0C<-u3UT#DL;h3S%}aVH%1jDb_<{++GtTs2o-9G zznXoBvX=Hqkmj;cVTm2@0&mw8Bkzih+1lFP0e;@;j0(8V0X`yUspLjru zhN+N+Z?bf{qyv~b;oFh>^-t|fs2HbPCd1=4uov=9?RM>JiU;W0ZCbiQVO8y&4Pfzr zqEWk%m^zU<7>hKerHFX==S8yj#=dqj%WxfEr!w$Fe80bQzrERy#nP=C>j<(o&4opL zC*-?Es)?>_!-A?xvodeY>hrY^^X;siewaVp+J3UujQGVFzi6B|4Fvze&gN+|pwhR~ z-51H==ApB>q))EU9~ddnX9wGVt4jSg*W(nB&f>`^uIi{kAk4Oza0T17%8NDC#XFV~ z!z`Ir?RwO4MlfbBp|VOHfti`#)f;YR&N!RaZMN(v24YwMenMOpPD+>nN@<5j`6X~# zx>;~o5<)EnBKnPwS8v>hmgSBdLz}PH+UsW(|m-0TJ`aYWiZ$pK@ELi*IQ2e^RU%By$ z{#8unCS6#Bm0@7zJsPF!WhN4#*%02NIX44*wT9(5(YMteN`|ul`_zn{qGYuPylWR9 zpQ3@s-$64L@5YR(SQ@cMMd^qdSD~d^OtSV3Qd3ARY#?g_b^)B03!khzH8Bk zdHd1!lW48_Y(GfFq~n%9hwj!wq=E&O4Knw>Uk9NmNGmZVjCZIBWLrSZ+gb0Nx=awt z5osxb(L0-jOqPfkn!}rfV7-_+F4N&WTa`K(5X`2Jd_2p}9ppDl6bOnajo{h>5~mia zulN&DG`vspa`NQVan;6jJN`vx3!|>~shpGbt({X_(v@}5_nq}E&%-rt#xyYyIDHCU ztu*X#X>DG)zy%iFQm2FD|Lls1`WdfW5-|olf>?INY^A(n^)3F}!S_$_eTvWDs=O2FM$$#&TUS{Wo z%6X9`2d})GFSE00cID2(!wlHr!+r;!pWySi_^hY(A`fxgmteT&@mt(0zTFzYzqnU? zyWPj5u8&v`A-e_g+pf#UaXIyBG81<$J$D^KoFed7Yl$h{K@Fn)j1~ zXZ~!A`^7l(sPt(HrY4d4s!J)81AlG~dYNGReRns#?)6I{REedc^UsolV?V#xBHSkM zJ3YC7Pm}+8=Vkq`RPcnX%$%CDNBlp%eSR#GKl|hU=VF?^$|j%Bx%mI+r=@T#vVO5tqrUAN?ZI*`=2s-topaSNTUDlh^+4^*ZfkeV?(R ztkKhw1xphT@!xIyH^YDX3nY%wFhu{a#U6hn{x@~BJm{jE)W9^f;*0ru8!Z|Z`} z>}1&8SHwIHt>JrLuw$5A@8`JDJ%exy%gVjm7QlN%3{ zH$K;RHlH~>?oHOuraw4E9PD%xpACjH87A-i{=F-PE+ZDNOlC8gB~JsJS}~o?&*x&vMfIoC^q;vi&iow~ zP>CtZ+!$+7=DXvCWzuuHuvDE!mOCwyLw_O;i;=%L#Y<7l<`~6Gy*j7!P??puVLSkG+KOUwuj_KPE^1(mfg$m#HlY!mEs$);A^Ia+n+)1PjQ#4(c)* zBiZ$-VRv}ukJQn{Kk|vrI86=%GF~=43l306pJ#AWtl}}{Wpd<82}f61-;;DM?Y*SD zO8y+mnTq49y^2#0#O|SEqo^`1g$?;3S)Qi+(1{u<0hNWej-|A;P$7Wb^wyF8=Ot6HP z0r7H}Px5hLOLp?5$~#SteYS8spMG>F>{!U8q3dwf?1CLMDgD?H@CMRKY9rGPg{?+?(mXuKhsnno+-0G;701&?zxyE|nb3<8uKOSzfkJ@gu4F>B+P;`8nURu}Q7*C?srW9J zZVo9#1@W>LCy*ikD1TGtQ&)}RWj!9Zne7ezUXo6;7%8+SGzx7yY8FtXPqU@iMy+wMKiaFV%1ftJ%Ik397`bM$UY7s2cNK{*IFme~HsT;JbB>E>n zh$1T)^-5vKq*n+MHQ*>cHJ}PP0SF~CyO;(VHpzJqzcK~rqVna*d$ zNYny=!8Z&gQ&Zt#B*j2#`i9J4>a-cmSAgmy27@NX8iESImlO>F^K)W6z+)JBfvLqI zB0{CG-?gx$gvv>voO-;G&3YR&OArupz~oVt6I+%*%K#a9&s;VGHtd1EUcs6H#z}Y; zVttDVjq36G`sjv=?2c~m8aQsWL@}yklpSpu2)TvcwxSmV$$%#_F@2&XS#Uwg^1*^k zRv|wCuQ9*k3rr}xtnSfzSP{O9_LwTbB!!W&I zh0n>^a5e*?J(mtK&`Yr*F4KGnJ4a1{u1korxgdkGM%x~Jp;yx4oqWr=(J)z>p^yU^ zj8a$ZBFvgNxMMaGbx!G*$yXG|FjR(!^2=U}9qI=RInJ%{?V{BDq{! 
zrj?v$GyGj= z-wn=*4M*S$@vSvLXa%w5;A<#}3XGfk}_Mg^nc1-{2o+aPW_2aQt_15sK-_2MO5`pdi72O4A)zki@$b88V7BEYA=- zV%}s@rp8^AE`SiXGR)vm4u+)+f|M*d)f~v7NNAa|{-<9=AL8*+0Vd@@dDp~gIt0}T zNli4Kdl+w*P^pX8^OmEn4YwH&22!Z2{JM1HitUtf2O z?>!g}5Cyub&LJTIL4Dd&5GhO?+fo2-kOp!h2S{8q#8a)uT_*#$w!zYn*dlA|Yt3ja zS@YtNRdNM|f|>w*y77h#ma%5R^8_~99l=f3qWh8N1$vc5U3w^(!qhA*^dPRBv|b$@dyYffK>Dfim8j2hy}ul z)B&<3b1{?)d#4FzDnz}6Do!|!jC;aM;pEg|LHYziKua=CXl7dc8_Wp3#t`RGEM1oD z&22878FCY1e@RxtsT5*KcAfQ%is`fb+fa-}l0Ln+q1P6z3VWL?5!nMupcjf0>nVn| zbYXVOSznwLjjltQQb8sK)(*lWQ#RFI!Y2OV2OO4W6u$=n2SA;oleh0sgK`S)Fp`l< z(`q+cxUjd}WBPh_Be!G&!H!q19-bB;@{&Mm9@#mwiI2q##n?GdP8-rDm_9O9K}B*K z7qkPlF<_M}R#Z9?_1TztGRuzJF-s)CS!_Xh^^e(2cbJaO`l;ep+(>(gI~{lVBto)u z&8!tHi?{axtQwW^K=7+BpeotveYR}O8Hlj=*SFfGbcCX^;gV&_t??QAHd zSqb4y({=8&lbA@w=uLxbPtinElSjjIeF;-vcvvg2u+$u$oVwo0=+6Rs0Va{ie9aJ? zrL2AQBW;tTVBop#QS-SIFNR&P#-Xm=Sa`K*TmGg8T_4B^G_o5Ps&V zVJEK21IJ#Xr(qFJg9UP$iyS?$#Ve%FVNo6Qk4ChGIJNyco$8>87x{xu01Kfyc%fMu z3lQ8I=wM@^3$Zn|i6^$ZD^l`qGemsFI$KJ6=!&Jc6U^Vl<8fp0fyB_vJI9P)f%g(l^}yl5@!zZ04@YAMd(55#uR_(YR2#M626;DwE~jlU(wrJ_1Gts zoEi3_-tinR>fSB7_5~-5+%=ekN1xcUhgN* z-yXi+JwCFq>EipM*=N!Hesurk&i&Ur_m86ce>I><;`@JHzdu?xzE^(?eK`(%xj(8b zPJap&=BKsZgi17uG65s)J+gu!;5mkUbeWxagSkQ!TV_W-UX<7%Cs;jGA=T9gMWFo= zQXmZmP9H+NtS#5>xWpn=JIp+ZR-kLo_Fo|M$JJ>r$l&IcJgHzPgv$JV-*+i@-!u*COahk&Sxc>YxvFp@nKUbER(AT+T@NN}L2pdl`o^VRLNp6o&AUlr&2Rija< z@J{=EStgg(kj>Fb6~9DTlzLOGYWW7PZp5E&(5k~!VR?nLmSoi|{yf1c22B=??JO6) z0{n$o3`Q#G6Bc;r8-Fr@AZU?ed8K#$mu-Bu|MdNf9aW+r`B7cAq8V*BnnGlJI&=`B zW#)r7&)-(&-Nd}$wLT7J{%QB{&B2@My07pgF}5jZO0z|D^DH>MbrMGO^Q;qG~zD55XHZycLwWY5lwN2W|S={EqRcKHDeD_M zlTwJUo!qbE%@ry#JFO02^>FQo+e7AWjdBM9Gmf)y(deIo2=O7*d)XfYDu$McN#wY7 zaR#coXj*gKaG*Hh6x8LS9wM|r#xs%ds9&6#;mYL6(QS>mKVhp8CIj#7kUoO5P3y~GBdLU(@GT* zDmz!fBh3p&b+f7ud{QTl`Tkq5X0RImZwy5v>bY~V_}%$`X#cNri_KU+%N67k{J*xh zx9|MFzE1zI&2P6Ki3z%m|JTDi|F7TE`u{Sker5l!&4-WMuK(Ag&f`1(uRH&*JO8iW ztN+&lz{@JP3-k#ZM@L&^=TZtz19cFa(PsV#70TWzwc2|!jE8{G5Or>lLEN2825rrU z!$`=X-lH%1j*erMC#x$OqWuY~K3_5o1O-^c2hQlezh}364P@GXxuAlTgdB zsz-5W{Pd01S<%0#Sq?g9a8og>a@WbqUOG{~F4FR1n4hVif&tKA$Tzt-!xj_yt0?so z21}Jc=hLB4%vE+KgSCTRwhk{L5^6C^R9w|=yfGV1#7-Imfk3a4zTj#X-L?_Q9qpx= z9C{ytN_at79xp_EmA0IwPaY7$uKr!O+hn(dCIE&c;aa=K0JUBoAHO@o$GTC2mkC4i z0+MSX_4zcUt%J9(;Ez7Rl6iPtt9R|Dia~7|zx6160Y<`JOVs-advAomy2>a_8~e)5 zC*-N444&lp<(YH<1eFJHp6DXSC~UNEl{Y2a>LYigU~P;&TMTr}^(-H>`V<@KCp~p8=ygE&t_qEuo|W*|p?J@DUnbA4 z0nE8>7w24PiL|5iV|dk?~ASf8C(a~<9F|mfib?=$3M^ZU+o|7o66Q{1{-m5J*v9U;b>^S9R`}O($22# z_7HK}?q->Rbv8zJj>Rq`R{d=K_-2B)Kn9E05H0u)t|F!YS^xg{Io|0wt`>ChLt%MM zBO zCdub;v8ydmo8*1tLYq2u{oWE=Q1xodEf6KaF)WY)Wf~=+?qO?Ovkp%hup6ky2ZUyj1xa>p zjrtGkX2RA*cHPg3@TC+OeF&SvyY zRHumcr=BfqAeT83YI8RPqPoA)8&~1d6x(PdxkO_NVd;uts8^4TZXtEq*(yyREi8BW zSS*?c+n)z%cRVPl1i^P^I3#ugZkAkRYApuDB_$v(-sOl+2s_eMqA@v~^S*=XKXv4w^!^Mcg~qKb9xZXPl6 z64o*L`Q*5RMmHs{}VQ9nHEA~`6(TX zRyn1T%k=EeYOgYebIF!Xdu!EZ%_4_6pLT+Gv}f+fwPJsLP5a}HT&V8X>p#OLxTA>E zNpMgeOWG8P6rDP$SfN6pwCK?hBb3wPe2H-IK9#TMl+z%pU5B$t`@py67Qff&YB9}F0FF0 z6j1F{6aG~kOw~e=pgN02$Sv{SJe*k4_3byyI|gga;2iB3KYhddec1obve5*B*Z%7E zzYp7Y@gKg{_z#=y%_rYJyp8?uqdWWG-;@3Cms$0z+W&5EZ9Vese;+=&v;V!b|Gl&S z{r$#&D4|+WA2SnaABR@A42)2!uL#?yFf98}$WyeAouj+vP!L{eqELgUpJG$T{7{d- z2cwf8e*QUPnEL3KU%ubKcb38FtR-Qw>zuF~|K%@#0duNX5K#buW&}N%{8 z;zo5>eo`$U+hud8r-FQGxLf3Bzdp^th7;yn^u0HqPP6fBc++aNqWML$2B73qh~5lX z;YWmWE@?*<%vd@1?xCd+wh!OXtgs;bJtz)-P*U;tvwSwpe)#!*bT$MdSo|5?|3&&b z;&1Ua6GMAnOeQ6lWo?*+eGi8oqvZMU2ScnEWstxBybq?EVAev9r6|`D|5aOLTIahc zdYA)|v2hb8Bm7IGIPuvnk=kEd^zQo&-Qw>zbZ3}?jnOTJeLcER5Q2l1QLM&2hJtHW zZ{*1CVt!b&*3desy}m=Azb_{#jN8pnP#KgGFB}F){evD2RW?*6xGNa&q8&WOjBptR 
z0qpk+I`hqEVsYI>Q?x(pk)teGN!OUs;Valz06-q`-uifLg>gpUJcdTZWeX9fA>M=_ z25e3Vn#9^{nvTmM-XLYyX-|+onE}%xX+lF};r)VkrtCU0>(<=KC$*CaA>hIDBr7mv z!62HnWD-OvZULwJA==zJRinNlp&65S%>96cOpFOU9TaaRD`MFb*w1dF!F=3P(78sB zY8gb6y=h|4aHVrQ8Y&C)2R~e=Fd%|yo{4RPHfIXJQkFGcKBO*aM$~L$ybnS@6%$OP za0&6X&!mR~ki4-q)L282_WKk_K_1@OY6o1=Z)#`*z|0lkjm7?27dg7(e+^aYvBAA? z9Dl}AJAlF<#ur)WWLjM2ed@pNi5!V0=sObq>+fYTPS9Qxf5N;%IQ#6MpmGs?w;8cU zpynVXxUD3~U=AK8Ng_jpfb^3n8dk0ly(2WnJb1gBH-r<)#PM;MYG7NnpIWe~GN_Zi zrD?Q#xz`xWMu%?T;2oSPkx?fkoya|m_I6`ds02w5CCPzm$7?J#gm-Erqo!{BJ^lP4 zZt}ib1?UQ$g7uJGUF2fMgxKgoxmE(P=7Ui7Fo0$#AtTZQXD3j7-tj%HmNFzW)Be4T zO@lTTI1)Lxhoruz+iho*8a}csWPJ|mG6sL70$u1lAbX|Ple3i2nW9bu-3)bgnLb}p zT~|_BUDel&40OL3mU`S+h=ioD7!q!7LT~V=F1Mjj9)c(;89@`?FOethd>CV>52Gn| zHH}4JOp2Zv=EjG{5fuJ{6VXXYtq*lDq7|lsG${WdWn{=SokfZ4WlN3A)X zPOjfMi48`mQyVXx>QhwnilMK!m)CCXg1gbf(``Cx#|M(s~6`A}c5(xH+4gheuipRc@E z#=Q)@y-I_>%o6pMxl1FQrv?66Q|sD8Ap>$6#Ql=#eH$J52^9GK#B8eKYv#*F=uUE& zeNt|`-9g^HC#LNu{8gthwzKK^!P#1FMY)g@TLXB~KGo+;YETe`j+zC=mM(mI@V=Fk zdsnT_+?vE^Iy4z1aiYf9wwCN&%*P)UkSNXp9IRUS-Vc*vxKv<@W|ZvH+3G0_akVc7 zHEt1ZAx7P3-&X zevbrqtQv`IRaK0HU$WAZ5UZeOc1Y*b}%O-Lc7 zBRi9_Dyj5uAj@T=W@j4!lV;oaLSFN`!)#X7a5*5Hx-YY26&o^HB|BL(Ts~wK=3RC} ze*&zC&mdOb6EN}8nZSJ74!(t%7}nD!mKB{xXmEY*F<4-=<5Q-J9jDHo12);=BO4JY zAJ))^#;(n?YcTQXooY0aXaC5#$Ujb59biQ0>2KY%Yot&C7hm|h6oacgN2e#pirg?>3Yj`G)(X8 zQcKU%EZ))&m4Ld75k2;?M0t3o6zub-|nQiG94O?W0L%Y zHWou8zZ8Z{4Z2D4?ss&@qefJ8dNKE{E><*4lMuj}+^j@E6j^Nn1L08e-N=Z+_M0#f zs$YfO#e19keE=`!LBTgkPn;p;Da05t>mxYSb(Zb8_W^eRiZ?3$eeJxu1 zjRs;t`wf#+Vh%;a1VbOsrp0h2f>T#G3usQI0gkP%e17Zg%N)P^C6b2S+S^dag84Q! z0zk&k`nmY}NkhEaxPI|*Xxd#zkGML-dhZyWJCdotp5C~#X>9ZILU>tY_0_T4TKz>d zSO55|(NAV0a5|;{o=zVwM-BVVLN#6-sIJ9V z$7>i1W392Y`ZDvH4Ny4P^o9{jBRS5lmr|RH7^P#m+0n38nK7A7mr*mKkMFVOS;$E< z1X4r|bVPSla7`{%eSjmC_MqQ@;0BsBt*bW5L+@S+n+{PrFVf2lFd1d=aEuj(7;iEK zs9DkS0J&9a6vY*P80)%CeTfu80{69`zKXvWuYZW8MkN57BL`-hwaWQfeHzzKKFp$1 z1WSt}jWDw~&xi7x4`cC9gFxgRU>Y+J+p>*$&D{>C4KqL$(~qIOb_24+aeN3m4l=1R zta#8!3~#0MZ6rr;QG2Jl&{!6sl_K|suRC0caJ74Um(cl|8$M7>-G;5>kszmT#GQtQG?qqF3DFFnvvixYv$^iUHX@ z+LQ1Z);U|&o@M=^j11OoH^HFI9dtRl7!B(OQ4%NDV^XN=Vex2#cK~gfFY^J~%muA+ zU_unW4)hL><8`ZzivY!-V5d5x{qXgNuG92YtHpt7xO8|8;WpPhppm=;=Wxq5>dRNyCNJm2K^(JGSUD?wVJ)qnH3VL9U1t#LSDKc-9i!&~~ z%<(&s>2@gDX^0-_lgD*TWX(~Uk7Y13pW#*a*;r~KkC3rJ9d_-_+V`2gU|A4Xz6oNC5sId$gq07Q74On@4U>9FrR5gv+_ls7ti5P~^>eE`I@<@mMeoJOxmZrw!SB(@GHb zP?DESp2A8D4Qh{oE(@eN0Jji#n8YjF2Y#A{@v`oX#ef=U?gZBnpI6lUV|sj_&oagKHGd7 z1|-uJ!#nftKo$EoABKU9I-BD0kvl3ki$)-_rnCbxZyBine0(EA2pYe$Dci#$IMRiT zF$xgh}vhcb`tQscf)a$zGR?JQm{ zP_Chc`sEiyiHDAo8XZ|$5T(i=Dri9~WQkclq-GpZE_apI?~f+48?)WZ8XY1ILxHEj z)cmd@4I9&p%GRLdZO-!jxM{wuEtu{5=4^k~OfQ~gb}_oYc!qB|yVWzha8|2klpV6t z^QaDdTr{>3Y^}a`o4_^;@j3Ct4~WqNf&f)uYJi2D6MV6GOjHT!LG{d-YE#u7kx~&j zA;?QHh8t)^jZe{fHZPaRA-s?f;bCqT`UN-~?? 
z_-WYAv@(&lDo!CnYSvH4E{uUr-ELuI5;^+Tth?Dw*)_E8t2cU|O752sqPPUNBk_pY zmhJ0xO(IUqeCsmV6uI>|8CGl|712w_h${r1)-$k+L}zHD)n?0-%(0wnRo?&Hf`{!3 zb|4m+eFBiJK?WwOia&d{_OEBtG$4>IW7ejuRMAG1OV}nARt{g6!(&N-a>#oNo_fUy zu5MiU#qF|8wzHZjUN)pd*{pagI(`rH}s|6t-7Ia6V=*S)(a-1-*ghCVfANDqGd5e zEl@^5ONIfK&=;CXh?4uorZGf{0UjARzWH1*z)$do05}G*Xz>7>B=8b{lXjTNy$BEX zqMR{=TY@9}6vBg*;t)QxPkdN-B2W3YKEU5lL#&3{`GqEkT%F|`{{ z&L%fFZGeJOdr=hU!)&dDWMXbqGQ1r<$B!7|5yi}NArC_Y$Z~^-T(G-hSZ+|(jYhKu zHi2Xg*zcnz6>nuk8tip35nEgS&r~5L%~HTD>R+bg8H^XvT}}3DT#VOItc4MTPI8D^ zC<+S|-b(=GHAEs&U{eM>r0A z!7#=sYNNT(Yq5$X?!J*E9HMkVKr>}s!DdTVZ&>e~!_7DxS&{?~Mo9u!r%7U*jqI?- zW`oSA!NK^nMo+>~WbL5G{x~iSe(V$=10X{zZ3EoG8vRAVkaF;^+vt^5upRDO{6g)8`Ypy+hyJr;`nsS7NAoiYA_=I9_y|)c z)(#xhVfi4O)1-iKiojJy$W_FcVSze$wGqx zy*)Z^LJ3${c~Q)Vf}aWEol0OcYMF%D3Pd1gAU!O=?la}JQ4PWoj5%~MGztDftBg>_ z@aFw-Y#BdU_KC(yY(|W8-;5Bq5A0RSpf`=pMvPx>L?J8=Zv*kkF&L#_7i9uGO{{h= zTo$DwaQ8qoE&80!X9cHmqf}7>-4|d>`iCHp6UTIIo`dG)ZM{BbW4L6VCqxSMQG-l% z#HW#%xbU@+37D_4mS)`9hFjxFJSzHgAr!@@p~~?ayjrRL1x=w!7Brie#B!0i0+ULVW$he} z6y>A=ho54hQ zzbO-xdpb#{?%y$58MqgCc@E7PX9$}aV_5n4&sjEE-yPV%6gb=|b;tz#G&lllZPQ3FJ8AWIvHc5~=eFL7FaM@Ny7)^oT|h zK6F>DG%BP-*dl85ZO7M%V`pTm>C>Gyj%F zCQdM@N6cV)7B z_#%0>|9toTt7FXrSsu~xk4=8T0K{E0D>}sH6QVq4ivt;|G}bNa+ho};ASuFkSuBMs zlnb8mmHSP*7&tBoCMCX1NxycM8G;#iZBK)*!p29#i-!`=DYLBbJF-THX4{ySDpInd zR0%Sb=8$SI$w}JB=wgYO>ohEUnGcATBMARyMQ{|dzKN0`S z0*j|SS??Iy7|ma3#9bDH*2RJvcOuIbIbfrSoiSE}e>CW!YNHIuZo5&RaOKk}>X1nZ z(t2G8d1=V;$ZSwumXt2iGM&xj9vmlJMI2Iz$&aEBTs}m&q{q5v3=z|ZSV1s3;#DXL z@=e%$z^dIqd~r%VkyW1hh65qsTK`m*h#f%IQvt>%KU-%dei`IU&bc13kY@oFL`8%c zP!qQ`g|+U^A_ur;uUl+fwZji5Anwba?G@|E64e47l=4Oa+D}3t2erLmZ6*IgBt}qb zhK8M!CVj!K0)ym8eLOl)^9Bu@Gf)NcOCLB|ZT&X?SSS{tqeY7xGz^xo*#=I4g+A~E zRpLC)it*Y^?CNkU1kz*qPF<VY1*uS zKRJQpa8rz!tN`D&eFqxZ^F9OKnKwMunBs4r?=EOO~ zkZtivMqD(Q;Uvvp)J9K0ThNOeQ3|i*SmWWHAdfVi+-mNQpFVh z3$zk8x}TYoD^b6oo#);ZG{VOMlLNuoS{#GxY8hDml5+Q?o4MB6qish1aZh}*ur22#BFgA4GM*x>7xH&G`}p{ENry!>2D#k`I@~| zUZkCdwINLqtzk^GwS&!&fl($Fk_itFCb-D1scaLEVw#c9J&(|QI&7kOP&x*T9F%h0 zLkRfkq$(q;g?WY7>CA!Ry@V}qMDGt@xkt3%Eo{gbT!B;7<2~BQRL$fOF4c+%5q6ek zWVIzqi>t$UZ?!uprOA%cX>zM=xPjh_4zp2l8H6VjqZHDQh{MnWAQ$mJIzx$cjX)#h zMaZjys3dMGlLQFAV<54!JTjl=n53~Z$h!_4&j8Z)of ztXQs@h_j5d8JKxBdG{A0#&8ZTlKvQH*|4~>wS{iw7?@M5YVewXs)O5yGWxHhw{Ixy zTBa3RP>6XeCN&1?CWOQYyeRv7dYP8JX+D{S_jta~QI}-u*1xtejtDfl5kSv|lZ6)? 
zi82L1Qk{!2ea+Y8Yd+n2_p;CMedcYS?lyEJ(k<%9+=80)(y3op1z{5-@Qr5)g{ zDu9)g=Yp4NaGR;MuTUz7E~{5H#CCYbDMWpUI$WR7%rG<=RH}1L5qN+3lYcCR-_1rB26d+ zRs8%*1CJi0cy$jS*qmfS=slFFh3`c76HQ9KYEoz1QDdN7caf}gibf#XhFztuiAKV& zPh|=^PZ_ICC2XRI!13(DT1z=ivf(OkK=~Q;fU*kV_&61XEF5V;0*i-Oke72pEbI=l*Pjf3&rWMT0nk>Rq7pMnb||DlGqwTN$YlN4KA?uJrOb*6n0&n9RKiP zys`29A5Qk3?H=!*d>F4q4+5ba*Te@pu*s4^Yqkb?PftY=-)~THKdjk1#IWdrcqzKn znj*kkeU0M?uQ|g->=XW+>_E0tQNHR0Rs}7?@9^0U86!#F4-i8)VzWS)HT3Hh3@e=1 zIFwZe1`2*ed+)e{1hGXe2gUs3IX;V~CPJ7)LhB6Rc zu|5&Ax*V%%Ev}-a3Ni@(r=t%;Pa3}o-6$^Rh(V>bd?2{A*<t zr|~t1Xy8QIjgbn3crH%S8!If7MeF3(A|Aab?R>%d<*4h@8{w7&4~>w(ll`U4eTyT2 z5yjRG3SAw_RyXMQ`;N(nkz(0P>4GYyVm9z6V^ zsnfX7eWf*9O3Svaug|swWx-d|>2N49F7&C?Dx_O&m`rw*R`S@>W?G+}__9;F#w~A3 z4BLpm>uo2xMs}7+2yFP?8lb<5@J_3;b6DD^?{1Lr+rY274oruRGD2U-netg;ji1lIR#5R)`QlivH1ar@S)rminl*o@FS2*lnZbk3;QZ%f z>7+m!H1rA`4{r>0r_2jt1o$~Aw}M)gsk{s^)n&)1ozP(wjJ@n^q`Bp9p=*?yuRb@G zdSOkZ1OWEa2Xv7O4DO{A48?}9aLLGrhE339lp!i22P-_wdMWLp=`0#$Vikwbbl*H% zfBSs>==gAd_ccilm|e&Ms6qw5!H{gHysFd%z2S9Lp@ZPTQR!OhDZy{93e_hmJ-#+@ zyS0ejnDF@0AJvP+s(Hhg&^#8QCAt=KtZFBC1!*UEPaJ;GUNJTbq=Z4y5&ED-Y$G85@tQ%n(5OG+yf%rm{*Gd znlbMfm$4eV?C>?DT1bG9D$a}X{W6A_G@Qr0neH7hxnj zC<4Yx{3SqIXhtRE@T%;v`fhRiHfL<_7HD5I4pCY~8v_Q!CLzXFoady{ikN9IVb==4 z0vy$)wL@dcY&TZ&=rUW?bSkuC7%9SEB8kvp6@?2|II_*{JpQDvMg1uq7U!Owrs3p^ za))84HSQ-E?#Oo{>ppC^Ll@)vw2RyebCXXQ3ukB3;;OW+SH%%H1?Ly24wF!(m&dPP zDSIa>(kLx0wWGtC#_0nK8BB!&Z7X`KeZq0?MgCxVRQBejV1ZmR+=P|Z#S|KRnG;pz zbhQF2lU5`&qVhH$drj4=%UkJMP0?YO@}o_NRXmgDrwMa4V5YvqZN>(umlt#A^d z17yHJ6#RwoqgrV4U{w;py0J{3Wq@Ct(}I(jJTO&ou1)y(3QCmT1um~%IyN8~3S;i+ zqMCBxy&If51jlckx0+E(mZ_6zCU`HJNQ#M^q6eN4)lTfODda?>IcNm46{j7dC}K&w zJvh*}HjGh7g1#%x zH~7z5h-L?T0x}n0UlNEpy(*AJItMRS5H%4v%H9GF24st$Yp#xVFjDjAOy9z33tGel z#*F4uy5z*!IC*~*yY|!EBW*IhR$4u)YM3eo&~OZxuB$D?dU`i4Wb*yg+f}p~J9V$E_0zYB}BhN!(qDD9UPxoeMB)#PTn*fILc4cb|x3DC#GgKw_YBPpcyBu(lHpH{W0po=(w7!OOQ2 zImOhyR_uR1V={3qXl<;p3tC#G3;d z3v{$9w*>BC`p=tn(2ZN0KDI^2G(wOBvsiFpMtl>ki2yVZgh^z}Ht_T%@fDhVtButh z!3t+nBZXxXZ_aX+kEW5vanxK)Bh_1+HEggQc+#zf^L2(zfUaR$Ptt|i+wjHR<3b@W^rl5wu9u+Om9SdqNyNswu#&|h zUrV4tYdrPV3Xy>U&~pba3T0aI?trV?v4b}NE?a;nR&#EiN(C}9yDH>W#0Ydh*p_LA zE~U7JL7Q66&lu^_z9^m)pr6P|@Q0p+MV4&by2?N16Np-Zf(!iG*i++?aH|q7NOhyf3n3oO*lHG|)9DOn!HcZ-k-ivCOZJNSF`L4D ze@XidvIvW!iON_MmY*ci-YuMe!NT#@fr2|xgyey7RICLbR%1)7L3b8)>t zQ*s$}_=oQ5bpbP$l+-V>Qjb4Yq}pKOCEC~1A&iSLsR_T&$CLSN;|H-8N&Q3a4@^FN zn=){MO&gqp8N*i+ZR4OsSfX?hBykS#n_wX!s&N&WLDkFYza{v5aY8l*~ zn*=xwuB&>DlB}N5$O~J4fD4bNPTKxyQ^*IEa4QZs-d`w>Am1}YA2<)9L$LW=Wi+!n~^xef=hfa^qD(%y=FL<+Gw$jz%D_!gW#D!gV|#g`37l}>>< z|9IK`Ub+AMAeHY*WiEOzsT%NEAv)W%W?Pg{|0N0`^J+l@0AZTZ)M`Jrn=2Juw`5&d5;v<9LtB)}R}8|Pmx!P3kXX$$G{qHC z-bU^=NDaL-)hdW9K|p+~G0ufIksct3Yi4C7w(Z8$yM=jNj0kywM_(N=Fr=XwfbE>1 z@VYdhL-UU6o5k?y!W_9%W*3d{ZE<-b?bYO+@u>f>4zmG{7>$K@w{W2%=)n0>g*)Wl z4O|r7aE6g&eur4Wc;CncHDS7t>-4$bA~6Rh75La<)QzBU=nQ{{l9p zwj43e`xPfI*{Co)TZMx^8b%6sMqPC*e`JAOjf+m9TGL^@dbli*{i`YLQqy|%m{zWT zZn2uC%6bg7tg_}4C$69e!U~tS6^m3AU2$jITp@J1Xk`MjRfQULiRnZ_dyYC#D5|Mv zp1y_0-L!hQw-ghIWsu=U-Hj=b3H@>|NDe{N$3vI}PiUCDPbOLjBAK5=?S2S(Zn3?% zgRl{_)>MElj7N_m+5OT7afugBOl%5hjO{{@gLP|g8YZ}VcAfR+%KKcKuPxyn`;@J; zG|nEB8GD_pLIX(46=3Vt$P*Qoo2E^ThkL&YV&lUAnyPX9V@z9;4hi}m&)&|to7kx6 zxKkK7U*f1>9>+rWNu(3-)uts&dG2zqrI(av=Ip2dZfQEI4WUD(uVx);H_#m$ zqJp3somj(u$w8qW030X?Pdq!8!D2&x_Qx?`+u`OuJ+<2)J%>-?`{j;eOU>v~1u3}n zY~sw=1E)5Y3Inr%7evSb--HWnMkl9@9q+1F;u($2vRYvWeccPbp#)UNbwUu_!4c%E zleft6j>sPi)m+|o7Jrz27_)&lmWksGFY$vxM_~5mkWSFB^!q4;{gX&0qIQ4Su5JA4 zP+1RGDvLpd7KpIHY$$-Z+NT7s?7C5>-EOzq(Kj}WFcTRoRu^B7@Ao0n`Tf@BUjZ%Hx~$kjv6Z)-orhY6ied z5~4ZPvsQQyaDlC;MJ%xR01jrQ>g4V+u%!pvr)i(T^L5059SHH;v}>p|yj51IV1v3s 
zTwga^MJ>UFXEyx(xR=f$e4rL~@0TCOcp9%-0}I4V%ZE�=oVwll3(w8Dxtdhgn|jtOeLyP$wp#h`>v z$+Ey&@Vzr9#7}AX=!ehX*r3L#dnd9odo8Qgl(vGE2z9Fhsg95*2|Msim2$!?PsixO zWPnGA*p{XckSd;tJAd8GZ5RxE+WLeY24)gLVj;`FILpMSDcbWGmt z9VJKwWM1hOT6$4??4W3N*fd`Fix+rEVN)Xp4uE{6pt}oi$-kHAvwF483eEwiogBUO zF|Xju$|_f`4da^YIoya)h|VTL3Q!7B+S)I4ftk_jvC{?1DU`sjP=e-d0(R|{@0R~(xJoP zf%l7oF4vuB5w~};ZCh01oFzEkKc)t@GZh0e`eU@_7me! zxjfu4S%skeg5b_`u-DM`;~Xj%Ei5poBUAz-UKdMVtahy;!IUiLLMR6zDgOi#DVnDB zGO!ttSA}HNFe6N$;*w3av1Cz4G3`@OZi-8dEuQ5B9~8+BNE9Ek^Oh7uttHb1YM-?E zt4YhG0PEaOdl%GZq!qB^NOKJPF0qhdET(;FnQv^N5~^p~P5ka6wyBYGsk~2=9uY=R-+; zgy;Ji6wSBK_0~6yxQXe#I-1eX6dGekFuluk3VuWJFE-)V*w{yU>|f?n3Nl)N1)91$ z5}RS7*n-6Wr_q-*NrOhVF#a|qA!~_FG!9$sgdX0or44JmB8A zH_SHbC++obPanXb?9!{?g2(5-vR4Ih8^bQ51ec7`08lW@Jk$js7z|-COf}ks6$bPw z$D=2)F<1vwu9BDK1V--<52$4vu+7N0F-R|SL3SV!r{=nBrhToPgG(jzLQ#JpMB_$PZmK$vn?AU?r9@<>H?5r-K7ON=xGKz}LdaO? zu?LD}^&2C4H-c%^eU(R(K1U32Lxks2k#iaWl`0fd=SBAcR^8{7M62}!EZ0ve$c{E4 zM}d)8EG8MZzWHCVUfTqBH>sKxhc~29jT{7gwnZv^Zu8&5g;hrqS;B?|> zg;DHPq*!b?Q^i?!kqhpH-0v(M!r^5a)hV@LZg4$kvD^4B>xWr7TtAp7aG-_m@SoD< zV7EP?q{8jHY8=9We0)JbUnse<+s!1a%P%0l!Hcr>8g~kCMh^p4*yZES2F4`P41_;4 zqj96Lf-R#OQ#fmac&l2}s1Eq8>VV}DurEQ&9{9FX(dyBnOI#Vit{DxmQJl%Nh^d67 z#%O^ioV?$7TW_d73~*CHO1&X62~NA1mMW1Z zu%G4p40(KA6xod4AG~(2?kg$7xd<2eD?%J=D2ghW^U{c2EUCG2UlIG)9-;uiaobh^ zvI(qZnQnPBT1UrOT^3Fn7sYmUveTL`%vjdy(jewEB-Tl+RCmXe)%spCQdX~!qYhb1 zEH8TlyBwQyD4!vzgSO7AsLx);wA^ipWC1C+kk?Gr_C$`AU1yInb?U(zBurKXZ|Dk> zM79={EV4H>W(%unollGTq^yTio=vm#W7skUYY$;N$_65-%PDf&hiZB&auNv=u#_`M zx6Tq{r17zVfAay%(l!-iavd{@llsxJK(D|)!oq_87v09$)8 zJkeKh;&K;HOBu6!X6%93dYDDsUFa(jb)#-vUYMQ&cu|3hGb|_{!~LBF9O#A03TVbG zLpZ~Bjx}Z9IQLizBU^!qnkCrDjMB*R(zDB>Gd)o?{4v>RIh%4Ljp}uAG4}I*UzzX* z%)ZR7Ragp}hQhugsM7vW-B3|;l=)=c>-c@$$p#xvF7>vO!z5|KT9HBP7$cGb`IFd) zh^W#wXAKx6m3Kp{fCu z=@>E{ZG`M_mC^|sM)Jk}vFnWnns&YbmOyF0rkPBO%bbxIT+MkDgU_ifS)H!|3+FSg zN{V5hJzL47%IY&*l0c5W?FySx$<>0M}!ioQh9$8qwkkU41PC z$mi0HeS58HF?-=up$W-TZB~-fZd4++rZ&?9-tFDONGEQ*@I?JA$|nA>FG6m87uovEJxiUQowqR*P<{0Xrp$4 z09w&e(PWHWDhP0Px&D&uI{`2(C0}NU`$OdoZ<-1N7<6ZhriEq5smMnFBe*fT!H3pl zeE!GF?)CL`>(iFIl>$52X{%&O885@&TPb@8ijfT&7|~{rs`1>ZeG(?V8`i{+kDZn?HF$AT)wdv| zwueL?n#iMr|FdtO6_EKm9#_heW@mBK=6%8~6LtAxJfHQ(#Z^5V)3D+{l^Oo&XTw=q zM|`xcM-RX0v>VG}NWYbf8!NaVWqIJ=*j#{YzPKD zk3@?TP^>mp%ofmgh_|F|Jr@I8Vgi40g7!sb-?>$sY?aNAGhprpJB!fYIZhfPt8mjq z2d75gJT0bS=#L;h42WVBIfkq29WRcr0!5KxsB>)=uz1urXuJkWlWPhIN~#T{{-sVp zoG<=QK2cM_!?PQS1B2x?%rmt+KRMYgydZFab;J zaiAbq1T_~8`qN8NHKTpbsc1Rnal!>cdmvwB#TVSaa5du(n}X8k=>hO zb|8hItWN}J?130%Tg*f9cQH{VPE-I4S3{f{^GR`}?P-<8$!SFrTMAXL>!w)l3tJ%l zT#S~`ohdUxv>l+(0!Y!zt5;*tD;HM{ME&J@twPuT%H35#%|EHHDA7JTBd)7)Cemsh($;Dqwx%Z#Zpn>p!CF zUt<8~a4#d8ayCnEhQ+ipok%tqEbgTEFH(PhC;xSxMDx7g?L6Gx?mT+*Wb09FvmNz7 zncNjKvbDLrDdq*Qk$+RZ&&Nf#(|Ony z6I&DY@Z0e)AAjsVzGE?8?fU;RtKMJ#|F3xdQ2W3CKYRUeKHA=VXs`dyW5ELcBWmBR z|9|;^_x`XkFQ*%4`FJB6Uq+J~ab_Ra?$tm8Ke?Ia0N}RSZhsp+PkY%}QGCR2ukv0t z26Kx!NUs!ha)NdW{G}QF$WWh~t#(vLlMMb8H@>5A9O;cH2{avZiA{p{DlGOOeup^a z<#^km%qzGM#0!BUgb)S}^r9pPBQ{Y6BAiaJEo`uaFJKLBij74r?vN6e8?O%b_TL=s zuZsp_miJ?EB(cvVY|{W>55x2dG?(f5ly#)}7`6RqEm9VPnULT!QA$5ASwu4jOSTU1 zSd0V(0s+LkN72Di96jAVIyh=ZKOG#ueEa@bl=ai@;of`Q9eV%j#8B5b*fdX0jL@vK1s_OS<5jo7zEiEOohOCRZKsM^gm&Q3`hytc1_C> zr8A+Z6QXKR_{<8|9;wN6QyUjHWV}=^0#t_vrK;1lG`3R3*Wu4Kh&_SYQ|P^lR3j1P z8!8%&fxL!+#}NmufYGl7n;_A&Jq(iRd^~2j?bI8B+7(RNw79Z>!^NoW^DWVw>{V-q zhpG`(F8Eq{VDo$UJ*1 zhBC_G-~y+SBte~Ol8i*{Nn$8gZOb&XS1o7ss$o(VLZm2kl;Fs;04|1~>uYPx=!y8x zvUqY!YT+LU&II?QpsbQtfNSW(c%F~5H8E(Tg6#Un>IzV>Gl^QH^dYDopO0&)T2KHS zFUI0UQJfEf6;JlDNI@iML=r7p6zOG}6M&70)FyjUA}U}_!{{Lk0L8x2u0kPa1xF`Y 
zQW`CG3}A1p_&q{F(V;+x*^B}OiEh%0!gT0uAPN%2*A)LKWlS777=)HeGYE}6*mxTu zE)DWOt3GICtuGGA`LJX|qaNxJ82ks0_BCE4J+WrwBLOWRyp=B~_LBU?w&(IE2Xa!kJTw#quaS>u*>sSC&6U`W zR^v{jYbZ?i85ztlM0B4G-Vuc3aJyMird^x`1afk^Lih=U1pz}rs~MHs+jHo5Kn(<&YwQIxVBOIeK}`ru7Tcxy40YcWjS*M~YDH z)0%IEq0$+s>Lb&GX#t-4g$}v&FMw2+4U(Sk2CBNKq8d`YVGgG;pX|G!uh}cSep(nU zB}>lcgMs4%B&XKB@_H>Ko?hvqbRFvWcmW=}U~Ngtu{7lGz%tPE%*P@*Dc=7}W1LD? zMcmXC>N;mVi4XVx_xpV?hQx3fv4b1I=Cyo(`Hy)aL;}uA){sYghX?PDlQ+As_rZcJ zc9CBQ?QT&C?{<%0CI@exzYXR$0Q4}rk?W_#*jPT44MOATj#6EstA~G0j*brx-n^jd zylZyC-1EgDAB4{b-!t3Og9AFuTJjH$z<}wi{BLO`AOn|$EOx@>4BhDD5{tnQ39^(y zba@)OxTfzr&gj{SWd}@1#)Dx0u8mHs%tIZlrM`ffiM z7F(HsSayM@Ce>PHoGz{E)&83o$1hi_jO|~bIy(+mZC+Nh-3~q^K?VdG)_-wh| z!$>8SsN=FtBG<{o9l-RS48^7o-ui-Q*W-pc79!E9N5z)KveUEBD1besIld^UW7Ny| zlno)5S(b7UF2_;v*xb?uM@UL>#2;lY)6roKoaKAC|N8CmezN=Q*`dFBt&}*vFEQS0 zXwC^pM72k!UW)SNWBo(orEJOPXV9m{VkTwt8L;_f*^Z-52~k0UX;DoM zYC>g~Mxu-yg0yX5SHQW-?ZjCzANNz-1I%Jhw%R)$T#!sF5f`YVXon2Ftjq5W$GANq zA%laIC?oH>>m^6Bo=j)oPy5D#)feO-){;+!5m`$X5J~e#coU9GCht0FWqkbvB6?VxTd%uDs)p!e5E%oN33g|W^- zp~7H1)|hE%2N#UXpRh%N28}6?);*e7Zi(mD7AQqaiY{HUEu*AP-)In0830shALuQ1 zusTeJWFp!n+eTTUn2$#bQ5_cN-8M{^r!$3M0j)aOS+{bZGK5pSzK*7pa`j4U%7z2# zG{wUR7P}Kh--DthDvozF9h$kuBZf4^F znDbx@gCZ5N-#XNAvLhPipcA4#jzbrBqu{>rEEk8N5{>1Y?Zbfx;&LpOt2*^vjEV!# z|3!}a5_$`c-N9J!|GXas`TA;OCOx-sd|=)L`R%MbKyKJMB|1B}ZF1X7By|}>ancdn zh9uGNLVcngO;-?gbzv>SxtB0yaaRm~rx7fITc>1l{f-M+i`N>JraJCCQei{|@M5LO z2oW-MNs==awc5O3c$}`)H)_ghD3FfTVvv!lTHRwQX`z5*N3EjHTrOO`#3pGv_JvC;5YZX1CdNis~!Z%e7D;Hn4p9gu<|zaRnWgxg6jYhtVIql zi^=r8{SVm_SYCez;9Np_oORzHKer@?P%*>}H2hRv z9ASy&cwVRfC(UZ2Wi|;!G=d((5FEMr^+wsemxvV=2 zK{rlJd*ebvmxEUSb~5WOL8nA5zgwZ9B;3LA2`7NaA(rqolnl=kvkOooi?kH6p(+`B zjf-@6g&RmFw)><2*bT3!Uuq$LR^)|CVUsIv7EC%0-%8Y$_|V?2R(ZLws9w1>Ea0au za7iU!wN>E2LgA)D-8Zbm9+1S1)*PKP8`}H8kyyE$K!j=s&(P;)`5^j25v#wpSk+6y zRC7#SW2&8)&?|(4u8IWhx84thZN*!rws;Y)jq7hI?b$9kyQcFE2kOf4=pb?lF1}9L z*7wp_G2K>adwT>ay^DzHshNfxcX(NlM+3r7HRla8yS9QK} zRc0N|_eJ%HHAaHO$yG}TFZm4f)d@lPN@}5$ax`v8{Ix&f)X?N=EJjojBJwh?efC>3 z#Rny>igb>x2LjJ&fs8X#?HG~1?nlIf_f_gJsR@HLN_`u{w9bm#x`_4@zdyH)*vw!Yo$)H>hV{y&}e zodeL`k#Lv{=c>9@&AW+{QoOm|FG(R zd;Y(@eaHWQYyRIyqqyII|8KTC4?B1K|KErIKfL4rznk?>tNu&w0H47Bw;nydC;zrA&L|NFaH|9aJbsU6@G?0>e}cl!U|od1JI#Fw1^+vxxADs{P0fx|4RA) z=C|9AHapw3ttZi|gQvTPf~)_y-@;hHIY6{z z`E&>#r3B`(woz|=(^zXpYpAJMt2JtX!9oD)CiDyBYDVY7;w%MM)Q0po#%w!YDCZaZ z$6;Su1M+Mq3Zaj!p|)hr^(I~8=oV|y14{WITKmHq2UY|}X6f#_BI=Up8@>jpt2%>% zd$Ld2!j(UxjU;&o6*?>6$^wfz5i0l{t||8KP)Z;AaMM1ni{|MxEcKYa9fb@~5E zXY)?}|GMP=FBJxGnf$-q*>u)_d#lsAlmGAJ|2z5rf1>l{Kx$pUKhq8`K7SjB$oQoJ^C4w!Uf1K^W_-*8$4%gx6#8H9ah1lTb0>!%{V-U{h; z0m7?y=x~OR!!L*>HxhH40HhA1_k1EI?3X>6s8#~b;N1!3*OdP$!N2zY*Tw(oY;8Wi zlmGvH?tdTo_&;|BpkKB8&!ND7qx;{-5AXcHz7qa_m7RU5`M(AJ+xFI-|L?yM|9|Z9 z|IIu6x378qi&g*I+W$Yi!+-m&=U?vuG91MxY6Hc1Jlft~#Q!@m{~g@+w>Lrm|EPT@ z|9|EB|CKfSV(s5)_J4=#|1(#;zy2%ue`jl}?W})6Gk5y`-%$S_$1z!d?L;Gt)+K&{ z_PdPgQj4CQO>RJe-T zPvK25EY5HKbUsS+AtkS!iaJ=wPZdw*XX1C%FF@}HEdcE!YT!jPDM^`Sk=sweMH!!G zWvtZO^H~lCCjGN!G|pxerKySeFpg9@;a{Z51nu+L{G7o3rDi)DO@`?#Yl0qrqzW!G z)VA}eVl{F=zoB>iGv|e9fb#}gPpaV>?4|P)(4Cr=GUeCzn(WvOphgt1zd-;;`pE13k zvKic7{@dCD`TtJ-`!|vQ9^J`*zr*$anX7(P`R}11F?aoU9&T>m$$!7H{HJBV+D|0z zso;@yDJ%#=XkZGxV}tyVqJ(I_lmZdTFEw>TuVf%!xEW#$Vj)tM=^&erbBG?KWTKl= z3trRVFjuwBreK+p_CCrN5RYnB6uQzvJxx`4aN}N58!{QuQ8nb~rWB&t{&ha9`7(;= z)HEBkgf!S6La>-60!qIaug!!Ql%*e|NmfjTY|K&&`_Vfbag>cQB&U{vjlsuI!$LSs ztap^GoS*gcY2u`Yw@exwa$NCpnENbd!!|Ys$r64@B&9qU_juowR z*O2^XMI^JzP)}uamX>7M0#WWL(y20({^5roB5B(lljAwGiXIwPGBWXAXx?omH!Z!@ zhgPcYi(iC%mETE8u#WFgsKC2 zAbL^E%dW;~0JO=Xuv%q(K#eqn371+A)1%}8Fo4YEB;SJNP)&@qpauKIAr}nTXsAC> 
znipDY+8h>M)gj8_WMOi$n5ihWl_W5*Nm8BK=gfXA>k96ANOU^j_dgHuB;YJMTK zEiNra3J2!9lb!X>X>dFBlI}P&jQg4R|5R*mXX}P=1_r8^LJE&Wc38AoIvwWO6m4TO zdrpNM42g8ZXj`DyYaaR1dw;NH?u!Ch5GBK33zNdGX(xv_m09fM3zH7!!=asAzNuBy zLT{L*y>v*k z0P{A6Ns6G-skzkdMaMX?Adw6*P_8QT`vPstrEuShj67HjFswj*#S~1+1tFxDMc&87 zZdM2ScG_)@F<;hyCYU1{r}qUXhV!E$0Y1KAI#XPQsNd>st2ducv+<0It6L$<#r}-E zeFoB6f34w)ioGxCx~87qQJ|xf=P~ zDBaxXcKdx;ekvJ^F^s>dyNPPS-Pb;Rfln=*y|UhgdGz{(Hk>SW|3#hzO#qim5?x-3Ts2hT5bN^dX@R`?`(F3{`Z!R-J?9$<9OWF$*%un9cTR*avZQWc17*7wA-m zWZTq_-Q(#zTSy>cAIztM#(+lD^A{u&S>{TVGEH+>27)Za28lbbXyKr6r{-33yP#A4 zxW6i>ckY<;-MGmzpj~U9(2S-rGoRa}GT0_f6oUUq?<;XSn2!WWDTYL0==FElRpky& zY)3QbdGCS}U>Nsma$4J%r?7bE>12w+)LIo;s1&Qv#j^Fkc2mqz4xFwFrYZU;crjp5 zNXaxCU!JT%Za6Nbmyk5~5GgTYQ$ygk7bdImGGO@~tc{7cP# z0rU&rj7=o|9I!<2iAHRJ2tV?@zGkUjs{4zOs^Zad>`YsY3 z{xvx|K0J8yqKo~YqK>+ykg#!08dRaDUOtIVi$Ax*+S z*dJSSAtMQaPSBdz33^$5Z5`HKveq~im0i}X(ZpEX)U+M2_SMJDJaEajyT<&Hn8Gz+NT#-1z zWvExK*OwL(R~)zgUgTrCt4uA#B){kr4x5j;J0H_3e|3xD*R9>jtk<^!!DC@b`ohN=PjuYa7hPt9*9ot@J~7cZ0gFdd!s)16RT1A5dT z8*oRIr_13Wq=JR{0RC8^ghd%Tl&}8h?B=XUr~LzvyQcGr*!A?ie`EEM{FErm3Hb-x zhL^8RZKcoxQ_pmkh|Hn88Wj1l(RHpfSj&<;+Q47J@ROF$0J6i|Ur$!?u;_WhQ4+ z>`ypYU^YBmzVxyB-o<kHaEGcentyT-HSY>|G>RkY`!iml# zIEk9R%8Sk5bs`9<9yIiipa*WyWRi|@hZ%}=A^4pv(AM z6oHGp!c0UMMiHpMT0Rr}n;yu&kp=`XcEHjHc4uSWqN|I%cM-`=nRY9^4Z#pY4n6fu zajoF&7}MJlvx;okJ|vhk+_3N8dgu9VD1ihHo@u`BY4N3|5S5B@HoO5n&|p5K%Y+w2 zaXthLvq`i+J{M!&GPQZlUGvqy=}W@{A-}kw67t3mRJ7=Jz@`Ot*TOor4g4vNbV2a4Hrh zAzm2S3X5SCgW(Nqs0pq~ZA(nsOL<8L#A3HgQH3&jXu`?criFuXKFiZ#{!bx~fqenl zT7f#ioCwSEzIxHpI6@|aN^Z^33*m8=`5RSs;+11+7tQ)m z+h;3OsK#Q9_g2$6-d}Z=)X5E7pu!Aga3(+qLuB{NL#mQLh}L#CHb7Mv7)a5&6Cs<9 zi>vYJTEi<*+HjO|z9|3U{_d;fXz%dg-Es0}_ciF0RN3}xF*1Is+(M(t52Bz2%Dq4R zDZ&hbaghgNU-0Ejp6=+zHRoNAShQ!2)3QoCo6k6NI#fj~ltZ++pySe6Sq$fB%1w7Q zWy|UY^^&T=nOI|FRiqu^4Uqw^Pv(em9&xWp7h>?Q+I0! zAeOU9UmSa8_-134_dbf}?e-&eXxj;{rlbACANLQFqqonGf7(5iibGRjC=1_k4M~i) zvR^~p>)`vt11Jve3hHZrTx+1N#Uz71X+Yu^Wt@Bj#j)WCQ@=Du>z*`xK($Xgyh?9Mkn%-wEACiUGSfcrDkov3NA3~VGw{SjqQx)wcb}~xse^DKkM}*eafCD> z^L;8GEnY&G0jrb|I9o$UY$X8P=nV}s;U3(6a*-hchceW^vGuJxB${5Cl`&s3;Q*=)>0XW-n)hVm>x)J_tYY$o2#1Gy$FS1@p~+{Hn- z9E*Ym3)=*~4kh0H_={{{@o2e$MQecx2R_+_48(;a!8tmOlg{aKwGCp43R;W9x((B& zbrl}v)OEd6c}|BWKalM2dI1T`j>AR0d;JEtlP$cB9QZX|n?uqHG!hNtduZk$zSEV@ zHChsjb#Q&M9vHunExutq!1inSbP~s>ew|dEWt#6T&9JJ8aw6+G8JsSqJF8S2Dakno z=;6~tXT7zGl~7-_?Z^|4z4A1})i+Mc??CJ;>2Piybz!>^n^IEk9?|$riutdnCrYmAyHNLUv>SsXC7Pn9*f+i;) z&oiIYy`EQc4?Le^5~Vhz;hkbXM)kG-2bP`a0vs#;*IJWP(x8z2P zs3B0Yr+LT+T<(sL4f5F9B%Vk1Z5AfGUE$9*P0PgyywMrL= zitj81n?Fy$m_ej%827*dp9l9s+NXslMco>1f__JCxPf`LD3tz6@P0rDSXhO0bS z@^L@A4*A5i@QdSZBbg{>`XOrXRIV-0#Rl_uZZ^;rwF@|%Fs@D2-3L*~+ID*z#aYa^ zCZx`?6a-)!roq@q^`bLcmNNvs5AIR)DJ&o^*w&Ia#q7D0!RoPnuf2M=ABq!<%o}0m ze|g`luHbP?342+)o300Y5U5RmZRy9A$NPXAxzD+z$G!s@Q1|zGF>I#$eku6gFhv)V zzZr@DhH3<~X$6iax-9n1c@LOnO}d_ottL-FEV^MutB{aWY=>bUYuJ#wZL4(N$Dn5u zF~yDBx$!qC_(5C?8AZqHhq78Ljb{N!ha;!nd4{3-r(zLI!p`P>5)CPDHNMr6MOr zzJd$Z^sEH!;tabGG4+RRv7jw@X*XNo$gVtZrQaFJGdNb>B*6sP#qdCR0(6q7rHObW za(Ziy{}H&`bR+t54<##8EMpck1n;^V^mv!*Xi_5cO<*d{j~J21$0@1#uWaLTU*4rk zgWx7%=5|Yf5;WT>%9+Emem4Lm+F`strL-Sf*>wtGSv~+5VoV~q+v-i&D}#gTx%$f- z9xrRM6B+)Jm13O>5K_WUBGrYLsE`BiPOF02Dh@{|GzrNhLeV5#ztd7JVtHIOY(C0& zby~I-q{J$e9n|ed_wXVBZ|84kbP8pCA!E~DTgCAEJ7|9m2Wxq~jX(<*4ollB#^79! 
zrW=5LeAwU0O*JgJulsf(7Zt2Y^0VaLI=Zvq>MKQvvd8>}HXLX{M zQ`mpnG#V6%IJIe+Db2=Q^URPL^*yjA;T>6wJ)$L{ri3deCAW|UK)Im++^ixloCcEp zcCEtBF?_z7roL69&uqM!lsALO7(N7>CEIssWShH!RlikBwMlQtTotdc)ZnsO4S}Tw z%0Sn56R$E?H%<7ML`?fittGkulp}ETwA)1Xi-8V=*D$VM6Q-^{nP!9hy1TaKtYmF> z&KEW)-Otcjy)f+Ru4GCO5KtVMxzo=^UJh% zW2M7{e>02un>d@!#Y~A~m$HBqYyEHB9U~b|^xuq<|Ar}zBIRpQRv9de6BvB;K+HNi zZ$_6w$Pn4wiW#B%Z!i~QyXh?MtZ|aH#@}jeIZ1W5Jk}^xKb%v{zpY_}^ClRRP94A} zG~Yy#2G)W2EJeT#yjZl;ienP`+5m|OQN;HZV8QT&PPb6#A}>$3JdrzCS{Us|vT45R z7iHao3&~qKBXj7wo6QN~>blBbCwM`2+Y2?*c$p_s>cu2BmSPoq7g_IPiI)#qhA4vL zn~2b-7_rK_B7Dv_dCK-}y`+QrTjFW8?}=C5t#XWYr1HjKKZCHY&jc zt_5j*cgA6E*^K|~5kDNf#fRW+s^#Qr)YHnweZJgpO>TPZt0_?sv+GhH6_2m=g95HRd!h8}#8sqz4-xXSjxZePN~go>KYiF$ydoG>i4- zdZpScOn#$)9xk9ETNY?lD#MH3Ox~>Va|`|%p&!c~B=^^c|1TYCKM@7|6XHKT+`7a6 z|M$WFe|(4k|2tg&pSkK+#sBX-0;b@`f7;xE zAo|=flwl;kJ->rn91Rblf=W#$>~~~9-_ob?-p+WwgggZ+me$~a|W1c}I16x+d z*MCnzi>8va_VDSyET1HH48<>6@u$q~H_>aBGQP8LC3PK6VQAOib=O? zx{z)jZav^F{;s0USS%aWS$TE3B;btLbfbV;WKHKC=HuO$O$sD8|Gq1^D`tV@LfyCQE{0~ zr+Gi%*efQzpTp5qa51q|sGxG817@gdvq$|Rv0){}FI)A2%g(uNM#PIX&M11ghWXI^ zMtZ30H((Q%h;|sH%%j71U{2fYM5yMLI_U5cXjE&lJ!;}Y;ZGg9>K#fU2lmR2RR2whD=Qx$%Z zL18%5L|dXuCN!x+9sT(idmX(MSxN^$ROGdmDvdIG8o{lj#v#ILEE{++(}7`D%?Wtz zn>{nu2$GEIT6s}i1=xUYKI0OjVFld{rbFCtrLbl8Xu#wNMlR=B3${_v7hf6b-a+dM zJdNsV{>@LlVWCIf*my*JTwN1lyf!ZKDEAaguF$v#vUOjhU8c8HBfQcL-aJkl|;Rz*t_;F1h0Rg;6@sb z5kapRu>)M=)O3`9%HbsG!DQi7beFa8B*n(0sH}rwh7cFK!(kn?SKYOAtr4)*wd-u+ z_%P#wM?p^K*)_iq>V{Ym%rCL1MD(K*X}0WmBVlEn|!tHtI7r zoG>qphU-UazT}$xSv{FuP*29(5PSyKERm_<$gcw(9c|(_1+@t;QR0f5 z0$3;J)PCXlyv>wFwO?(%c%1#Y0%qvhSZXfib%54F8ESiY5NnR(UwA+rVZ35+)d%Mm z5U#I9wbeGMI-^9RCr2-$Nc^`&HeR6ihq4-O;ndnsuEH?h(xo+8r8tiDF6w3j5ZJU~ z)enNrXY~yYjd4*mpo5kqN#%dJ?y17;-svU~qJ-_qAWN^PRL2moxwTr{T?C=SlTZQ9;vp?f-~(8McrZ{W%!;PW-{s=VRclXe2?)gS~ zX8Dm(Gsr4o#0cQ1>l$V#Bw_TGVJ;q@iv;3y`1%z*aL|;rmxT}E@^X)lRl#kGWkg+i z;It`EWkwU;rPDGyG(~Oa1O~R4y^mMvv|lEK!Ps>o0@?xvRAC%#@X(2GjgTEde%>S> zsczezi6bn*J;p(S_Nj-vrg}g#Yhy*oKycyJCW|;3AO)c-7!Nr3hRYam@b`3ub+oWJ zm8U*vRBzpwJfJqbkA{dNqb9s93gL}|t>-ajn zQ6>8GCrKPDUVuKZdQ0iBt_l_B+PwF%jxD|2@jL;M7=SrcD9(@BjnP|ov`@rm6AmLO z{p`N&w4EIQ$RUB$5*whz{8wA8Nlm6xg&DATg=KO>=VZDY-R zXUgX&d91xE&aJ>AMq+c>)7fa~BtAP?1FJ0fh+x-ie=o8%^Q$z%qOWDv6T`e_RzdV< zEO1bL0=zS;!@vV;&HcFcQ#R~@^(RyZEAp(R4#JIP5m;?kWF9-$5z6+&k?tL<>3$a} zTlFnNT^8MF7~~;8nwrO#mVK`18Glsvu!2zNQ9HJfs##NxhpWqZ69fkqz3a5wwbe$f zYDnOEATjiNw(i(QBsIO-?1PbtVOMXJ?iD6VKL(Q;wvak%&9KZ$p#9bC ze?V<0ru8%K{~temxXAuz6Yl>XZa;jy`Ka>-X`|9$K~pWNAh{tnmwXRi7c?LRy1$B!Rw+UvjbSZo7#_Mg9!{b%i&IF}%1$?m&@ zS{+W_U?5gF|S-DP58*uqDt{-GP> z%ky-ejk*D6EQ49N;wM|Zs$IdPr)r_vzS#6Cr5K5Mpw_h~Y65Ed)#vD_{dWKP{*%Mt zZt(2Qs~4;a|M7?YH~W0~1Z!Hg7PbFYr0veJ9bVz1{`Cm>LO-|$9;vR|c2u2@9Q4~T zAqrOmxl@J?0OgpeUmsCJ$Jc48y%!7gQeP@~ZIp2OQss+V&G~MqCNQ)uXsRNlYp3ZC zy@n>?pF-PH(WxpV(@?qJYpyL9MFGW~sor8H3F$L>%B%h#`x`J(&zDTKWn&`5a}|c$ z56-rmt6f^CC=&}D`@-uC&^%eqASLZ?P{r!DI%sK$1%1%wYk;HI0K^5`V3cC|YL*RxL|5KrL)XohAte0-9VjP5An{oUknRk%QumOBo zuzg=#ZaZA*Zu*5xqhLT>w3Fbs`@E#~HjaP}%%S1$h0CUg2d`6&&IO z!zppR0JQv}98JmupYl9WG!2nqi0eWd5<^VIW}IHp#$2LbGIsrhw9hA9BgK-=!o+qg zWM)J#6q-s^n&K}MIo>(vDrPM@0xUEn-%jYf4F4VTl$H+-MUc zqTbr!{?Uzs0^ecR^Yp^u>9qjG$O-l8Xx5-@Zihc z(AX7vo#T2|@{hi@8=3m$ZqL^(wJmgg44(GZRUm;=b%Udxr*nF&#u&WFGJECRzZ>?m zG<97fE?-~(mDZR3Hb&bQAgV{J03GqY${L_(l&e5tpSe80T4m2@_W^krS81@wxD^~y zN)rrd3QMj!L6laIg^Wp`^F8C%rcz-@hgL;D!x9F{h<7-jnQpp9^G(_RsJWx;SZ0bV z#-CbiZaa3*=8l}($ID`IpiHdNapUf5%=W!NXk*8RpF12i5`(g=d-#9nsjbG03GC2F(OK(;$YdgAn&ypS$W$)E6^r(WX_b^` zNyv4Jd?%+Mn?Z5nAf+fFd$y0(hWo3WXy}mO-M?x_k{cdNTtMa-fu;W&fi?~WmhYsU z@=nJ?tfqVw|GaG6Z&ZSk%h)*VR70@4USeqb%NjM=Vy*1ob>mU*>`MXxnfaMI*p?i{ 
zU#v@iV}kHXAm%Z%!5j1_lLIAfclPYK8=5g89_JXcz>wX&OlP~Hm2Jmt`oEwgX0Dnp zGT;YXw&JRYGdput>A! zjCbQLVJw1i>O`f3l@)t4nHkzy#@A9$7@gv{-`mNhuzpPckWXMK#-A(X7Mh zk|Tb^#5Cv{$^%ygdq^=RHQiCR7*p3khXK?L%!(TZsW}h88nu==zvipRVo!OTxvIf9K5uk5P2NeNuCN4h13YY zLddQ;ICKRD&U7nC0Rym6V0b>t0g|yIAKn^NRV9Bv*7P*ZKEb(abhc#Ik%570HGQrV z;U`CIrEI*=QbbU!$@YZBHOP#a!KaRMTn$##q9(iq$lIQdoEr)$3a>7Pe{(5sq@ov} zz{eBHY_PysqQxwoBh&!%bYiGnmxm!d5`QZ__se(BpNGD3L@?DRi&Ww(wqwBB2^~$; zFddQ3?xcr%F+x)!mOCoLQ9oW;|slW?2AFK(N1kQRp1~0Y+DUjMYPZ_oMv|3ddPl zu-;o8Wl~@3s(3{r#I>iW_zCBS0QI)ECVd(*)JcBB|o{X)WJ{j1}|A;119Q zzFOnyz1{8b@in!2^=kaFcU{%q=T?L*XuIo^PozGeC*a5J>Tw4jVL&zErA}0Ifp2;% zlX<*eX4QKX*zvGoy~S-NU+3TfooRcj^&IbZZoFKe%lNuIuH9pS79T_wc0d%A*2e>A;h4xHxYsgIorKI|$I-#N)@a?_tD3R!rB>brhSrX%{kI!X zDg~f@IArdD z#i@@(U)V>fi=6h9T~W8_jVw#)|8*FqOCyyq}j#5Q>76g!m_w8jF-8`ODRPAWgSu zVk}gIau-u|lp7<7gGPX(zH&eB)C6S%&Z4Tc;|R{{HV4A8_5H|u z&-V^FF=pg3zSUD`nHAIgG;^I+p~uGPfC8~drSdCg(9EZFYQiZkzgQ!{nkwU}J#>$t zv!mi1y&^e_hjP^R0h5B)gYs9O7t<;Fg|7mj%ha$|E{9)+I869@Ey_%7{YOt9$HM_e z6QbCM*x&Im=HH%_75sgWUZ$fZEH7QPy6uQQqEgJIqmCba*G;9N_X^+c{bm34tAm$^ zXt_7L8kDoa9V_v}+OujxeY5|3fA8&na1-d)O}YjD)$`}y?LGN(aQN!_Hkc~L%k%TA zn-fF1NRzRoi>p(WeSoH9FfQ>>f=UbtppDt;)KIb76IAdx6EswdSQtUdfvZ-ZM6gMVIwE1OkQY6dSOkjo4&MmoXcwFuNfxNf@MDW?JD!R@|3>ie5DLMSvb zo`1l2JV^gFN5q`&>35Z6ssx=CvJLDuY~$Tkc4lRhO_gE?{x~wXm}flwt_n$$&_FP| zQBce{`zBvbCTMkgGM;B}CZaPCipGfw!7>W?3mKoMMgy4j7Z_uCMoE8=x`!#5%}LtR zBG1!NWm&Dp6VHlS6f(?^)7rUbZJ+>Oc2@hC0G;QvUDZdp=rpYOG zIL%QLvpLR6{L3M0QGX)$kB*7B`nns`b3AtgCvFZKUNT272kK@Mrk)~I>^h;3n(ybt zQ2*(7@6emT*-g0r!K^4K0Tl+~<#Nhx02N7}l5C2_;yTTrCFMX>W`%2OMaqxs>er(= z87O}(#U$abpnMz*uZV!~lR#p@Rw<)24YgLZY|#f(n|y?c;nNf<*W8*7#;`E0KtHu> zG>L1a2~4Jc82Sv>1}LHrD7L0V;iNhZ0Up#bhRK%Ctc$Bidg2<~)ch^K z^~GUwN5{xHFeN>iFBl)aDWO~(+GFRIb+A5R2m`#SQ57ORy(2)V;EJ!r&%9`T>&7P&d zNdNP6n$oHJELFUkoK_|k2PuW8jAL)hH9WW(A5|Jn?|N$xCI?())oSsgpxVv-BiA^+ z@z__*P<_Pzz+qNLb=r0Cq2)V;V}KBOY+!1h#$C2CPNy>11{mfF^hzsM#4WXp$Ew=h zyY*FD177D9-j4Ip4ZgeM= z#Xk_oJmvI{Ng13_g7;#6;sC;Iao;UZZ}qmfd%f*e;}Qa*QGJOA1G#=33{+mgIP3(E zf{^+W8ZR4D<7r^(A)@~8+7m&M+-ft>e9~(KE~8%yyvZ2tjDVp`9_*kZ6Z%8L@j6ju zfzq9MZ#5ay+7;t!)ki-1(^{Vur^DFQBvsW(tTx@O{2?c`X<)!bt5oxbi<^CUF)~ zz>1dqEn`adv2xK`0kZs~>|lwX=i2qzUS0n<*DbySwWd+&@r1`M=X7Z`_{e6Nr&A8=U!rGFg-Rak&5HO+W0i8=0PnMwON+f;McWwKMrf@3?7$ zUZKTu^w_m{ly&`{f6%o7-zkIW4s2I=Iffp6~OpubWPYcZ|dliaf1HO|NKw7QB%=lH#m4~ zHq2nJx@zG-6%L*?* z+SWm^U|jtCOUF|}_?lxIKPif~LY8fU*67CvRXomnD;fx@i`CngZJ9RGDP8V{ldyAL z`zs-$ec0#rrfpsd|H?ga%;a&*T0G-ll$f~bOs&a?RyU=Is6QM=?}E{Dn4p@KhH`&E`s$2tTE5&qw^ z8HN5WyI8H$>G-3(uh#_@W~vYm6=fO@7%Z@Lyuo6f;dC1c%U7K~+%mQCfS_#aXi0 z4ey4s%gS3aE0Sk2&XH#qThk1Bq3TQ~WvYIxv9qbL@F&=X6YKJN!glfd?|M1=Lati4uc2b zXYVh_Y}K6a>Zlg#RfKd>(XdEqH2*@dGB!nu9n(5NfF4P9fn1>ChC=~Yl)Q)QUA>cV z3A#@-1S_RT#Q;9?dbcg&q++Le^ZKY3tlwn{Kbvx6HCcLrn@0q~jk%_q*0H&6oi*LG zj5$R>GCwUF-22peHWu=U$krdyE4F}0KVpOmSc>A}0th}~peJ9tyciF%5STP~cWzGP zLf@LUcl-;_YOO4m^HI9H-3> z{r=6Xcdvu*ehQ_pwF0l+Jl%i81!xqt0>rLE8C<>kvuAJj@v{utL8Hk*)rr^Sqm2p* zD$=Z;$Y_R0`^e+3ORG6^+K`r~O`*wj9kozyvue80OH_DZu@jUD3&UCG zq;kPgd7?^8cL0bF4xYxwX154Vq#q|Lgw(-PBKAed0s^^irjvwR_zFtZaYEfZv67?_ z-_FWBTm&$lFWLq_(upG$yvgpw5$=Ws-oU^)h_@-UHX84GJeQj{t)>V{?Sq$Z_um`_ z2QLp_(IysA&`X{wX{i(ZW$*dB{kLSNE`6Z}JMG8ohPk*=Gk1WM48LJ?DcjwHs)^9#Ry=ZjEoj3R zaN3LC2hr5PSkri?3^1ytUKTLGj}dj7T^0n*A-|U;x5~!RXsGgca$0=4|7`Ew^Fsk{ zwQt{R{mS|u())ijGmEjCXV5qVxpZW)T+F=&qW`TFSaptN55uo4<9)?7cA#U4Wi8xA zQ$Awh3l*5=ETLmAoLbM5F?q{k*3Q5Ig={Gst1u?q`E-nW9NQ$~W3L;0+{V$?5PY51 z>|?L9e%bA7jbpFZ%~>~e!Ad<$rJpiWu8Jj`j;-niVj1iFL)ABytmW7h*7rkRe=GLI zh7+6G40T8r%!lRK5iu{K-Z0PcFhYtE<-5w)4@r*Jl!l0oQ9ZTmDQXFsG!?;#wuKZu 
z6a@j-A4r-A5gASFjlrYZ%*LgUg3GuzJ^!3LHR^7f?M5vlO;ts6|D&JVIG8qUy?is$ zPS2BUD!>0Ty-d(F7-_8i3$(5a!~Hp=xM_|0He(eKsX2$EUfoSI=2QH#qDgS(kzM7l zbG`-fwYG&H*DulaX63{qsD)m0j0;zq0GYg}h6a&XxJSS^MNBb|O#$Zc3eZ23a;|Ef z57f!VEyGIKE=N3vqB{@=u$R7rirP8klk@`v#Nd->aAZ?r1}qA{OUAT~hOO30`6P7Y z6TI!OB%eEhIjvRj01}emT-b6mmN|+UhBY55rO$r_-?g1{~)xBQb z2xEO|?H?wk+3K+6Y@94o=a$W|Bl#Rv#XExOy%JM5Vx&k@S6XAS24P!{s?%(wy;#UM zlH%glXUHs^J~;^gtAI6d7B*=g1Q)W6vxYX5y06`-j*K)VO!`Fu8Zq1*9iz*a*>RpkjR`hPP2vR0u?TT-l^Qoabs~ zMpt+A_;au3)a?TE2EP3;?WBx|iNdcvZ#xD_o3J9qxD_Q{(pKb_X4GJL zFi`wNM%?j}XVLN^f)Wmn55>WRZ&vPHoSMAi$ zmok(z^c_89cq=){np{BE{L}sC`#^`zeVf^j&$o`c!4kp7%&wqtMu;}sqq=<#6{>>V zhl^+o6!q)k_SHi=04`Lb8efCmvBwP)qksFG5S7Q9aS&q=R=00lX9>j&%ugyjq%#epGi8Haj(1VC5bR z$Xl){KhAPt=3lL;dexaiDWd@kpRzGWE@7P3IJM!V;MEWE8hDn?(_uCr%PLTuLA?&1 zTG4)PD&sc=lKDVtL4aG^O>3G{hmhS{?wb4DZ?PS(xg3sgDIB-_fUHmJ?q%02Rz!`m zj&{WiMWm*CT%>e;Ec>NsY>imW_+JIwB`g@nbt)h3k95UY*uRF|>)K|oVOTeMJ$UoB z+1m8&pATMFwx;gae=qZ*g>Rc(|()ghJstXLtt5@M1SC(-F{4xQ34wyCvLT--!gfec$J@Whw^E?B1{D z$!K~b5jk*q$LMmvP=tG|WF5ih@eae70S!G4Up;-*Cx39X4+6RxmlR~_tSBjG`<&z` zi8Gjv10U;wZP3qkf(Et|7BtGxR5Lnaw~AL1tSxvOdo+Wu?a(T4R71SODgqq`w^^4f z245_42kR@(ZUqOGWLuPFCI>l8C=50E{)4j((HW0Hh0u0_Gx`^(p0%a2&dEoSfQ#bomOqzFbFK)t!gywB6P5+b`m>BfiV||KIC$Qz*fuC zIa0a-NKu}4d}Y)X02He(i@YWIsET>8MfKt>rA{Ll&r}~zy(M`Y@2XwZTSnM&SMOz_ zX|r-~u~M=rTN-~glcvpe8XRTtyS+h1TH5Lt>kyD=h_SztyesuPauF-9gEF})@fBsp zktQUzEiQN9M3R*jI`2@I3gc1C3 zGc}J3^d0X&RMnW?7-h{(flGT;&CCM^I1*Oxc_0XDAt+t`6QeDmNc2eCq@%_oK{6ac z|4+}dzkQgV=f&*fyj(0lU0h!My={b8XzzXZuRlM3@$%K{|AsT+yTAPS z*Ps5^R&RUf{)2~Kef`a#{Vj({#oTB{x;N+>MLBI%!7Pl#IOAeEP4Lp1X(>h{yr_M< zUB`-)d=zXsyW|jdk1DWY4hX^FX^=-!@m|ogFAS=ZfQX-+>qZ`hGQ*6vx(XU}tIrUR z9T6YCbS0<6ZlD|8?ivBfDilz;y49;p8sp5QDsJ7uidH9;c&82G2J}_c!&}etXy>?I z7-_J=4%h8t)p}Vpp6B9>qQDzo1tM-jj`g8wl$-s|(CO}|-EZo1wR8k#*F#6wuAeVkN1wHxK4)FI>kA_cDFg*8 zBimfR*6M3cLGK(luDkWtU*#F>1!6Pe^%iUo7wmCoWu)orS0&MNogJHexCj4>ES7&; z<%^~;!|pmX!u+mkU$v_4)v0W&g{E4jQOjU}lEz>l^e>XcxEP_7%3=gR$M_E%Twu5U z5AT1AVxA}CVq+`b-W;drMY(xVo@SdD>2ULPa`^*>)SqussKXQm1dwuvXXUp=^g|NeHcwY{_5+xm~-@;`oq|CYG0pew^g zay2dHjpd-&WU_jY@L!<+{)_zA`#M-=fQD?_aAQe;NAV;?UNr4p8ph&(@(n~wtf9|Z)+Rs_tg6=FLuBAX6vg5-?X3^ zd6~m8_`~jY>py<~@1Nnn73=>CSABi`|G)73zV(0ofA;#{{_5fOSI+wH?R>TKAHmk| z>;E7AA0AC-u-h!IIG}An6JEA3rZ642ff8W)VQa%JhErILlNtIYaMbm`CHZL)JQ~wq zIE(%*9WBs$_i@mAQp~RA+3DFLh(?`Ydu!`Km(J|ZlZ!dsKKxe!n_M#8__3Hz$E|l5 zL2{XoC37w8TC6ty?(I`J{*TfecXBz3UtCy#e*KV+(UHaUxGk@J!n;Ml39lOe0b8`LD{w_ z<5{veYmtg0InQ8d@``P6;0LA?xfqiz{9pp38VowV;$=EB!X?2s2|;gs?;Q0vFzBv< ztO43y0B+y6Zd2?r^a_YlWlS*8$+P6QV6X z=Lb)M_QvMsk2_B`H=iCpBDwsm2ciU-BFF>5Z%a}*!=^_ab%trcgnSI)AQ>uh~v4K5r2l8vTjp`7B z{B0n1c)QERWaI00uvx(&C#Lqi7#54tsQdEO%l+=lS9@<@-}}qHsG*$6mhGR;o&{-T(DtLJSS+T5vT zc7epswTvrAAp4dorcN8}zA<6JlXLChr?PX%C>WtpM?G-ym_LBw>?&&eM6{e{k)^Zx_%?sK<)jG%cCiVwYn){1RvzSc>x^6l6R(> zn1J}smaj}P^$_R=5?p_WCQ&cQ9{n^r3+LsGy?!&;A;xI~_-Xbzcr+)69gh05g03?B z&8WwXDdxJQHW|cl3aB$z*oJ^6#;_JB#q_2{g3emJOw5|+=Se=U76PNsS_D?P#O)ll zHcm&yJRyXd6mY9tLbW;W1O}G~ZcQemS`p&kUUtdN1tziIN-seW%Be*gxJj$Gl>F4K z9yju0v5{WljgVb$@(};8R@ErKoElWQMgM1Rhqugi(bS_k27)UWSw1Nm+PBEKXEmTu zrFIVClZG z-wGOtbvjAS*hZtBoh@ho92QgA1s-kEDrPZ<0-u|5&fk(x3BM(AXLIw>A2v4l4sDrp zHZKrx_c`-r`Q6nMykvd}yeTU7r{{DBc$l2NWK)U7Tz~HTtU1BvX7J-YTY#{J_w@e- z^5x-@?385q{kFzVRCh(I!ccD{8R z=ZH#17xMHX*qhIjtH>y9;twskjGwTN!U`Lu(dPS&%~Lo*y??*acG@Z4q^J9rGl9-O zqIUFu|IhpPWrtk6P5AeMb_5^(=l1PR4Uly7TQh!2gkQ29%))~08@|!7pd`VS)S?29lMRnY513#J!1_L zD~5lu7Xio^>E3i2rBJBs1gb-5#7CIH2?N}u!J~kRXuPC=m*8hZO%5NA(&LyGWjxN{ z6qsC*F%@v8P<8tkqYoEZJ}xfef>QwQ>OMskT-CdVrK)JT8-&CT(P9|ow6LS`v_6Dl 
zN-31)>4t2dUjn57A;SgZV17}Yo=(%qgY%4Wo&nBPN;#Vz4ZN#4(#yK);&)dE<46Ml z`ZS0qo4|3~gab2Jo4VQ?HJibof^ax3MjyhU?@Vw-iR!P=;{@%Nyoi7y95OCYv)c`_ zan&(FQuqn|(BXq>28WIO25b1)R)|7Rh;d@fI!jm;7!--jH)7PFJb`2R6A-?mt;_8P z!-rN2<7-Nfbm=nA@;sfB)?wHFwabnEiJ|rB*SFW;W^;36<1z1Xw1+)zJ=$d5!y`H{ zw^(h00rOZ6K!2?d({eY2EjdIKn*L|E1B%p*R6_Tz<5(hhqp6BmMck%WaCDd}pZVK; z4SvAo>0fGxCaLmvusc&L-OwuvoCjs9P3RF1h8W~-`!-aqtX-)%$P56aeLEXhA09uV zlPdg~)yi!x5aP(2FsT=BzkRO`VqkNMVr?Um;2h z=cLQqetf8f^+(7p9!Ion&w+WfmnO^{y=TNc0@+_C+5|-8!pB+|JdcoP?GYTkqyoJb z#oHq!6G5Iv+@no;iy-Xpfw0jLke)!$SYO-qfc~$S;m#O>odzyMM+s0mSR(DZKr& zIRLab_$AacWJo76MP?`0n4`uX{6i;6ma{gwu2AyHvKX@IpLG_-pdKZ`**u->hPT41 zpRyBIvIu5Ug??In= zfayH-TWabkTnxkGcrhH%Ps51B7{0SVM->)i!zC%0c3hF2Fk_?|N|k|2;rr3%a;n*7 z#V?KXJu*5#H}czJxtJ{%C_4}vCJPV6@qkahNy{0WY15!AK(LvsO$9&hJpN&S@9F-V zxA1c3af6g$02O-}toYrlr$1G-9?jJJLKoYd))lB&v?ii{2)5I6m@|WKR=sB^%u{JM z1(M><(})8cGENzBUK~IRJ8|&x^}9pvg^7K<;mmS?iuuQ|ATwJy^k?Y?Bk#$pSARa( zf9tQyB}=O|UJOay7yE~MQV~zUFbKSej8H=1@SXAq%M%A^WM)x;lRm`66-vBji^4(J z@#We3)FlzFb8PjGlAUp@4c*&+`Imz?uU@{`f9XLDb|}q1b?UTbDDPNsnein3R4hSU z#N^oIK!*8@S#>l_N94zKRV+EoE6*$=FrQ?nFN=UfMnBpJE-5}6*Jn4}u{ zUpc@-23@yCN-|*CDKEN#xn(pZPtr*z%(8$xpp%qlJ(-}hg=`CSg^Uf=gJehbEuI1* zsCRaE$beDr37y#6x0;u^&QDa#(9%r?+==I~{HfzkAi`;JkN z^jH;_m>p18)O(^;qvd4j)gy%(b<5TxtP`Ufz$^;i!$C7Jw@Dt@k(C>| zdc$v3&{kLpEJ&hfk5TwIKsF_W#L{`?3&6I!T*1i3R`pAtYj(Mm-k0Oa09ggFo`EV; zRZ^Ob6_ulduY8j-BYaK5VOjxnc1XIZ|&XMZ_?V(YmQLvey=RFBFo^Zt7(2$8C20L z*p7F_;L8ahLb0!F{o#(1Q)>v~6%3R_pv5JeR^Tb@!$0tdGFV8G82kW4gnjsj72GtL zOi}bGwgd5kv<(AkK*e8{pO#tqVtrdJmV30(T;#Za?gerKL^mq9O`Xssd7iKx-3g0n z9B*KOSI>C{BEt*=9+TDBv{-NyfkK)wWp@IIwt(p41|2%bpjzzh_g#Y4f~bdw&%-oX z0w$Kz;K9}w11Hhwh^-TkBe{>O87Xddy2{(W%uaK5***F(n_^mu7qCj_y6y$NJjZuj zGq!#ER6=-|%)x=6fPLyLsZlU$SQmW}FKJ z>eR4)2C^bNgBXZzvPl`7D99%rl!yrgp10VrZhFN%qD&0jElPe;b?u~!FOm!+k_pCO zvQEjhd0vc{ykp`*fWwRFSiO&MS)ffaO#a3u0c3Mf*-)~PdwSksG ztZYN{iB=qc>m3^)2MgNzLrEy1aV6@AfnKxG-Nn$@K~SLz^@Odkp=iZE`G8z~7M-lh z+oxWmde-;}Yinu&61gNIBhU@{4-H81Z|jK|Qy!u<5tZBo9szWtK0JkO!7$#z?XcCl z6G`VfCH~;?E2ju4dU)x)~o!|Yx{$2dP9&Y{a|Mf5E|Mi8der^9RjQQZK|E=DG zhrj!O{Z{^8Waxsk?Q#1ejkpRiR?M!tu#5ZsljJ3NG(j;jS3w!0igh^M28?%4U5aDecA5V(S;`QJVnno<6IDmoyPmTvPwC7{AijC z(;{CvzLUBq)l4*e z1gGJGV-@55{RRGDMDYj0l!+pH4RE**l{LL>uefS)a$oqSOT|=HUn%}(ok^*?NO?dT zzzh7>xZQUM%i>noufR`uUX;y@8|emF%&8@vAS}ACb?X{#I@62@bG6eR=VV>=r)bS$ zr7Zf?y1qSb3;*TZf0(REx64EDTCtI zysnh2mkUHLeBVCvDLyYxn-bfSi9kIs)6&TOGRdY>AMrWFrm~(Iy+@)IqNpfUXLwW* z>5csi$Oz3m^p=(x@DV?kD~S>Jvp5$|kxZ|sODDSHpVKS3v&~v|+epc}Z6|pDEwP*+ z#m5AL=#i>kZLJmMDVcytYk;>Eg8>>vMInC+O&euvWY-P~|2B?m4|<;smkX84S6|2K zLDDK^#CBCqZ&zq&%Arkj;5;SW`;g-p#w;G^ z#h+YCBcWPf*&apAce;unbiC|K5`N%X#yF_yd~p?VPjtZY17eII5vQX?bI5XT>u$A9 zk)*M&rMujX-B7rtiep-RkhGs2Kcg_}&?^;D*9ql2cgn&GDA{hidT!^9@n=b~Qya!F+nYBs0UyD=93#!<) zSRfZSOG=?C53ae_{u?oGOJEm9@v>MPV0L&^_@`r9zO8>M;m6_=$rw<%DBUv$0H<&p z))Yj!vKp)?K{S(uf@;iCkzajW7MM6#@eOs*CH`SB z4r2$0UR*B>J!>OhK7dy;y$qbZ;fh520FnmCiiJiHOoT>1)VWRJ-T4;6 zNjkKelo8qZy0y+6YHGOwV)Rhp^@=jvcnYfO{C!y;VlZ0W^wkgAg8oqafhyqM^Dbhs8 zvt@~a3`#VFm^F@VMioXTO+nCdyJGITvhDT*M#liJD?|B_qSu%_wZnP`tU6(xS%v6p z!cy9u(pdVmmGK9Kas|oRE{LG*cR0FEoyZx0rS1A$B7c*Nv*Nd-aBA)!K;Gze65fnP zZNh*e4q1%O($NQ`3VHY}qOkUsIS$^~SPUw;Qv%U!K1R}6xhl=~&}CkyLQM9Gt(Wci z`nTzzZVg(pboDMhJwaq$BgQmrz!hE?T_XYY`5W+xDg2Fg+wYc(MX}}%a^1?J1;hYv zw1QTgCh;O_^#mcp#m?&5?H;FcNd2=m8f1X{npM6EWSwHwm(CaC{RfH@^(*3?0e%+V zFzF7pA(I?RI1d1no`fx^P;+TrCT}YM$SXtaP;p|zOiobx02Fqv9eB7 zU&(S&U?!c@T*_+$JzJw;t1|-I+}%{$QcM?T-dWGrw^a@;Kuf-yVia>d{HTqNhS?mr zbe9s0XocE5JxwoXkuKT1p65`GhV2R@_yHQ5qo2w73~349III!sp$nDhOIG2)olxv< zq#KgNAzZfej|?>a`+xsx_CJ;VADY1b#`b^PU;S?X_itnW*ZbZ6?_Y5Jf8nZM*ZvPy 
z{#M=o@2lSL_J6;f{omp10COB>i!3c$5!ujN27pEcb=dUL`5%Oz5<{kACU+I7y-gkk z7%*m%T`uQS{O)|az}N&Kbawp4d#B^5R1BXR6njS~EC7M92%inGWdmnJ>?W0H(kL z-l)jOPVP)wlS}oxK}3=Ir$v5hYI5l#XL}?XiFJ#SfZpZ7v49Ju2G9|+f__*oMlg&B z-GrzLyQRa7@t~tnun@9}3t1}Nx+kZ3WA#OhVA4f$>hnY-HCn0SY-F65k(@9wh4U~?^D37E%n*gYPBD~uNo$2%I9rBu zS~?HQV3dr`Qp48v$1?zfK|LtfVtGalz>>vRn06K5PM71fh99(iI@v!=PERCg<>HK* z$8l91y?~D56jU9hSQ~jz%o6C6B;%Th#}8M-NbE~Y1|=iGBOOHr%08vjtC)K{XSXW& zOHndagW`Hcj$iZQqQsPxDTQW6S~b#Nh!Yd_7Q2LVz$ZJaHyW7>!Z`)LQ#cF`>={AJ z9_GDcqZ`l#079tz8*GVc*`Mre-N#Gxz{V)@4buK7S5oXf!MQjqrqWh&s+k?9`LuZC zf|7n>6-4}oMm!CbNST|$5;%Xs367m-5#Nu5>fp2ZuGtiLC3)6l_`r&bF5*X}6lKPP zSJNkA9=BjpwvTwhQV`yNDwGz@FWYaMRatjVve)}i8(^1 zT51DI^G|Hsh4RAS;Mo8(Tnt|99X|O1UGc(>qz(zYVMlC_&P5eV`?F>?jR0S~dV28e zVE^gh?ZL|@`!>*TDHte&RmLl5cdzY2VA_#FEc1=J5j%;LzY$77J9}kUAh%jIz?ynM#`*D>kKGCwR1L zcBrZUY7p8iRL`N%H0vf32n0nvO9e^~w%WDF$T;Vp(l z!Dw`v4b+Nr_mv{Yd>6Ru;jTVT=HuYW_XiyPU#0NXs}**7Sg713UeUb5dGVbD=s)1B zJ+o~NY;AX^R!E1t4&Tq;qaj-r+y@)(a>|rlyN=NB`ZcC2qv`f{h5}{lnn&Ez^ zq(#&e9Vja)SwYls%-PXZKFbYrt+TQ6xRKCF(p-E-E+${2#Z_QNs-?opfa};=JMKti*o+M07{%}FCd;Zn2j|HtQq>j5XiFBs$tHCC zPswiYkHg(<*whYDW;roICc8VG6A+uGQ|S$_Dyg?a^@5EJmLU-H@7_EoL~BV)hp;AD zP04s;N$yN?NKKMw%+(^bD1=rv{R0kPup6FY0(QN46jDE2nDO+iRrJZ95u2w0O+`Pr zmZx>w4k~=~;aJ<_v&DTa@`4&szvc_{CuEtmiFHlxh3Lttzj=CJkVJ)t#^Dw60hApv zLLYERGpYlHhd}0@)SQye&%=ccY4C?#>s1s^POK5QS7QqZ+n1+chVv zTBq*?8iYL;61oUKss=t>T#IbWDBnf5(n3VZY*s&q!D%=fNg2eB8M6kX5~5sBQ7rMe znt~Xv_o>**1#hK)lO~P0P4v5pMFP`a*jS=72Wl5~RcY!kmvnD_(4dL^fbIoH;Uf@1 z9wS1`>qUl!1;R(0RN&aej@zT(?~DG=x&Pq<;%}D!7yZ9~zyJ9+x&PVz{r=})^#11y zR{i?%A09l|a`XS*-+u7>{m*alpP3tXzRb%Ey=B_9iH8u9!@XwOF3;!29Gm7AH1-Y`S6DJQ*PUQfI(HH9rq~=92DyChcz%HFr@L!Cu334>7FFJQo%*|8DOjeJHYKC{E zhbUB0Etlu#R|xt}iROar_=w@(Z2tn%UKaWEDnMl=cY+$WO~vz16q*zV6w9?j*bky! zH`wZU0<{KRE(*OGl%unhq61UU!WE_I@E-QX;7y-;{DnDG9G1R9A6N{&xS}+LtK(Z@ zsM5QxBuvf-_z@J{T2U0I7!K5FbFnx3|NU5R20O=ifRzrF7>hRT*SI;$xGyfRKg7Ve<#a^w* zg>e-P7?Ye*QbL6)R}qC|uh*wq%tw5?!o0zNSn0Aia7G4UC#E>^QRmppe>ms3>VwN# z-lmJ?jFo^3)bQ7Qe2GC~4B&yft=n~$QdO)fapZ)vuX*fq>p!M)8;QXvan#wo$T_25 zUR(gaJ{2GMw$}{N#Dx46qtr=_Fnn4kj4|1a5BK2(!f|`QKn(%n%;2nmrOo`4%w z&&CBX?YD<$GH(cC*5`MtzCPW5zJIvy7O*5dDAf?hRk@qE`s{F1jvt{1Eo%#!vDxq% zex|(FnBr*19*Bp3X?FzvIpB*stx4Wc$M&vJj6reik4fk_b4G+;LraGko< z&<^EQp(}X&b=2p-F>FQSC=9~RaZ^3^gOO@(ElQ4a>|Z6=!)grxLyU}`pnIQ&U*D%l zHWRT-OJr6ewdN+(O}AOS`@Tk(t zu}m=>pl zb69H_$?Y!e)1D#&%%!Qq^Em)3{F~F!s*gdeFQfo#Vk#|CT?ymSwz$g!SHl~wPy9!EfpJ#U?X&NoU}GaobdhNm0r&ByJ7RwDdG=aMIEuD9VIgm z!ElkKRm?EmN~I%^$&WhMt%ArP+pa}qo!TL5QWqkIQ`mk7W%hUMZSP@lH|T9`->((p zgm`EN>X$=?OY8utKvuu|gfcWn70gc4shIpjmXl^(zz`sP@P56bC;?EpEye4LP21wI7Lc)$Y%qlq#i{VJZ4V>9;0kP?S#b6vrSn>fvf(p$b7f-W zks-{}s{X`>bTs3=QwAIGe@A!9OaB=1v*9OvMVJz21g^6)#;%Zag90Ma_xl zFu|vPcOSSGkYDBn#Osbf z%iUs}om3&J79dy{poI?e)}>o%iQF^l$CYRtxLHr-7R;@*_vG;1-t&R0I2Z12LXb>V zYwZe_)|!KWwo#fcQJ9Xd+tgZLvY)xvZvXk8=qpmW1P+riI|DB8U`9Jut%9E;v1bnF z!~=MY4_iN;oojB1at66|;mZLVbw*C-qnHar6e&TW4_^BVI z7*<@)rx;9HfA! z#oL)n@_pTY@Syp~Re_f|{tGGs95?A&Su2#hK>|n~5&m=C{9~E=_z!DJ0+<~(GTF@IMl0(en~{TT zuZG=hcC>ZuQ4d!-fXwDVRKdZ$^q4+*YZ3+cYRaH+cKP!P&qlt3ujUhu)qTk%g~P*a z(GvH(qI$(5HnE|wrTb;zWV>nw3wL#&W-hK=M$B1~8rJ5o+-nC})R4#P@K4P7YM+`wd~GwK zD3m1~;_c$rR;Q_6Z~H4M*t05EZ{>3G+UosU2W%KCx-gkcEZ(dV)ko(Ag-01tVtSB? 
zOXgKJB>X0Wyhq`dlam{4ELB2a#5uC`Bj0JdRd0V7PG`FXFW>G}FI0-G~@v>Sa@R=r%#KN(DLD zCUg@OL~A@Ed;j%a=DA?Z#vq_hfTbvDAh!kR`( zzPPd82l>-*JL{Xv;EZp(p(ZLUZt;FUI8w4Oyg45wkje};x-@Vl;b6@^HA4~=28WrB zmVa4qCU^W^NA=~V4N9anDP}&;76pFeh7*V21j34Gq5yu;|( z^L;N#9yFNb>JOHh8#Ih!HfcV zi$cn=WjO1JI0FCxh(zM)+t-F$Zc@~e&E5n{%pCbQhZ-s zXTeA3VVZ<+c{^gBseL$owR*3U)ab+=rw1jW`Fy{@px_}vhy}4ikZy)?#6+D4#3RZM z3OMoeYb16*A3Z+a8Da$~L6Uo)E99IowB&t`ZDjpR{VVsQ$H2NG?r;Ot?pu z#9;(KU4WXm|8`3*QFKNVd>+h6g{g@RIm;HR)QxG>B@tLFU*{aa`;^qdsmg1EUAmHz zF@TI`881A}fc#m|`7}>s;wlCQ3=yskuGyIvT^=*tMY7XT7ZqmUy~7X%X0fch>olxy zezAYKX%L`$5^C;B-u&Rf0Ap=$2WIY;zytOO@Nq}A>6{48?g8iL%HWsS7oe-h5pv&- zZ|?_ZV-*E&4;swC5)TQ_1)oLW`6kfXP!GJD9f8*0ty@)80mc%bsRp>HIl*ZJq>n(2 zfIAIft|4|FVN|~gG!+8mQ5&|S_PGFl8reH=K7T(?xdfcMH74BxCxt*7YUF{$hL9(u z$(w!1J+qV4q5V#uH>RVji!J6{Dm+u^VZqk+O8u4bi#>`yn%3@ ziN!zihfY}kC>;86j1Zw$+b<->WB{AS4 zX37cJNPUk<@5=!b=?r<7m;}!E&(F=0Z$L~(0CC{^j^Vq(M@&5S28z)I!9Hi518ZX$ zcF=DC87{*EP;u1x=dg36c;jOnHUBb&63|HwY#hIL4pw#syi=_Kd@Cy}*tbB0!M|)^ z!NYMYz$NJ;@upC;at8M{G4SghV0i>87(ANz!8-nRBAmCFMIbF>Y>oD90;k)e8MqJy zg0@bR3jutclGmoXx^}T!koCv7oSub%clWv*hlB2c0fu_fjEaJkOSNf$@BEqX2;}jx zcxYq>%qiYFx&C1#>u08PQXkYe1#zHN(JtUtXyg&J!TScPZeKg1mjzl;iENk(0;}wR z4daE4p!bot&bm4^Q{YC7S^FH|n*_N11O%USFF<^)Y4~X_KG~MDy&NDmbiZ-%u5k=F!e_sm*R{6R15W{yKm}|XDiPxm z(DZSA|8^?mNR9tH2t6|C1(?4TLXLO*dGOR31zha!|9S`I-F6xPGrF{H9drNmhu3q} zBVcEv_5}F+`y1{rsegW!>A7>Rq!xJnx8FwLo<3&c%0~bd z^9@Ko2JBt`dDRi`_U@&R0F(c}CWbNC5+BM_&%Mwgu-5$oM4c790SoegeKoMO__1+t zv`!}7cRzofv{|+SJS^Td6#+}{8|$kTwZPLqPxLX{`0ss+9__7Ov!XI|!-09-YhZ&G z(EqxWjF4@g_`dOTVI%7O=6&PHrS$Y)lUd08y-0-%*UPUA29zd$W^KN`VgHV^0D#^6 z`LNrMBhW+l`1tm7Rt3g@&+qpO{6*kl#VxwngIE=cHemT$P!>)XZ|CxWQ`}(E9swgoudJery??&$8p& zrb;%)jj-taGQTfWt=7pHLkd38aP6Bn+lp|DQ109_dI@!Iep8XVvmU^CQ`YP3`g!8GJfb`k}h@mUS=VO1r`-^#d*K> z8F-?yK`3~)I|p*N4f1}qid7%|r$5a-M_+%7WhRP1lW$Ug?h~EvBbTx!9we%|H;O=W ziImB63R`^@-CqS%CKJ8q9_a#>+#t9D`tJd7FfoFqit8*jFT)E<1o=7B zZ`EeEF4v0d@kZ!6*8>!`Q*?FZzj_{$X5;N;$`B|ulcYpA_90RSL94Z}{+$|q_o439(4KiBOns+AUGPG=r&^P!{h`Y*{vCz`9V zs+Hw!ORQ(0z%J5F`xR8CxUK3d{`77AMI)(9s>DHf)x!bdvQv-Eqb}B%S*eU z_2CA<)=yT$l%Xb+J*ui zX5O}a9rAhJiQ}(l;7f%@HkCvJn+yf7xd;Fcs}2D7$43ez7^$`f@NWU>ORhy~4fjUq zKXY&T?#91?2mFtROgjy5(OZ#mHWqXN@V0)e2%`4hHZ&T5i>4EtCSVEsUTEq4!tEn| zegFZ{sADWvzE*St@$Q3FKK$OEkNbbGw%EKKBA0J%us2&mpiseh*9M_YZPj_`#2yi5>N@hwTI+r0U^T$FQm-U%hPK=6br#9Eb_hMN1r_ynOkMt5tp+7N5ycV z@#YUC7Ev$~Yn4WBbU7nB={m5>#~y~wQ_|R1I3rq>j<5Md*{`-py(B0(5Ycp7*qdUr zLqRzEzI5FKbJwH;o~5deo1Ve;!&+HJ-suiiuhK3f|c5 zA@>*yjks2H?FgIRi`dL*-GIruhBg*Wpt_$PFa&}*C?otM<%Jgn2*Z%;PMpmwiy-)> z;C=DCX-EvL$conI=h7^A9J=(y4oxTdlpOB2#e2n(1fgYaQV$;L?VBXTSc+lZYZ5&0 z6%IAZdNUiA5p3KI3hBF_O#_h%Iv6S976rwv;5}vLh3F_sm-FHAth+A-N$7ds)hi{k z5`S8*mUJYE@U6ePu?J@xbNb>YA6lD}3)KuF)%|uup`VozyXAtU5Ga@l7y< z>Al5DLTY13&4C5xLk*MS-GOy(-;NxZxo(lXR#kh+$O)U&R21;r&-b#+Sm;>+Ft|f# zPI4(VB$mzKH3Cuk^hAX0M|iw0u1JX)*?W)*N08*HiG81U$iG7>V3Arw8%*;M1btH$ zzn=U~=TMv{j;`4IAoyosedxtQ*!3QR{e<_Uky&@6$w0S85II{o>xZ|1lY@i7uFY@G zX`G{YSskm&jG!%d>%yRiSwm6W_6c34SoP|w=f779^In{BB^Pq2_p5)w=hv)zliTla zvx#S|g9?XGgrof@D~>O04JiDO6_2rdZlVU4?( zY9I*Gt92dFAgmyc%6k?aBENeFW^G^P#=ac=aq}A#+a7Sj?z}8 zP-Pf^@3=b;j}Yzp^H^L^u)p&X;{HUqB@d}>lV9|#V(O4HN`A%{)T;BzMuZ#3i2aja zCVq9M?>;Yl?C34??`H%oD8-BAkDdhgrsGS0ChNmy&qxi90Yv-9?sC)p$ZH*M1|)V?L) z0OJni;OEgM`hgGh;VNqHADFDfPu{!IzfLeTn&M-92wKd`jDJqAaC6*84v0ICD5Hh1 zktfNuyB7i_c%-uon*0b;kFEOAXkD=!`i&vr%xJMik=f!8xEeo^vZyv^aatJ z%C}%fT|L8epo=&-)^EQ(&-D@OaVzciT!+NF9?r|yB;(3|1}jTiJ7-*_#3xN+$6CqN zuKC~}8VfU!ANi3|g3QQ1pfbLaIDaOIK~aB@$=0dHL{j`&_?m z9^{!13W+2#D4)h6v!RiHHy)4}%mRP#H6C9|^h@YQV&J5QC{%>(6%cX{+kyTBBSok}Ij35U 
zqNsg$M^?V9iGo-2$-@;nBHM8qSC9Sfia;+R#gP^t{J+mFEK+aO`OO}y)cA)r@y%c+ z@7t4}>^fGsNyFbhGYf7Nh4t)`QTX@xij$wtN$E{QDPtJvaZO8KVXU)~EHP6m>beWK zq#5rKBWlWhPSaaGdXLWY-Xv8B?zUl_ibK)pM}+z5z`^-H>F4+a?)6xNGGZdOGaVAZ zQ{|f96=%9m)Lv2BTHI&TL?qrZy%pZGyv@^ROqBmO>_SiIpGfuk>cit(4w}y{Gj;nv z&8~bfH3i%OM9Qxl4N;d!OEn3zxq_eJ;ikXEJF>^^wty?)FzwyOru0O+87LR2usZSJ zkQ68RX{?Br1%XQ(Lw>EY-GL6z@$V-J>Z?U}K7fLVEMFwgN=8H-nwbpKf|ZwmXU*qs z_UA1k^c|#5%4i;f!6g2E&kJZ%7JzQA1vhjrW~mHo-hf=;W60%p~>%Qfq4 zU6VQN^;X1+KTn`rjm^OF&n2$u#I{O@+5Z>25aFNfLe0bHEswXZ=Qo)qc?esi101W* zL#OCs*s(Z}PmtqNqP}20e>q^PJR7rmZ@zGYjJeyYpW@QEGno`D$~j23aG1}T{{vlk z2gXF3tziQlY|%SxBf4KjB`(CH3)F>i(cL5V3wwc$W+o%S^wgW7BraO*!;3h&uZn)& zM}ssRP9&S8(==Kciu;cGJ*gAs>hyKBwMUDO`5@(Jtl~rIv_+Nmj7fWX=z^o4_3gh- zF`WDXJ?8Tt6hqZPCOUY&It!0M%7)MN7$n%=iq)k%7py!%=rxYe4b*7}&2^xc?=DK< z0~l~nKVUf%dgRD{&GQ@hBRdC0(cZMvTj%%7bIhpus9RA~8L+^DN6D%+>Ra%Jw2m!Q zh-M5u5t^18(&-!;SZ5OWQZ5{F z+{yX;w+M6Rw#m-f^>(Qgzif$;F18R3D$qZO^-HgWU#-8##V{r*@Kp#`dSm#86kwIG z9nsJ~dOVOI_>}zHs(N&FnOM)I*0J}c z-A}U_gQSfococS1gnr;Q4hzk85)1 z0}sfLi_r0q-gqQ+!6ZpPO54beaTv)Y1j#Aa3XZ~1C)hBDU{MRO)L8o5eh^Xl zwlKZN%DYTMw0!aDV-F0zM_tI0ZdhRJU7Wr#*3+qPpbZ<@!tUMEz>? zL!TFgaK{tZ$H0l-G1!q#zS3}Y$O%%~tIu5DL*xUgLSs1C^K1U zL?;iBKk=3pexD+kwZ>bMehZE}=BMjs<;Q)1@u>(VPsjs(Ll>QjQvWFlu8dIBH9LA_ zs`AiS!riP;JWr$U`-Aq*198PU#D@xw&{u^h9|MGN z-5HB3*ktTe&^}+U0eSr~E~z1>X}|uao{{sD+hkA|L>b{w8m~N51{ASP|g`wq-(;liG>eEu3JF!W! zg`5{?!iIar8760~T4$u`~UqxcI2#4nHA$pFi>uBzPhyk*F?$`4-IO{-8&vKpD zl$&=WBkGWSuh#miExnK0xe~fh{ zxzHmL_<46asf?{9>~3aJMA}onNeH_%OnIonZiK0v&t_CK)zTeAKIeJq`B6k2lBe%` z1ZIXS&tS&9oq2V?hI-ki7#%#yP4uh=jS`?#d9A~)kNnt@l2wlsf0z)qIIf6OpvY4< zK`S=q04c8qg89|P&&(=}EEs;tUOT+?wj}FL3H>|l(&O6(CdkteTUw%%y@FX>m~l7p;Coy)ro92shx!*`B#s3M6%1x?yi#_t2;agcIn#8 zcb3iZf=Vu_Sskn*+s!aBbE2@Ev#=?{T^0qwr}2^f&${W1GKuvT6@>rUd5f1NIrQ^i z9kw!8H%nzZR1;Y@51oV^Pw#Up-_%5sclgRoTDaPj2$Ye#rd`Q+z11p{gV{{IjK#J+ zkz>PfB<>u1DSC0M2Gh6DOaxno5r3O)^RnW3B$ZvTqWedc>8P|>-Ugd+pTpCEVFaLs z`u{O%VMZ}CsUv}8Xn?=tTrqD7vw^DC?K8%Jn3%e6y@R9=QRhu0AFopFP!B0rA3me2 zy;G{;;^8xkPxp2*5j+ zs14tP21?gakLj6Vc+ln;Hg%>A`=-xQp!>*>Xngrgb)2F7PCb61DU1+={N%DY)Qrt@ zNFG!A(I2h&JAT}gmX*L=bn@@x~)cpgUtj|KV$_Ov{w)6Jylc~hOvu7;wGiCF0$K&Uw?Y>)AVAd zyovL7eDv?-&TQRtpRuKEYtD0LlYS=?Byv1ihLKUX_e_W1%>3G>hdq!+9Ph`XOB-hA z?H{JIxuA?)(vM2b^?atyo^+oV#4W2?s!E($q-U)f3fzyhymV@4w45L|~gU=%a108hwWO1LlVls`NtgY@;^gpLPVcrKBHqf;a^h zPoHK&F8`AmZHSCn9r@TwWwwmd;+L}ac`6zHmU%uiKwTtPt`}S7U0-KRp^@*Y1l>b z^%Khdd-Do=j~dW^;Z7haLhr>UsLfWI*oX!!SF15K+={L(SkLY|G(THaKD@Y|z%+h4 zae0`H-X{gs!Tj7ZWjTNn65CTCd~X^bZ*LmhMkyy!SiKls+Jm}CsJmJGiXcGX-_OIh zs_5xUP#8@OI|+R{+y2yMo^y3QSF@#2=QT#q8G_MA;QNcvM7@fXhNUhO}_S*yTnqe+}Da?+pu%K~|(dzu@T@INowk}$e z0H-`1?gYtI;B=BBiKxYEL9K{uo!u0occz@bR>FSC74(VrO#;rM=hQ_!l0S_~Co#e6J0~WM88o3I-u7+fk*sD#N4}|yg3Dm-4 z)7sj)Q5sZ-DqT&yW%hJU)@*^CY$?(%g2&&!AHv%iiSIG3bK!VXy41CH?6KpOpQF;p zbRr5JRvUan539=gG4{fTLfPZ6q;j@eQA%Nnb-cT&VKA%KZ8K0w7wU6o((bkOgj3&k z`H`GgMKTtwHM3MEn9sHAvJ~TLQxWU6wVmU%-?FpPdC*^X_~Mn(-hBNRz?p)?jtffV z+V(<%5=!$8TBLiaXP{-Ygd1F8=A^ZP#Fb$Ou{Qmzi4S66C zaJYu*7N*Ls-aK-&cRd8vx6%MkI}qW;;LDrAn+gD;Z&>zBp%Ewab3kGQvD5yg8Jz6@Li#m^eg8PLe{ zZz!bonm`S#--0oi_i8Z@UuN>gu?08bc-SkLRR}Y2yMycX^@2^8HGbPhZtj8%*S!0a+hTfF|ER`5AQw`d7iK_|IR&CoXC~{LSa#$SW^zs56+91AMMw4u~66ApS&X&z(f=*#e*@~y&8yz`{!1*P82p;{o% z3qsyGGpAm)i_>djppk!n1B+J_->3(7av&PBZ zBeqoc$=7%JQY%_WSpqL;PSE)6Va*w#%kMM6qSsi2N<4nFd9I4jfMz62J>f+Ei#Znu zQ?gFR*$BM4uuEWhMZYJ*ikhu1M508Jcu4Y}iiXVUr)4j^%ZIj&Uk<4tnclng%0}Fg zntBs0%@zxUdq1XL8Nc-9FG!9!I}BGG5k?m;-9+qQ)IBL+f{*RQZA=oykrB<%;yor= zKM?@GnLw2`cl6I^kjuNt!mX^%3ai)EJ3&Xkt+M|9pr(?PL7&TB0Yfnq5!fd;mh>0r zoSk8O%2ujNCqrT+J0`v|Uy1>bo+rT$bSA4)GrLSu>o~!!`N^M({!*NXb0tcNrBYDz 
zaL}q=miw)ZR;?*9&8XIx8DqHD7hXMO@jFHB7On30g89T{VvjsJ$0MuwPO)=<;~A*F8DQ$VqMl;nN9QIR(+tOnt=AR* zZEK5yCV`WnOg;2$F*#IZLp#4RKq{7Qj6wBSL69D79zcCuUB1~Z-j8OZEZdYiI$w06 z+qWQWK1o0z7VfV2C;=6YLOZa&bHgm!@V*22W-2vBS9JhwGm82JzptInena&cHWMEua6F&VecUDp4Qk%;ua=k2 zLFi1Kj-c7<$HG}n3Dn|+_uZZBv#U!HjXh%KOY|yj{XB(;`%PVf+61o-5miyJ+cAz$ zW5=nuIQ395yO(jc#{$=NxJ4GU9VPFD3(?Q~E&LJ6AA+h_Dxv_AMZa5?*Vv`|A{)^M z$TzIg`B^)1H3`<9MRh{eVjQRl2^)iW>$3eFa!o(rNH>1>-AvIdCg*K%zT0?>gy}k> z^kDiqYEW$Ww}gCdu)ivg5q7)irPs?(v2WvcX6EP6Lns6jECQzyBqwHh9uV0=$__1N zG^*0O=qL6`e$+KOOx<%o(nY?i*wu($f1{{hUN)YWvqo$xInIBYXzk&sZCSM`NpIx2 z?`HnHzTdNpx+?7jy`HOt@QG? z!daTf8E+s(eT}WkqC5>8p`D0iQd(>E5!Z-5mG?Co#dDo<(1p2#@MD*%jDpvtfU6QP zvFllSgsAg^I}l+~s=j`#A0@{X?C`)k{T@;NEXyKVOK-YLqE?M(Gb6o&9rD~*ir3u3 zj4e@n8g9@%qn+Yul2&C=rBbDlS@ovB2XC#KUSm3_y~j}9;V6wFzB- zN`_LUb9p-CUt zkTNfIjDAwk32+gR-j8+*%6p~8Itr74;NVA$%IX<iTc+1}Pj=`JH%Iiu<~b;jHMt2<6C3EJaEX*EJ=T);g+tUhtVL!Gjgv3#u$>{&9sIWX_mf;4(fB)hTc=E(txF)sJi$6s36I{u27vnCc=yQnf8C z62p78_mpI?)i9%?pp5F^X|4L74Koy`VdF>LiZ+3YVpFDW zS@h`u7oQMLXFI9dy3`~-h+JC|4CmJaQ$Ml>E_RSR^=U`$Wz+$kHrUU&9S#B%^SGK| z-CO?#Cg(}3a-SoXoPn4UMgb~+IeA1Xhzo=p8Vcbp0%>V;v&$R^GO^R#hg0p4S~!Z| zg@ztXZ!oKLu$bYBmA?WWtbXz$F{i@llbMcb{ZGLCp5D$yx z7ID0-ui=V#g)D1{PgzksDQN_*#AJez+M#1eTH?j=+dWxF5?Q#^0^IlwI@(AVct1+6 zp8^FC|Mo;(bkKfl5G%Y|5x-#*vt)~Eue0D7;ES0%X6|(YEXS}9x5EuPkn{Tf`k!o8 zH+eKlvPA|i@(seFcEzVuGqRLVz;eZ+I~$dcQ}GE5rsX~*4fL=4N|$=vaq7nJ?;CNo zCN<}Pka6dLM!VughEX`_1t!v2k_w zAS_Ml-oy93;66D=>ci1*G$m!(r%yItsg3SKdu}RltBA(!MOpqIw(c=LlBiMib!<$` ziEUdG+fF97ZQHhO+qRv|gcBzd+kJZWyZ1fko_oJ0{p(a$t*%a2t@VF|Htv$_rP)aN zq(jPfevv9kfXhL+9qVxs^0(xWA?{otthQGK#X1kZWA`j_1<~}KM9ow?p-6y*C&UQN z>qe!$#^1==p+hEwD@iA+#hEjcj|r6_Z9HoYNDrZCg(zF3gTwh`?tp2xnF)k9LNN}> z%$P18+9^Z2ZODt1KwxJv=ont_@M+XXfSe&z$PNk{da-Wf*lQ3&Owch^)kp*vq(qWq=ky_pC`~j;_zUxGXcZI0|Vm&DG>45+zCvLq6e#+be>F9b!CdkBwm%I;f;3 zk4G-092up&vK^42wqm*^XSmOZTic@NYI@3#ea+nj+2qN9lvvtz*fI_lq@YYpEGG;( zD2wB2A`U#yQP>>5S3n?SMtJ$C8sW=m!V~iCnlmrTnA`+XoAJRd#Om)7zzJk9h2zSv z+6(8Qb9>EE&0(Uv6uHUP;Aaby3@?T*=WUbJtY(MhB^^WLfekC# zBNhtyG4;l_zdtv0qu9a9Y8_X@cO_lK9oxi3T!aRMP-!#RR{`2cX-lFz;2j=wY!nA} z5uq>f%cX2|;R5G9+Q7Ohyh?^rs3XF9_b(obP}X_TurwtvW-4p+S(6MLv`t_Tt;zF2 zn%rX7*q+J8)f%1DA@tkd?)sD(s+ZR^XFG%hqrEROUp55YSKjquE0KNF= zdRGyUI~7zf*|cwqE$V}HChL1|a^+&xHmPx&IVnVm2ZNS%=A^8>0VE$V<2WM?c}SMu zjT2$?-=4Y15|DGTD(xgOc-^=Yh26G{~*vl5Vo?m^+Zl9cCqD@B;U&whqHS|W=Y zY2uW}azu>S!sDqE2CRr-a-JJJ;=LJO7x!GgRGc^j`n=3 zQq`6Js9LABnKD%DDE4l6M=+O_MA`(p@~m=+hp-xUb0ZB2`wtb)94d;}2Nr78Q%A-mwDeZ?E6MXeD%jxRofhV0Ef1lE{Nt0p+L;`{wc#^4hPMu4}2 z&&%}f-sPX^gW~k?c|$RKqbD!&*A=K3KLj!_W0_t~@A=>Jf!`dUmzn(X!ve(nB_f|q z=vE?fc4cMihpx)W$Pyc7keg&J9+o-sAoh7F28TAiAYodvvk-d(H#nsirrz{@GEK#b zb>g(pOBG~v3krJk4QyZQ%mR|`os6)=y|e@?Ws0$YjStR0wvNJ>zdK_He)~lLCkSvw zpF~uo|;P4gFN;U{_8sc+`rqlW`4J#o~nJ zt+<+wf0QSDQa|JwZa)3Y`j(EBv!75O1aB~>n_w_8A(LS;-~7^~*ozVZNwie@kQ4`* z=;COU?!s*DwzVwl<($@o>eYw^Sp)A{6;mXJj5&tQS-*f|3XLZ#fjjrzbLitC#cM~+ z19V8-s;rxpl6D*4qV}{fr~q0(QC@mG{cUu`%JZPYpaeP&`wjT1Ay|77{)7CA#Oqg9 zO}m+-5giG>pTQY)*elDmkdVL-ekC`8E#gHMj7O)qaiGQ)35;`Xkf5@TnDoVqO+}=8 zM>_D*C+eFji?Tx!K0cR04wZQ*xzLbT=F*-kT+*1T<5u!==T2$E_o?8!`*KSzhx3Inz|08+++!|L5Vos$(Cnm%$aPn$L(%`ZR zV>0Th<+EP$>O?-s){1?t1CNLV6D;ejgCT`YY0sWgIrA46GaR>GlL@KfXk@4*Jjm3v z<(&Mp_A~1?p^mw{HUwD8HfqQGD9;l`YO4;vK33_pq&YX}%M;|gdQoF(GNmO!P!rMH z^<1B**zNOIK1vd%*xNIBW+~y*`3;5;q-(qLkwoFcV8@Ig6u@47Ysbz8cP-}k|2_AP za{$-oMy{B$>RER~diuREG`p%1i*Qm_=u`Sq`D&*gQdav!hxQ$_&O}9%sk|KqixKNt zvc{eV`ls+5O1q5mYLv7NSGtHvZvyT*?_0ghC)ypYNpkSx6gJI)-MY=Ue{vh8M<$w zgxf>46n+l|r$;o{g*9e^!Io$8RWMO=+uuGs^C~gFSKW96;WMtPkiP?`CU(=AyAJ7H zc7EpSV+jI-st>&gS-9g`)Vm44-Xt(R1kGbxfSTQimZqm7m 
zT$OoOnIj#(_pMeZX(edwbIEd1a{mNz;v$q~|6&e7q_}>)1MODb31|3os$3s?jOAv_ zkjANRw=J4&*SB8vkjmh7XYl~1A8kVA`qgR=cGeTAU3Z-8fAzar?A=|5>u63gns?M1 zT&4f7xUxL=R<|Qt$)XridSA^L_Rr65YIpc@BHrf)<%stL40N>ovGJSY(N- zqnxcwc!DTd<^<0MX{7NBBhq4j)2x(iGh?DVRMQJExsf0I-_KkMY9J{v%fEFZRMoW< z$Y!up2C-SAm4ZUyT$ho4hmu?3N!uAZVOP!Sqe8goRyZukP?q{ksVe_UmNDAl&|}uz zt;*~?4Wo0%L^-~dwRe)PS5_~@e{}jPE?5O9(@B?UazKBcIjELW40wovX9Cfv^E%ALv7SP{r=-s=a6cBbVx zfp6w{HgI1OVQ3wobT_?x*MZH$ARzRXvb z&zkj|iv_$6PVL9XcKhW00Z0tt$t*nW`;7X9VgsdE#na|25+^@@eX?Y;i*y-|Gn}0; zNXJM_JnA1O*k9E9TbT;3M^ExAORmZ%gHlwmKhNeZ+TxnQf7$s6C~gnX$j6l@;gJBc*>Mn0ki?jXe*8GmbKGKg*RU?F^ zRJo;!T0kprXIGq-U@G*rgV!YrRf90y2y9Hr{o_u8UpKq`JrUSi(vUssVmZ z7A>0nMzPMraN-_;G&q(eV15uAwF7`2geS-uV-GGt>~9;?JG8JCS!WRWI&$3mVo9-< zhbYPtC|whe`A+FM$8MRhJ^q6At=3RXZZ%^j-$j#e*24d(+w5_KOo><{qAK|s5`?sm z8TV{Mj`hZXh(y1Ns?|2+gN>|NCCio4U==GC6Q^RV{dB_Uo2{VV9Mm@YURmGoK#fJJ z4@!u1{g|X-N=I>_ZUT#Wxu_1QvI)pv!pg6TfxC=RwtKNiz`5jam-l~<~rJL z_j%y?g573ITpZcL4a2B}Ck;qK3lDf%M1sf&kaQZ67HO3mvV!*D-;YK{SC~92{0Wd* zz!;NubUQ%Jmj^v~H6Q1FYykb@A9(l=`DcXge4SH+Lj|~z=`$N&yx3rsd7~^GaYEV! z)D4)0t_&g>7fP(qNT06tjFWTn8Q=6s%m%(7VO+w*vBsi}V>mcFIT(hpUAs=bBN5oM zczfJ>d=UPm+Xclsw&6dd6I&xS-4iI8#7yvKdwxaiL*#Zsy+t$?3T%3DWr%m891jjH z!<*waPhT(zV_K$&5rIu1&_aQK4mJvlD>Vzf6vpw#EGISml-ikNa--m~%Se|ETRD)h zdq80|8Mr7wx2w6msVNmP!jq_gjxYDbi^o7>j3|s*=t@4OfFLqPw_<7-JsNej+H?A` zCd=o)u%hHkHbiiYXr8+`BI7}RP=zyZP%U)5n1MLAT#4W7Y@RVu7bS%?7-u1Uw|pI$ zKvus!L3wMr|0yGSj{%REjzbU2SlEFhEl4A-YKPghhu{!cI4#5E=6Qt1BrM-Dj+d&2 zP_f(P;`n;h?N2c0(>d)Cxp$W}iZ9Lc)O!p0J8fsG* z2AnsemLYr8m=B~XwO*nWDB$TSk-J6UG5;ZWJT2_pNCUb3bbi*CI#qSan3k94n3fZF zq0orQEYVx-_*LC!3b+66%r-SmBMHl*kvwv7U*%`*(P$)>hbv`6*j|d0Wa&pJf?lPA zo+9rC>dMJfe^xe`kVhE295vkr2cMT;%rEIT;KbGHvoE*~VI(-se z^e@9KbZw>!M>6$a)S=VU89emZV|^s?G~VoVrB_ z4pEcy3))$+ionx0$c}rGB~bG^%pKr92R)+dDnM+I>oE7)0Y~D7Dh0$vzZ})jYZD=x zWLP&pFKB)pidK!+2qH+Hp%v1q2vJPrRfB(**LAB^G7?K?pUNlIWT$f^nr4g3@B`P~ zQ!-M+PQW187^`-`EpB}xFP^D0W;WiBgJWMP5!lW>0>2@fOH;w!tc4}TSLhv~cW|7D zB7q;0YYz3JHx~`u|Ft2bJ%Hi)M^g0ZXv2HB%2H?zzoAxJW+I^cd!wi%p>W_!7wc*$ z5Y>PEln|`^Kvm+~X!DxnhjO1GNlQ z3|$S4SXUV4pNofKqV$6YzF2hT4#Lfl)`-NObeeV)R#W0j)}fH*q?ZI+gJGCT6gx4^ zIlO6un7T15KasMuj5#Z2?3*Gba|(u0JQY@{p_{EbIZsf&lQ${vc+8?8)^r3d~+a^rC1S+~v6`n`p zNp=U%pkWd!RPKY(am6W7Z&dfiSPW{R+X&9Noqh0)&k<269doeg`c>R^aI=$sir^lN z!&Jy~b+F4}K`woUqGNthSJaHdhtTM&Rh#o+=GbqBYz5Tmz32zQPRNe9Mip6Ozi}lB zMn>3)oGlA~co>jHRGqlN2}wcAGO&*+Opf@&$$b&Yuu0SW(xjnJJfN!(#I8)=SB=tB zEL%dw3{ioa2{Oz=Rt%YNidlDRTMkQLXN+_YwM}H)BjE5zfY+>SQT1?(<(`g}N83QE z(x&^dQ`k%`5g)>kvirn}Psdpw*=AA9q`d)(a}rua$*Dvj2+dvYGqM9?7)kX#Z5~x3 zrr?SVwTSzM%1x_%wSb9FMX{;y0|Mq;Ay2|;vWZTvn32M)-((x%K-0t0@Dm>SASi>p znp1&q>Cx>^ETbgy0x(L79#(2gf+OG^hmbT>g~_}^!qc4Fm#KlR@|wG?%JymZPP1CJ zyq;Y$R51hU1dj0Iwe|u68v6{fJE>3tUN`DdN7PgL6NdGsiqNorecBt4K&_4o<7;DC!I4c4l6re=Jxo zw*}^GF=I{M(}4ojZ6h-|o2kox#{B}GmplzMG3Y^TKyZu>8zbnwG*!*n7n&_>$4wX)}s^t~wT zXtPuT-)b>K@;$Iy^Oknae$+XZSu6YTlm23a2&7DEU5>J}7NML-I*4%8XpjbPkB*8L zlEkaL`^G_boKQEY7*dokT*D?hz@5o71S#8iZy~QZZTD^WKu(7@vxSPsP6r`7w)jgg z#u+nW2Qo1k4i1DQr~?5!H%vm`1Ub1d>+9mPE46#lpZv;YgsN|T@a3wv^k?`!f7$Iw z#Z{7CzlD)|&0-YT*tKg2s95Mmk7BUbAwvV~YG{#xsWDf0GL41)Xr(RKs}kLA*pY-t zDZ<{#c=Lbq)J#RB+^lZ5EaOKH>!~z%RL%$ zasz!;WZF4+(5y$Ow$Yrf0&PLh*ju?Qjw!)gi z^{z3}>a)2#ghcLr{qr?qp?_8o9lzW~w!vo%9xw>;l0iuZi)G#b8Co+p?{7; zAbkX98^}}#5QR+g-GkJh{w_c{k@>+6s>4yhga|3=L}R#PF(+9dMo%?|GY-`ip}T}N ze&rxOM#8hbLW%ZQO!z^#FPR`6PG7@5Afp#WeN0-d#A^~O$UN+Kp z(}_RAGl0=^dAJEQ|Ig8xaC069F&C|k*j&r+BntN`RUZE<=^>RQc9lY+RZGL0R-%rv zg;-Q*GwRsLTyZ0rU&6obg*Gu*0?<8hy($#rm#MD##xL;71DuOo7-_NY&?CuXOovQR z(gdts_IW(YR7V=K@k}sOBvI!qd_W}#i9l;d<2S^SWA{)Q54i6zN$y%Bk`E5Y0#D=o 
ztY#ulq@_q5xRn{A*l-o_i_~DNh;j@vy5F*RA9IaQ0R zPqJ&Bx~2~lkL1%CpLL}Ev4*}3)X$wITT)V;$w^M-K-^Gf>hYZ%@=u}mohs{mj~Lr_ z6Vo`xtqc4gEhzkzz8TTN{x$t~Hf&9s4-2kFEVLeoH(1|;3e6WU6H&VCsmB5N+;h1-1oqNVXB zGi#qvR$-PV!HBWx+}*a0t~bjpoWf7|g$NPgObG$^g+738zv%i7pqb&Y3)BKlk9E?8 zJEq}#msak>qfILY;4ZkRSX_ zgy|2vnjz2mTZNQDiv&v})tEFicds(e%IUauiCoan893T%y{(~?#(0L?jOuJsxk&O< zaYJjqJk5UL-95e<&e$Fi<+ZS`Ol?v2BwCe z>@)#je_03$EJ-tWRR^comg!l7SJuP8=N@>PHq+mJtT_UWvPNf&p-w)kD_NGf+@{h z6T_QLVrd@eV$R)tKPtVMtIrHS2?}*uS%`F}SHAWk;q* z+shi`;g`18lKdV*eWJ9$Xc8&SP}+cS~;l2I2Ml}U5%O?quC zkVY03XOo8B8*#o6eop32t|lyqT$M2hrj9P*KDnmC>|s(*ryFnX)2@$^oXSgYY`rEU zoOL99@tPNrPaR}lhikg1@SyE%L%h5=f;~?@$f%V610|T5ycRb?CFGiIYB+4gIXN_{ z?*yc*sRzd9PQQ6F;6`N9sHsP}-9vBUixm<{5M{WbjE zh=*INo}Ta7beJ6fSRXgOU&qtO>2dnZxaarZ76P8yd&*iu`E^0Vt4WD8?5rPqJ`NJ# z{Ue9{eb7wuIcxpDoBu@{t89*2}1x5gF-;#S=4fx^k; z%6aa`@A_=$&li_%o&^r@dtB-Qs~=WdwU{L)Pt5Ee3xTMD7F&>4jnqG^$+zMvySr9SGK)IT#H)k&e7bo~`x>@qNLyZf{T8=W{qIqvdzr zFKCQ4GK>RbO=T8x_L+HhsL&DBmG$QB%xuFUM;@UFJl(*V>RLY^c85Ke-~)jiwt$M2 z7#3bV^^&Y0hoY3I>VuX7`@I2ORUiO%3+%K%ab1a)=;o3J_zZNO2e>;xOw6RLe6O~& z=vQ#_uBss85NJ!3as4@gukVWSw(@!k1|E7xL!m4^!K^C^5Ka%jjrqVD3uAR-y5z9k zMHf03_CAKQvlR~GbMc#pp2$_w zVP3;OdN^cV!t=BsmVJPpfP=(y8;vK+hJZ@^okc&K>BHez&mar$8|HQd0ylF;WyE>B z29S1aS?9Sf$?ImD>42d!r4c81uVLm5kxssvR91`UV83@Z$6N)i9Lgu!JUO)tTU#Ng zHVN&4jWBe3ff};IJvLx+@$qM>NaAqJMJ;B2e9dUFBPNQPB7xgpMW@LBP0va=>qR5R z<49L5Pzh=cT(F~V2!T5U^UW$km=FJS<;ckywLvX2+LMUMR@JSTcp&}OiLtxzQLY4F~gq`g{nFdml4xO ztzBn2Ow4~b<}kxCwK>PJ1pDPoLqev}8BbeC!YoS$yhtw!Cd~hNj=SrmD4!WkV{PkL zNg8Z4>n?{i86uN8PjmLXghE5r(j~N61<%P2uY`chKuvHT%8A<SUphWKY7WNR^O7Ng&ZD%6U?FO9RJ)yeEyHIq%!`|B*5((E6akGOc%fiP^tm|A zD+|Y=AXz(scv*p03HzR`2lIX4T~;o!O+0l+>Rt)utE@Wh3F~{W?4Cg93&z}% zM60(6O;?|Xq`aMvG_)HA-!$s0r?N9GXUGLu0T7e19{2r_Q^)(a)Bs`-F1H@4CE?ohf<~)#(_@u1rpHe|+Q4omB1Dxo323Bqoq= z!9TBzM?K``h6yCmE`jJ#Jo0Xj4dCWuGTxq*B4vrOis>%KZkg*j*O6}9#E{z{WWOxX z9a}>ijDc=En7T<2P==zn%<8?;NT}cE<^1w6xhe9AIq~rNd9R_6&o|#6{kq+(F|Lp& zINzIn(~WhVM@Yyo=#^Q|bFluOMQE~~uUaCy3J}xK(IUrsme1atCoLwD`A^QqjzNH& zw4KH*oX=VTZGh7T9K;?B zs62Ur5COI}lCk&UYz|W`XGDQlo??2JAVwX8Wj2XHcG$Nhx;Aja+r+xv)@k%T^oODB zjug{3pJV4-hl$(FS9P-gDsV`LB(|TjOHo{mlxaDohpKdm#Azvz8jF`0+eX8~;2cL& z1{h}$RkQ7*|FO2#TObK@w4GgxxNjc5-u~8S{c>XZFa6fek{uMs@FmFz)lZSSw77A zi{b1Ob;gjSLLA%ynf)yhVUQ>F}V|CR1$P^{Ts5z5ofJk zI^^$}E@uvx+!p2J%x0Vbizsb%TZSrKkNm|$!d7iiPCmSEg4z-Lw}DePbUkI)O?9sz zM*Y1_x)Z>Mjuvz3)a7S2AFjHsO)s%;cRV?NOF<;cSHB2=bRXQR)Q$BT`%``5ahF(% zHf9^L3`_85D=p(!nQ|)aOzGizvcKwKSH)DdZfOCnap@Lq%mBDZ;8)w!4oRJTDz>0A zGo=&Ha3h@1ZKyRRfp0a7I71XYtedL{7Rl zl<_$#TtrMo+P+Gdd>8AJSmn<#)Q@_!q70onJadq#2b*44Ns7tq9el z?+mm#Yb}yLBzM*F)J!V6+1tA;_+u4TRiLt8SP!_AyxUCV?B}!g2Mo8cbz~8CoDDVg zIn@5>fUhAZ-BGV1OJgQ`DqGG(b9B;0!pKB>E=gfjcj!;?NP8m^@7_*k(hx$UX*Ph^ ztia4KctvaA6TQxS4@1H6Pt0>XMPTj>PNs1>Zw&cBIb+%lRyh@r?hR%=fFCX`cD|h! 
z`qUrc)V7>4+UP7nME~JfS(pfNJ}I7{IT%2~W`ax7eq;ozN_jV$rC9mXi;&M-TAGzN zX3X|fhbYCuGCa@Q{8U3{nj)CvcmTe~gy~zC!G)REQgbaO-;ZUnmtp}5pJiIsun~6^ zu%)b`%xP;JRWW8zp249cE46C$JB$Kff`(57jkOl7(6NewouS*um08Y4BM=0Oz`JiP z+D6?6a#Z4WSv)qI6vmWe=nx7sCAGq@JEa>^4X{+Dh_PuA9dS^5=@asjK)u7q#HsfW za514J*%;S%U4E;SCgtVrnvoxq?FPtDiZ)CYu)?vNHv`%Gevt+2Ymp_3oVZ%}aVg^c zrPf9>PNpAs`<+%`5#U&}Tpw8ap3;2MY((d?HUbdRxW6?!XR!uVH*yyZ?TbkCpDGCUJ10FD5sUjMcD z^~=`r&v9soHm?MD5P{Ty9^Gqdp-pHQ)M6TxMuJI$^hnk`G>9^?A7F3%N!Fne(8D4Q z1W|`J=Ig)l&y)---ct}5>0k>Wkjk^bAx1itBf6 zE}yWpy1iqgCuo9*>P4{?`BylIebv{FO|fI8J63=RDsSlWwX7YPTFLA-aRP$Pw}=R8 zhFkK3dV3rVdLCa;jnpR9F5loI^m?hD#c%U;IJZt9I12I54+@V8JtxhG>>63AD3OwW2q9OMxTuEcW0E^*(XF8` z@eXGc>d7Tkq%fz0EGyIAp5R`?R1c~IGdbFau-kFcz|uSr^M|cavinJCz-U3CPHD!B-!;Vy~f zEX!yB^_&3T4Hr0J{HqDH2Cc|Y^J`N5ZJ-*S#t-pxD;I=vACMnyD1bXcxK5uJLUe( zv_C+d^*>h4jQzQAUO{Iyo3SsPfju19Uc_Rv;(3A-JDZH!cnhQjs~^1PhB2T*pf*kR zLz0czjanE33&eh=yD>kQ1|t{;hBM??pk<$qxRT8(;07S+tg&GcGAOji8>*HwW8~Dr zY|lYV=E7-gDU|(*`9zcqiTFZK*aP*lbxyruw-(C#5d=eXMt!TRqPBcEom>hr#rHHR`nH{@?+hmC?tpBJRIehZFhShZiSfd5AB1XTKZ)9~>7OOpQvKg(bfwN7}w|J5dYlfBas;0qM4k zA9k40qrSAvZq=xLup!M=&qf4CK1K>{+qY1(!$xj7Crx(bpiYHSQ+1Z-o!Dqec&;G2 zblw~}P0EbS34urnM!1JCN7K3gEg`iWxz*jqaQO#gUPRU4XVNc{P+@bzEPSNK=!@^U zT8^ar8_ObZr$@;3H+vK-BZca@8$a^X zWta4q>NJb!S4ob_Wg~oyS$1u3bmJ{-Uk z+Oh3?Ii&1>0FCJa-k&D&O?&O4uL+j`^;kzY1NiAknCZxlKrnv^u zX^q_+Sv#G7+mnnve|3w0F4EsUSRC@3wLFkM%=UM&bP)GpG)k=pK{aG5>V&D+b9`o$ zR*^y(8D0t+sL!~sQm=!bQl{Z!h3SaVLDsiuy+vZwvV&(p@B2zSl-UZXALD03xCJxF z>PeJe_yuCb&Vm(J)MuqSi!&PX4U-ZB>049JLj?8wDet@{R&qC{uK64@WNf}ZC5IS@ zohCUvPd`hus~nQOq0KTV6@*RCcW|kJXb%N$bLPI#S=Z1=r;g&qJ=2rj1 z#l2g+y}Pluv9YzayV=%-ql-89QV>T25C@t;+znNcLq69^D4u#$u<`o(AwM)O3GgG$ zzXCTtj(zIh)X~|_&4te9ZH~4K5fSQd6_c*&V^gr-lCU{qW27LbCs^PCo zn{%LQ2(0(MQx-RG0s7xhW@F^=6p?*qtOq?qcE;*f^#opwf!DSj2AJ;JXY8H#3UD8{fZ;jVg6WcQok?>sL5=*u+fW)A ze2kM5dU+kSdk6VDpD3iuqJm+&sSt~R(p9{Q4#7wT;TsaND0E z?tnJ{1$^hJc-m4{Ea)4Dk*iP2W*sX!NC)xT^V2VX&St3)9C0@l<`G>Nql(o7#orTD z&Os2hQWkf;?-IecvJ!NhoZ9A!=ph33zs7cZHe}MH36Z`7NMe$Ag6D$ed1tMiFhkQD z1t2=!&S-+<@FdU}L8O8ZB(rl~#7q20z@@Oyh>y_H)T^#wwE6FcnZ@9ylgR_R z*>C^AN#4!G|I8DL5e`K~*v2wl2oMoiTDlgxF87;=&YCLRAqsL8hPwk6W;m~~i4tSg z_VjSiV)i)mV;UB-kSd%Wqaqg{I%Jn>m+3 zq&DyiRC84fzmh0eI8wzW`{IAftIHduQ!5DnI^AG*RJCxrr}DC3=`qBcA4v$E>QdJT zxF8?<3+MFBqc0%=(qu@wTc~YJCQE}^ZtPbpS-_Tz?8&9Ulr9}?K=_Gq@b(E}o0*>z zL#$#JdNQ?AHlYx-L!rVj#CS{OuLe?14X{F(X#Bf%K@KZw_ZfJ;03 z`D6e`1Yq+&>RG@C^xfzh&?n(J>VMEm{b(cl0b#ArUAhH8IFK@1y#~0f@7}|7 z@P7iEs{e2^j75Kj$L?D|w&xqNsF@VtUsAk#&9`D1n8&{Zc{x85?C)*|ae)5v_x@L7 z&w8Do1I76>kf*n6_hs)1=x^QDS^@wo-zV1qLk9Q@K+>DP$UDHUyRQQPjE|pv?Oxbi zxqO+ps2%p9vj79_$Zyv(Nzt3gA94@=#?I*{{_fV^)@WydW4m@>%3h!IbJxd!Pw?$) z5|8MwKX1#~`EL;kXhZTg_?|5@_eGWcLG|+PPCoL!xljN0|IXFtZs*p*z4_~#0E_B= z4}vkeEvg-}=vy-YnNNT`)>rWB?v1#tE<+{4bL~n1QCR4In-BcXxJoZ=Qb3C;tQhQ|_^~b|-_tLi_N)SNDbVpnv7R z0=Q6J0KAg`&nnLa0Q|J`HW7d5Prw+^a>*_Qh7yXd=e2Mr;u64y;j_Qb@eN?$-My@p@#Rj) zx1lEutAFxe8v*~B{_c+z7x+JWS?@tm-k?~)uO7&bTpf>5Yy;D@uK>Y_ukv>TYiCQq zz3nof`{J#8!M_m*5o~VfO27FF{yRJb5BWj)pGeRB%7AzokViqC?QLLIABYgx^Kbav z{G<8){O{wseR)aq3Ghw)&j!mEpzk_8TpIwITUY-Lxs)%g3;!;e(K*guz;)YSdj$Xn zU^n)X2C(}-2juqnXPw>^D!|bA4Ddc)2rS{h0qCy*-Is6Pi-20Y)X$Uud2Sq(o9A)> z@Bc34djWQB*LVMw@~dC|p97qJ_3QhXui-o41K?`!6L9>r1f{x`b9zs8NPr^o$O4gfY67s?;)u|NW;_6|Uv z;#&#wR(1x+b^F{t6}$#)%a>1m0s1EBo&o#!`N#f>Bm-i@@YlsMky!qMpYu;+$3RiR zKlcIf^qSw^p8lHOfBEU34m9&VukCD(?*xazTL3!8{$HPh{k6DPYWq;d8%)$SsPc`L zHGXxgd#jm*NP^f{LT|T`h8Lrg>}6HGd0p>jU>N^!LS!bD3g&y;M%kF%(?G))y_}W` zY53}g7$Sr%iT$IINQ2bJm^dFMS8-BC>~aNx^*aY&M#$M#9dpB^kmS5YS4y?iixHU* zgz|ePN$r!lk^6e@u-D|j{w`?tr`U|Ko? 
zfXzM`xDTC>thqVUAl6ldWzM{h{{Z<@d@)YhV;@ZMNbD&Vr-)pHUrQrt(X>&?&Do5` zxSdo^zwj4hDT==87h@wp^UdF{_YP~}7<@{Z~3NrXvt`@Sr($s&v_Shq7WGG0#00k33T9&ldLhAfR* z?b9~+YmO6;lkPb^RU}qN5`u&)K`i*b+z_vm##tcaC>*0bQP_d;A*}$3iX2bE=h^as zP*B1p9;mS;wy+PGf3RU9bB{yOhTPNccff*|K^rncnCL&*@F&;cggYr-6vcqw7$pUh z&B6Z*I21p8d#7;p9suc#r2fU2$bv(>F@bxcb>s#baR8o;PSi|pnA3eFsU9+W3eFkD zX>AM5wf#tYI0u{Sd(YvUK!1|yl?$2--WR@AC4}KA%tb8gV^W5n!fnW5C^PC#1W3>)-h%|d1$3Pv1 zCW>*TV4C?d!x}f;B=nTqW7G~f(crT9xe;JkkE1f%N>|MQW{HXe*S`B9`Qlt$e6^R$ukM*L0yd*<38w&jv35+U;49bNG!YSc=_e@g*6sqZ6$kPv5_NHlYiMnT=Pi6ZU8dd zC4d_3R45U-_=bfkmZ}GWjDwfT>*dC_I@u@Ql4N?bW|AgoDTGdX^m27~v* zDyZ{w6naJS;6qLVT~xl1#+<+v3je_o8>jwH98uCVy)Z^vJ7zloG#hQK%Gw^x^@NZk zg6&DrW$=M;P9ofJSlQ~n9lUffsz|YDd*}V?@T{3~TtWV2=V(6mUQ6f@)QhrS_iKyE zl1sq?#L_x13loMwpk^ZMk%~49+)%~}l-zt40*^9?3`izI=lvrS z-+*KyIhLNyFJn$i7rNGOdIM|?CD5Zh@q6=nzauV(2QzdBh96zOV|)ydSowtCD44vW zT|capr7F|WL(%H@aM7GoIwfnNW1k?=-F>{zx87YB$)nCL(d0uqe8?nwczF%1YZ#rU zrgj`AqLtw+e5HB3-GpzgSZRN47 zjf>OpKdko zrR|m@dLns<3%9?O0inD+19f+E3%}OB%-C##9g-fa6ChvT-@6!yZ6-GDhcX7cVs+ih z&#|0gEUx_X-R_CB3NyiD#0@EU<8gjgR*6=JeA%GIa~eo>;8O9{r5-O52RW$ilzMChHoY8R@c?e$Cc9 zgb@?awN=21T2beLN)gV2CaWa}gw#e3q|uKi&^#)-WOg5y1}+J=nNz2k)TuM+VQEfe zhq(MO!SjfhpRpgbI%@j>h;hAszv%x3@OHLt?}lvx`R#8H)DQcNK6iEB?SD3}|JuGt zGkMa21KC~r>YuLD*9EkL^7Z!tvBE`w_awmI^{xN4XA%F=Ag*`xo{d#TZhP-#=qvIi zlT}dD3cEk(UVrA=_8D;X^h9#2@5brY#ajZ8T&aN zT$+8e-UqlggLX>(fbQmR2eh}jx24_f_8)yoz`p)yrscEmevYReiVFtBWsq3mU6g58 z4$^md;v&JM-hT?H9C+ijLBdxkX@pGP9Uxyl`i07MB-kQL=ouBZQxs6*GfU5Mc)JlxGaz|Dn@CI{w%0SbWy9 z+HKZy^`PW}SJu?eaap3T%eazy|3pd6uCuZ!rc%-{9EIDanP5z^e@d-T(xc+W>n7KP_@oAo>m=u+B$v0?~4X4w%3f$)l_uuPZc--tnajb-}O4A*~<9m%!gT~pX9Tz z1(o~cICMYxzL-1swWR^SQOJC$W%Tf5jD4B@x=G5g!OOVn|2~i;7#%QuabT*jRL(lKe!ztx8d%pYDbS#i zQqztTJB}#JlI&P7ijpXjB02B(gEQUx+&)NIv z%H=Ywvly`)!*UeMZY+IRf>?@JKE`qy%LE7!B4pEuZk>SV0G98`fb+Em$HuYz2+Mg< zT|$Hi;8_mXypH8Bu=FnD{6WEdga{G9qg4oO{szmw+8x=21&HVKFrUps2uq<soe`$QNKHlZBad7UpyLB`S1kgaQKy#A6MP2vVHcaKIsu@gfTfAkdJX$Ae2XE0X13M3o5<(*B~^$Ahfo zfL>xc%%2m@=i#H-NL^o!Nw0@tw*z*%oq#|ExrHL!OQfot&1$geL-VT}i^F6B{o3;ii-k zfvFph8$$ETt70|Dd;#V&c}N$Eu+#4Xt3e@T|A>I}_oBLl2-za07Me#CWA$64Ck%8R zCDU*%60gg0b|K3dl+CIsG^sw1Bp`$46=ydlM6V$_28&tlDyLqo>0}ljARCJ2i$FkF zW+1cx&-eKG<=+EjPC|qT*#f|mDHaKI$h>mu5HuD89U6o4jNi@1-sDRQa4i&LArah3 zY~+CBLBP>VfHRxQz<4YLQ>h$q*2T#eEoqy|i2K`xh0+i3GHG1@J(f=p=q5z<2@$f% zOs#aW=w(bt4(^;nptxQKbOII=91M7w`81o!!e^m46c)&FtYg5_XSc%MfCo$liLuNh zAl(cnU@jv$q?I15GO;EdbXv{O?X*IV%LXQ+1XB9?omm+y|BL09W`q9cqAG<5*#y9o zDir<`qyd6xHf7;P9nk4TP}%2~2KU{GCg3(Qn6gY++(skp@AQD%B8@~2;WchWk_>Rl z3gSr2rpIB0egv9sxBbZn)M@HLCnoeySbl0X7^0$Dg$QXw;7Jw=G%)Zj=7T8|;PQMF zilyq~dI)rUtXCbbqiHpn%qXVPpvw+>0@6N8b@9bq3?8WV)6$6SW{1-TJN-_unIjwLtMIv0Xuz8W_}U4+>fMSBA$V0t{|CcDmGN>`%!#A_(ua1hr9@00T(!| zRqm?uSl+Z7je`9M5z;2W^Bzc%=4d)4;eH}r<%@Bl33aH;$3W)BJTaXt0|@HXdSRPS zF|CN@+>NDWuwl1?Zo38c1>ImX8KH=Pb0?HQ6De$M2Khx=iJzZ`v z8RcI#d{z@284Q5ari=|b?=+c2)_AfsKA8za zyjWDsE7I-*ogM@QD`e30`u$W`8Wf1TUO)yEK)@&zOYrfd5XABe3VR_SIk3$OCIp`l zvZJw?7^DiNDzNdGjj#*NuO5$00h*eDh?v)(?+${~s({a{F0<)^ zsA?fXHjXG-9?s^d2WtndPbJd`9t9}NtfGYALuOJ37BY3NqZyS)<)`R}Jw7M+>}JRp z7vT4kVPs83c07T~(*Y+O>h*&W!Q@M1IUhWXuz`VWj82JP8v!p{gh(b2B{?v(Q6X>| z>GQ$2h62d8^z0h^Idk|NzqvEdo>TsNOH{KEAsb5+k1Xo#k2q@>kET@q79CUW;ed;6 zBNL0l8fZOWHABD0!PY-TCPU^GXCMcHF6i?*7$E%t0ck!{c)~ZM_S@VHr(q_Uha*EB zU^W_-d}w};pxXv74*>j{0ZnB`C|!VCGYQzWjfIsB#Pa#qJ!bRyMla))pN@(aA*9`8 z2+J9eri${ZwnwmB0%=~l@eRp%<}=v=1eqfQG;^s8q>7~qBf)=tPAeD?d`PcvMN+J1 ziV8W3Y1_SU5@u34b}XIqY=;ZBBOpc7dHC)95X>PEaCXCihaJoRRLtkfqnMhlH2e#C ztT5=c$=9shiSd?X!6Y`vK*j}i1sN&$c6)+@=;Pxauz>Ea3O~>XFN2b*eJ7|$yi$P 
z!|Xsaik1(fX~@Y8$!gHUu1+Zn>cBjHWffCSAk7QHtVR+wL%DfT6{fJ=-e)3 zJnY&fsb`A;8kvQ$9KVk88oxtSyIs1WL9P%x{6cT zWmQ*gDmNZyoO~M9A6$531<5I6#D%FQ&qFC_KR;56H{pi)`!+ z?ejC`mH>jAQZ%-wmw*S???f_-no<%=sRkQ$uM}R=tgM~ILuAx6~4uksHxjc`fT!u+hTI>7OlHX5(A273XsY# zDA!QbUCy&p+oJ^>9Asw`yJxvFLYbsdoap_q1av-|NjBZePu^dM5Fr}~cmST`9|&C3 zu~MYSe7(f{KGM1snUK173ZTMfB61H_xenMs74Vd6qSU3;0iR6)9i2S#c`34zK%l!l zpJ8Y04A_wUenVH2Ba_JP4g_cf>N%7P8oMw0?=w!u3fP^Qz zWMHBa%N3dFMQo>3$8V!HrMQ#1Tw`i=;`IiC_KM?}9MTQC(?7fukp>l_srOWu1LF7` z{CuaS0+2zQZB)bi7+c4>7#Gb^$d+L|hy|UTc$8IuKw{AAs@O)4*Cw-@GBbaS1|7J} z1_Yf220GUs#i8G8eIkHU&2H*1%M3W4-{GmtZuq8Ji|Y?}qcEMwv+F;*-3=y_o-v-1 zY%ls8CeC;!HNKB?SVo%g$uv6llE!P%V6S&Ya}=^=fQPS;Z-$#vr2JN+Zb>6W$-RSM zbY(ih)*r;Oa*x#FrWDkeVf4?00Cr@TAM7^cqNb4!s-NeRyAk+eHqGwS@3F$(?Jh{9 z@(`CpGR%5C_}v!9x)87~H^DlNYdn^Ep)NK!=D28LLWI<1Jd!%I9xD~yuo{gGdx9uC z;P!wWW>W=t7?aXtOeJzvQRC$@R7JSg26cL@MmRj=1&7L?R2S&D-v?3T^M?ue{IN7> z(T`yD^m#AtXLN*ZasDVJ;iCn$S2QNC zFV|5%1oZsMhzH?IgT5#EcH40dj$4i0X2D#syEwX>NuA}jX~HTs+YDnWtc zG60-w@}8HW{$NJB{%fkn_zQS)v^8&})Ibj{^Ve9T$7MB26O5#yP`_M%xiN6Jk#X6`i0!cPqtXuwZ7*wveF%21{i30pY@*mL=9NCRol~} zg5G0{#A&XH{4T?MBHvV`xH>vq>mNV9c5IK<$aFHX?uWCA@96L@uR5IKAKDajF0QcR zYeC?0lMTC-O45wH(7n{7^O5W$tD%8cAwLJo!UvovJX=Bu} zS`VVRro*h&%x1MgYYw09RO8d?V-=_~+mv3iH+nyek_5u&0>~xx{0yjgc0;IBqMZjQ@G!?3{nxHiw%ftU% zpJ%BzN<5mLSX#WGdcwAoU#y~HbCYDHHvekKu@+wYlt$oLEjgv}`Wr1t*P6CH{jk@4 z6;*VC)RdgRv4aSe(Hu4O)!=YCe{OUtQy%Q?6oZS!Dntz};Bl zKpTP1DUH9v)MId3_9q;dZD_5I#+86aGbTi29pB?hmHA!O`_-w+PT7Yv)}m)P)5N>H z1eKnnP0{xBGN@c&qm|7@=y98?Oqc0+9;U;YWo5TooWW=Vta_kzb2174b1ftTj2vn) z;I5?IC|3cSw~b6JEx*)M>4)UX=8cgn%1+SMgLSIK&(8Dus=Lrz(ZxJ2E4p7+{6nCv z=SoX%qp>>5&EMf`s`p#_Jm=)+xTHbJ8UfRg#`^ZF6x}bslSsR_J)VK^XI!zL3fiB@ zk8|Z=0#ccR$_ZRs7W3JF=9we3fYwJ}M&ZLdQ7PjgGJ|#@e~Mw& zxoRG?rslJ7;giSk#dJzBsVG$9+s_8TPf5dSQmU#zt6iorZe;#k0`22*PHJpyD+Qh7 zi!hM!75*YO>J~q(;rc#}@5v9#8xPayJC3aI{hw=4-#n&ptXM4q5itypB2&6N`iefLLGy_`6bRItVJPg0O5rR;vs65Yhp9Q{su!9AGvMF~W zdHGs8a9RxtXp#S_P0Xwj4GpmwrC6ycprwJWMhX!vV{x=Z@{g=Ry)F0qk7xi!ErSs` z2*<6cOCjYd5@yWPpsnU;9scNu(s^QSOOJWemcU>0XI`TPQW`7jCB z?!{p`lwA^_?&Ce){bUaA%x0wI^RhqZD}!!0hD?WrTgZR+<}0aB{Vtvgqdd_jY)6Zd zG&?G{QRT0sEBP#}UJ8%XO*HU!l@SeqHr~X0INOeGuWDHGKEL!Ik&xcWVp!vrr4 zdEj?-8rFoEVzzuy8qehPyyERmkH43h{Rli?gZissl1q%6zfWD35s z-w!^wx$ar_=dxAN^2Bmp+~on6!&K$DNhAv}pDKV(4jv77dAioN2@GvjPM=o%8fyif z)sn~t_r!4Eip8RGc_IOB zn~_DE(>NeCD9pz55K9*!CxeX?Fsf7HsO}?pd0ghYptXCu0}|N<@UOpM@evarHe3d`$4wr#x^40VbARP!wC8SDCLJpz=Xg`IZ>yTni(Mk}?pQ4SLwO!=c1z?elvsY!b$^T9q%PB0Ly! zEqp_bfM;wgv^{;jjn4*rQPkz5Sgw-7yAgbj?DNBLmre2eRQEoWmDH(xy(CX_Tsy#z zy6U~ikPZ)fAm}$Y%&ENCpu=n9ak%wZ%0NgPUf$ycvsuZp{4=3TLNv0zyRh=m1tCq! zqM_#pI(*Sj=ZX20-@=ctGxMj<0Xq?VZck_6ekcnWc_KXt7IZSZsb)5up_tgbTNW6? 
z^Sk_(C-!q$%2%Woibm&C@w|lQ;6S%BJH^%g0pHuAYK3TEIq!(qu)%7PteL?i9N5VRv{XgjsTsF3Orn%>+oL~n+uQP>W{Jf70A;8+t6g9 z(j&WKE?$|3NV<~z>v1d(9=ky?zrxWxM3P0eok}t%jp!jWsehX^gGJ+nU^hpV-7ts9c^4e-D?`?S2G-;{bMvM>v!Vt>HP$AF44IM2VGE7CID>9r0&$Hmm0~soF0{p)N``Yq*0s{ zADFjV%73W=VEMK}+cPfbX%BV!smDm^HTvUhh|I9pjV3)jzuN2+p<&9W$8JGMPzE1vEv!N41z%!b~V{Xj6>=YT;pogU6+;mW-v8v~?F zi)=T-V9*NN5H$SW%HTnANGDT8#(u(y0z5t71dFA*(``OhP@>y+bXei&exGV?c6hT* z6+ufQr(dOJhOVDz!96##f2Wa&wc0GBk>ZYM{BdYD>*9uKd&XA1&F6!G^9Vi^Y)arW zKoe+Z0w?+b0Ea>KRT1k8H z(r!1H%*rekZyyQ<+Li6_SqpqEs(dX;%eOM{tkmzZQl_B=P)t5 z5PU{o2nGfbIo{v_r-QvV_~tV{pvfi5a=1AKe17&I1%Dh*z*SsQJXSJWWPY16AMaDH zIC%(KCXqcc(4xq6yGe2=OZzS$<9f5r8~eK&8kv?~HaH|~{X{gp z{}Zru)HvjU zt;h|nrM};(18^tgI)10!+n%o33H6$hj~>rc2Eh057AyrR^;G3^C72H9;In&4HU@}K z_~*13;FT9Uz-)OE3*Y}V47X>bX!d}^2;cukknfs!v8!XSRnPm%Pe+^e6*T(Q(KT^B zt=+Tg$>g{)Xw7Ix=QWzoz1+g@v|870^zN7Xo>~SD@*Ff=DTh?Q{95xh>ly&qU09nQ zES?>5L$$qC1s*QHo0_@6zo2VXFx|Yu@(sYw*S4 zYuh>rc!-(2$;~h7Qr|P=gyRQ&XktmzCx?izfvoIaWE9p-U@Abg&fP-f*q@-o9ZbGG4 z&TN=kD_%I1TdVYAEda2UW)K{M*5zMb)3&Eyk6K6Yp;;{s((E||IF!6=NRGFn&$nhO zvs#eD`8y+i7~bLFV}S^CjuMb!x>=d9=R~E||trfTq!OBN~uOxT!xm94X zYu5Jk>s9NAyE_p;%7@9jL!~cht;|Rv_j>K^wZmV&9E9$mh4*2xbR7s>8zCNRyg{X} zWULJTB|Etc<%p;n<#K*eFRjvdYUQ-x0Dy5i5Ep5|t$ z6RY(s zO*)sk)JvG%>H&H@Bi@jz)HW7AKIBr1?l-c?JvP#{>%EKRe*cl0cU%i{?=GUmwZ}(b zWsHIM5*@2^%}WigeQXu#Y*pIcx&zO9Q?oO;=*nlMQOg#qq4p3mseX2(C-@U#9S9M! zImEc$4}iFLi-5;ukpQPuRwC0G;9zqKnQW+%7GI?aOQ;Y z#p1>Z2`-#0`G5dfVv)h)vB99Rd)Dsit=3#(c|YIx{{Kb1tai7uGBYCb#ryZ~U&4R= zN$0Kx@9s9yopsJFo8X>x-YtW3tLT2J@^aipcTb!@n0dEYi`)&)zdDG#(u@1CL0l!y ziQtdpACGsBAE$rm4F9H&-g~@w3I9vFG~i?99hP!$<5w;2(f7Qy_yJqNZDd`0e;K?y z3nHh3b61@UtH#MGetTAXx2=P_ntS&=iaUGKx@{F)H}k&7d%9Q-G1g7BL0dq*XF~%}et0wgvqnGi;H9 zI^;n==)J|5y$J3J?(qTliful{!EfW|XFOeF&p+FlP(s=a-^Ddyx(ScnnFqX_+pl$V z)LFjBkQ-pc3K!kn_iiBTl-^9j4q2}FEY7>E6#11@C2N}4hSI%cl)IF?+3F9effU4m zlLOvc*aN@w1^SL3ckb_T$W_kAS#aOq3-X71-hHEPo%

+An API reference is available on the twistedmatrix web site.